Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 1
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 184
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 513
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_i2c_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 45
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 829
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 965
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 20
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 107
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 105
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 1
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 1
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 5
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 47
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 1
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 46
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 44
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 38
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 333
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1136
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 333
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1530
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 213
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 533
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 492
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 112
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1779
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 530
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 156
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 230
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 137
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 281
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 108
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 851
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 124
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1500
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 245
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 212
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 1103
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 132
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 179
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 218
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 112
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 273
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 568
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 621
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 125
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 143
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 566
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c (renamed from drivers/gpu/drm/radeon/radeon_fixed.h) | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 2383
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 25
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 16
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 7378
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 535
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/avivod.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 2228
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1354
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 183
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1023
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 1038
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 164
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 337
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 280
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 47
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 103
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 918
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 113
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 259
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 904
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 255
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 67
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 516
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 420
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 877
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 486
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 1026
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 258
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 439
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 551
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 458
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 591
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 373
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 108
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 767
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 51
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 101
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 735
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 134
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 135
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 611
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 762
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 312
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 133
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 416
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 345
-rw-r--r--  drivers/gpu/drm/radeon/rv515d.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 316
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 134
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 123
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 28
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 855
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 86
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 101
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 210
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 189
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8
236 files changed, 39366 insertions, 14486 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c59003963..88910e5a2c77 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
+ select POWER_SUPPLY
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f1..abe3f446ca48 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index d68888fe3df9..ba38e0147220 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,6 +33,7 @@
#include "drmP.h"
#include <linux/module.h>
+#include <linux/slab.h>
#if __OS_HAS_AGP
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a67..3f46772f0cb2 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 000000000000..55d03ed05000
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include "drm_buffer.h"
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: Pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ /* Allocating the pointer table at the end of the structure makes
+ * drm_buffer variable-sized. */
+ *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+ GFP_KERNEL);
+
+ if (*buf == NULL) {
+ DRM_ERROR("Failed to allocate drm buffer object to hold"
+ " %d bytes in %d pages.\n",
+ size, nr_pages);
+ return -ENOMEM;
+ }
+
+ (*buf)->size = size;
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ (*buf)->data[idx] =
+ kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+ GFP_KERNEL);
+
+
+ if ((*buf)->data[idx] == NULL) {
+ DRM_ERROR("Failed to allocate %dth page for drm"
+ " buffer with %d bytes and %d pages.\n",
+ idx + 1, size, nr_pages);
+ goto error_out;
+ }
+
+ }
+
+ return 0;
+
+error_out:
+
+ /* Only last element can be null pointer so check for it first. */
+ if ((*buf)->data[idx])
+ kfree((*buf)->data[idx]);
+
+ for (--idx; idx >= 0; --idx)
+ kfree((*buf)->data[idx]);
+
+ kfree(*buf);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the processing
+ * iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ if (size > buf->size) {
+ DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+ " %d bytes space\n",
+ size, buf->size);
+ return -EFAULT;
+ }
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ if (DRM_COPY_FROM_USER(buf->data[idx],
+ user_data + idx * PAGE_SIZE,
+ min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+ DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+ " (%p) %dth page.\n",
+ user_data, buf, idx);
+ return -EFAULT;
+
+ }
+ }
+ buf->iterator = 0;
+ return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+ if (buf != NULL) {
+
+ int nr_pages = buf->size / PAGE_SIZE + 1;
+ int idx;
+ for (idx = 0; idx < nr_pages; ++idx)
+ kfree(buf->data[idx]);
+
+ kfree(buf);
+ }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split across multiple pages.
+ * If the object is not split, the function simply returns a pointer to it
+ * inside the buffer. If it is split, the object's data is copied into the
+ * stack object supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj)
+{
+ int idx = drm_buffer_index(buf);
+ int page = drm_buffer_page(buf);
+ void *obj = 0;
+
+ if (idx + objsize <= PAGE_SIZE) {
+ obj = &buf->data[page][idx];
+ } else {
+ /* The object is split which forces copy to temporary object.*/
+ int beginsz = PAGE_SIZE - idx;
+ memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+ memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+ objsize - beginsz);
+
+ obj = stack_obj;
+ }
+
+ drm_buffer_advance(buf, objsize);
+ return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
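For orientation, here is a minimal sketch of how a caller might drive this new API. The function and its parameters are hypothetical; only the drm_buffer_* calls come from the file above, and drm_buffer_index(), drm_buffer_page() and drm_buffer_advance() are assumed to be declared in drm_buffer.h as used there.

#include "drm_buffer.h"

/* Hypothetical consumer of the drm_buffer API introduced by this patch. */
static int example_parse_cmdbuf(void __user *user_cmds, int size)
{
	struct drm_buffer *buf;
	u32 stack_obj;	/* scratch storage for an object straddling a page */
	u32 *header;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_cmds, size);
	if (ret)
		goto out;

	/* Returns a pointer into the buffer when the object fits within one
	 * page, otherwise copies it into &stack_obj; either way the
	 * iterator advances past the object. */
	header = drm_buffer_read_object(buf, sizeof(*header), &stack_obj);
	/* ... validate and dispatch on *header ... */
	ret = 0;
out:
	drm_buffer_free(buf);
	return ret;
}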
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8417cc4c43f1..2092e7bb788f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -34,6 +34,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"
@@ -960,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d91fb8c0b7b3..57cea01c4ffb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -30,9 +30,11 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/list.h>
+#include <linux/slab.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -493,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -857,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
goto out_err2;
+ }
}
if (fb->funcs->dirty) {
@@ -2349,7 +2351,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2361,7 +2363,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
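The sizing change above means the EDID property blob now covers extension blocks rather than always being truncated to the 128-byte base block. A small illustration of the arithmetic, assuming the standard EDID_LENGTH of 128 (the helper name is invented):

/* A monitor reporting one CEA extension (edid->extensions == 1) now gets
 * a 256-byte blob; the old hard-coded 128 dropped the extension entirely. */
static size_t example_edid_total_length(const struct edid *edid)
{
	return EDID_LENGTH * (1 + edid->extensions);	/* e.g. 128 * 2 = 256 */
}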
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..9b2a54117c91 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
@@ -153,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@@ -262,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
- }
- i++;
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@@ -836,11 +526,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
- } else if ((set->fb->bits_per_pixel !=
- set->crtc->fb->bits_per_pixel) ||
- set->fb->depth != set->crtc->fb->depth)
- fb_changed = true;
- else
+ } else
fb_changed = true;
}
@@ -939,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -987,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- printk(KERN_INFO "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1126,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
@@ -1203,3 +807,114 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct slow_work *work)
+{
+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status, status;
+ bool repoll = false, changed = false;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* connectors that are neither HPD- nor poll-capable -
+ TV out, for instance - are never checked */
+ if (!connector->polled)
+ continue;
+
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ status = connector->funcs->detect(connector);
+ if (old_status != status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
+
+ if (repoll) {
+ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+ int ret;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+
+ if (poll) {
+ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+ slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ /* schedule a slow work asap */
+ delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+ .execute = output_poll_execute,
+};
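
To show how a KMS driver is expected to hook into this new polling machinery, here is a hedged sketch; the driver functions are invented, but the connector->polled flags and helper calls are the ones consumed above.

/* Hypothetical driver wiring for the polling helpers added above. */
static void example_modeset_init(struct drm_device *dev)
{
	struct drm_connector *connector;

	/* ... create CRTCs, encoders and connectors first ... */

	/* Connectors without a hotplug interrupt ask to be re-detected
	 * every DRM_OUTPUT_POLL_PERIOD (10s); HPD-capable connectors
	 * would set DRM_CONNECTOR_POLL_HPD instead. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				    DRM_CONNECTOR_POLL_DISCONNECT;

	drm_kms_helper_poll_init(dev);	/* register slow-work, arm the poll */
}

static void example_modeset_fini(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);	/* cancel and unregister slow-work */
}

/* From a hotplug interrupt handler, force an immediate detect pass: */
static void example_hotplug_irq(struct drm_device *dev)
{
	drm_helper_hpd_irq_event(dev);	/* enqueue the slow work at once */
}

A driver that also fills in mode_config.funcs->output_poll_changed is called back when a status change is detected, typically to refresh the fbdev configuration.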
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9903f270e440..677b275fa721 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413fb..252cbd74df0e 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
{
int i;
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
- memset(dev->dma, 0, sizeof(*dev->dma));
-
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index 548887c8506f..f7eba0a0973a 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a20..4a66201edaec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -47,6 +47,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm_core.h"
@@ -125,28 +126,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
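
For context on the DRM_UNLOCKED additions above: flagged ioctls skip the legacy big-kernel-lock serialization and rely on their own locking (the modeset paths take mode_config.mutex internally). Roughly, and this is a simplified, hypothetical dispatch rather than the actual drm_ioctl() source, which also performs auth/master/permission checks:

static int example_dispatch(struct drm_device *dev,
			    const struct drm_ioctl_desc *ioctl,
			    void *data, struct drm_file *file_priv)
{
	int ret;

	if (ioctl->flags & DRM_UNLOCKED) {
		/* the ioctl handler is responsible for its own locking */
		ret = ioctl->func(dev, data, file_priv);
	} else {
		lock_kernel();		/* legacy BKL serialization */
		ret = ioctl->func(dev, data, file_priv);
		unlock_kernel();
	}
	return ret;
}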
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c97330412..f87bf104df7a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -27,15 +28,15 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -60,12 +61,12 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
+
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
@@ -85,6 +86,8 @@ static struct edid_quirk {
/* Envision Peripherals, Inc. EN-7100e */
{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
/* Funai Electronics PM36B */
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -107,51 +110,64 @@ static struct edid_quirk {
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+/*** DDC fetch and block validation ***/
-/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
-/**
- * edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
*/
-static bool edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
{
- int i, score = 0;
+ int i;
u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (raw_edid[0] == 0x00) {
+ int score = 0;
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
}
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
return 1;
@@ -165,6 +181,157 @@ bad:
}
/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_is_valid);
+
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf : EDID data buffer to be filled
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf + start,
+ }
+ };
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+
+ return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, *new;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, j,
+ EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+ }
+
+ return block;
+
+carp:
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ kfree(block);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
+/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
* @vendor: vendor string
@@ -332,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -423,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
@@ -494,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh)
{
int i;
struct drm_display_mode *ptr, *mode;
@@ -513,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
}
return mode;
}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
@@ -533,22 +805,20 @@ bad_std_timing(u8 a, u8 b)
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -569,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
- mode->vsync_start = mode->vsync_start - 1;
- mode->vsync_end = mode->vsync_end - 1;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
@@ -590,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ kfree(mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -707,25 +1014,16 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
drm_mode_do_interlace_quirk(mode, pt);
+ drm_mode_set_name(mode);
+
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -808,10 +1106,6 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
@@ -839,19 +1133,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
/**
* add_standard_modes - get std. modes from EDID and add them
@@ -862,22 +1143,14 @@ static int standard_timing_level(struct edid *edid)
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -887,36 +1160,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
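/*
 * Editor's note: the magic numbers above are the fixed CVT reduced-blanking
 * horizontal timing: 160 pixels of total hblank, split as 48 front porch +
 * 32 sync + 80 back porch (so hsync_end - hdisplay = 48 + 32 = 80), plus
 * the fixed 3-line vertical front porch.
 */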
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
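/*
 * Editor's note: a worked example of the 1.4 offset bits above, under the
 * byte layout this code assumes (t[4] holds the flags, t[5..8] the limits).
 * A range descriptor with t[4] = 0x08, t[7] = 30 and t[8] = 45 describes an
 * hsync range of 30 kHz to 45 + 255 = 300 kHz on a 1.4 EDID; on a 1.3 EDID
 * the same bytes mean 30-45 kHz, since the offset flags only exist in 1.4.
 */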
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
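/*
 * Editor's note: worked examples for range_pixel_clock(). Byte t[9] stores
 * the maximum pixel clock in 10 MHz units, so t[9] = 17 means 170 MHz:
 *   EDID 1.3:  17 * 10000 + 5001 = 175001 kHz (the deliberate fuzz above)
 *   EDID 1.4 with a CVT range descriptor (t[10] == 0x04) and t[12] = 0x0c
 *   (an offset of 3 * 0.25 MHz):  17 * 10000 - 3 * 250 = 169250 kHz
 */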
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
return false;
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ if (!mode_in_vsync_range(mode, edid, t))
return false;
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
+ if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
- }
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
return true;
}
@@ -925,15 +1248,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -994,13 +1318,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+		for (j = 7; j >= 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= num_est3_modes)
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
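/*
 * Editor's note: a decoding example for the bitmap walk above. Bit 7 of
 * byte 6 maps to the first table entry (m = 0), so est[0] = 0x90 (bits 7
 * and 4 set) selects m = 0 and m = 3: 640x350@85 and 640x480@85. The rb
 * column is parked in a comment in the drm_mode_find_dmt() call because
 * the DMT table this searches does not yet carry reduced-blanking
 * variants.
 */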
+
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
{
int i, modes = 0;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
@@ -1021,7 +1432,8 @@ static int add_detailed_modes(struct drm_connector *connector,
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
+ modes += drm_gtf_modes_for_range(connector, edid,
+ timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
@@ -1030,8 +1442,8 @@ static int add_detailed_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -1041,6 +1453,9 @@ static int add_detailed_modes(struct drm_connector *connector,
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
+ case EDID_DETAIL_EST_TIMINGS:
+ modes += drm_est3_modes(connector, timing);
+ break;
default:
break;
}
@@ -1064,7 +1479,10 @@ static int add_detailed_info(struct drm_connector *connector,
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ int preferred = (i == 0);
+
+ if (preferred && edid->version == 1 && edid->revision < 4)
+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
@@ -1094,39 +1512,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- int edid_ext_num;
int start_offset, end_offset;
- int timing_level;
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+ if (edid->version == 1 && edid->revision < 3)
return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ if (!edid->extensions)
return 0;
- }
-
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
+ if (i == edid->extensions)
return 0;
- }
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
@@ -1138,7 +1539,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return 0;
}
- timing_level = standard_timing_level(edid);
end_offset = EDID_LENGTH;
end_offset -= sizeof(struct detailed_timing);
for (i = start_offset; i < end_offset;
@@ -1150,123 +1550,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
@@ -1279,7 +1562,7 @@ EXPORT_SYMBOL(drm_get_edid);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
@@ -1287,19 +1570,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
if (edid == NULL || edid->extensions == 0)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
-
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num)
+ if (i == edid->extensions)
goto end;
/* Data block offset in CEA extension block */
@@ -1346,7 +1625,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (edid == NULL) {
return 0;
}
- if (!edid_is_valid(edid)) {
+ if (!drm_edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
@@ -1354,10 +1633,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We don't quite implement this yet, but we're close.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc4..719662034bbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,7 +27,9 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/kernel.h>
#include <linux/sysrq.h>
+#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
#include "drm_crtc.h"
@@ -40,30 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
-static int my_atoi(const char *name)
-{
- int val = 0;
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
+ return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -78,7 +83,7 @@ static int my_atoi(const char *name)
*
 * enable (e), enable digital (D), or disable (d) force bit at the end
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
const char *mode_option)
{
const char *name;
@@ -88,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector = fb_helper_conn->connector;
- if (!fb_help_conn)
+ if (!fb_helper_conn)
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
@@ -111,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
- refresh = my_atoi(&name[i+1]);
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -121,7 +126,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -130,7 +135,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
break;
case 'x':
if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = 1;
} else
goto done;
@@ -141,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
cvt = 1;
break;
case 'R':
- if (!cvt)
+ if (cvt)
rb = 1;
break;
case 'm':
@@ -170,7 +175,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
}
}
if (i < 0 && yres_specified) {
- xres = my_atoi(name);
+ xres = simple_strtol(name, NULL, 10);
res_specified = 1;
}
done:
@@ -217,18 +222,21 @@ done:
return true;
}
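/*
 * Editor's note: a sketch of what this parser accepts, following the
 * fb-style option format documented above. For example:
 *
 *   video=DVI-I-1:1280x1024-24@60m
 *
 * yields xres = 1280, yres = 1024, bpp = 24, refresh = 60 with the margin
 * flag set, all of which land in the connector's cmdline_mode for
 * drm_pick_cmdline_mode() to match later.
 */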
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- struct drm_connector *connector;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ for (i = 0; i < fb_helper->connector_count; i++) {
char *option = NULL;
+ fb_helper_conn = fb_helper->connector_info[i];
+
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
continue;
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
}
return 0;
}
@@ -256,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
void *panic_str)
{
- DRM_ERROR("panic occurred, switching back to text console\n");
+ printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
return 0;
}
@@ -297,6 +305,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_on(struct fb_info *info)
@@ -304,40 +314,44 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
- int i;
+ int i, j;
/*
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Walk the connectors & encoders on this fb turning them on */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ DRM_MODE_DPMS_ON);
+ }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
}
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -345,39 +359,43 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
- int i;
+ int i, j;
/*
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Walk the connectors on this fb and mark them off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = dpms_mode;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ dpms_mode);
+ }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
}
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
}
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ mutex_unlock(&dev->mode_config.mutex);
}
int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -412,50 +430,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
+ if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+ fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
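/*
 * Editor's note: a minimal sketch of the driver-side sequence the reworked
 * API expects, assuming a hypothetical driver "foo" that embeds the helper
 * and supplies a funcs table with the fb_probe hook used further below:
 */
struct foo_fbdev {
	struct drm_fb_helper helper;
};

static int foo_fbdev_setup(struct drm_device *dev, struct foo_fbdev *fbdev)
{
	int ret;

	fbdev->helper.funcs = &foo_fb_helper_funcs;	/* hypothetical */
	ret = drm_fb_helper_init(dev, &fbdev->helper,
				 2 /* crtc_count */, 4 /* max_conn_count */);
	if (ret)
		return ret;

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32 /* bpp_sel */);
	return 0;
}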
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -519,20 +568,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -560,41 +604,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -694,27 +703,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
int i;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLCOK SET\n");
+ DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ return ret;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -729,14 +736,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -744,209 +746,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
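/*
 * Editor's note: an example of the min/max dance above. With one head at
 * 1920x1080 and another at 1280x1024, fb_width x fb_height shrinks to
 * 1280x1024 (the largest viewport every head can scan out) while
 * surface_width x surface_height grows to 1920x1080 to back the bigger
 * head, so each crtc can show the whole console without panning.
 */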
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
- }
-
- info = fb->fbdev;
- fb_helper = info->par;
-
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
-
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
- modeset->crtc = crtc;
- crtc_count++;
+ info = fb_helper->fbdev;
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
} else {
drm_fb_helper_set_par(info);
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "registered panic notifier\n");
+ printk(KERN_INFO "drm: registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -965,10 +896,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1036,3 +968,464 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+	 * we have gotten so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ if (cmdline_mode->cvt)
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev,
+ cmdline_mode->xres, cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ else
+ mode = drm_gtf_mode(fb_helper_conn->connector->dev,
+ cmdline_mode->xres, cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+	/* try the command line modes first; failing that, fall back to a common 1024x768 below */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+		/* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
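/*
 * Editor's note: the recursion above exhaustively assigns crtcs to
 * connectors and keeps the highest-scoring assignment. Each placed
 * connector starts at 1 point and gains 1 each for being connected,
 * having a command-line mode, and having a preferred mode, so a connected
 * panel with an EDID-preferred mode scores 3 and wins a crtc over a
 * merely present connector worth 1.
 */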
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return false;
+
+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (!bound && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ return false;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb42..e7aace20981f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#include <linux/poll.h>
+#include <linux/slab.h>
#include <linux/smp_lock.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -140,14 +141,16 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ else if (dev->dev_mapping != inode->i_mapping)
+ retcode = -ENODEV;
+ }
+ mutex_unlock(&dev->struct_mutex);
}
- mutex_unlock(&dev->struct_mutex);
return retcode;
}
@@ -240,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770f294e..33dad3fa6043 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(obj->filp))
+ return -ENOMEM;
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
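/*
 * Editor's note: splitting init out of alloc lets a driver embed the GEM
 * object in its own buffer structure instead of heap-allocating it
 * separately; a hypothetical sketch:
 */
struct foo_bo {
	struct drm_gem_object base;	/* must be first for upcasts */
	u32 tiling;
};

static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;
	if (drm_gem_object_init(dev, &bo->base, size)) {
		kfree(bo);
		return NULL;
	}
	return bo;
}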
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
+ if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
+ /* Object_init mangles the global counters - readjust them. */
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
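The point of the split is that drm_gem_object_init() lets a driver embed the GEM object inside its own buffer structure instead of having drm_gem_object_alloc() hand back a separately allocated one. A minimal sketch, with hypothetical foo_* names:

struct foo_bo {
	struct drm_gem_object base;	/* embedded, no separate allocation */
	/* driver-private fields follow */
};

static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
	struct foo_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	/* size must be page-aligned; drm_gem_object_init() BUGs otherwise */
	if (drm_gem_object_init(dev, &bo->base, size) != 0) {
		kfree(bo);
		return NULL;
	}
	return bo;
}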
@@ -192,9 +211,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -325,9 +342,7 @@ again:
}
err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -358,9 +373,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -390,7 +403,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
- drm_gem_object_handle_unreference(obj);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -403,16 +416,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
}
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
/**
* Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
*
* Frees the object
*/
@@ -426,15 +448,37 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_free_object_unlocked != NULL)
+ dev->driver->gem_free_object_unlocked(obj);
+ else if (dev->driver->gem_free_object != NULL) {
+ mutex_lock(&dev->struct_mutex);
+ dev->driver->gem_free_object(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -458,8 +502,10 @@ drm_gem_object_handle_free(struct kref *kref)
/*
* The object name held a reference to this object, drop
* that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
*/
- drm_gem_object_unreference(obj);
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
@@ -477,11 +523,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
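The *_unlocked helpers used throughout this file are inlines added to drmP.h (not shown in this hunk). Roughly, the unreference side reduces to a kref_put() that resolves the locking inside the release callback, instead of requiring struct_mutex at every call site:

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	if (obj != NULL)
		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
}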
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index f36b21c5b2e1..a93d7b4ddaa6 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -35,6 +35,7 @@
#include "drmP.h"
#include "drm_hashtab.h"
#include <linux/hash.h>
+#include <linux/slab.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b98384dbd9a7..a263b7070fc6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#include <linux/interrupt.h> /* For task queue support */
+#include <linux/slab.h>
#include <linux/vgaarb.h>
/**
@@ -475,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->driver->disable_vblank(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
dev->vblank_enabled[crtc] = 0;
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4865f99989c..7732268eced2 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
return NULL;
/*
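The old "if (!agpmem)" test could never fire: when list_for_each_entry() runs to completion without hitting the break, the cursor is not NULL but container_of() applied to the list head itself, i.e. a bogus pointer just before &dev->agp->memory. The reliable "no match" test compares the cursor's embedded list_head against the head, as the fix does:

list_for_each_entry(agpmem, &dev->agp->memory, head)
	if (agpmem->bound <= offset &&
	    (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
	    (offset + size))
		break;
if (&agpmem->head == &dev->agp->memory)	/* walked off the end */
	return NULL;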
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c776..f1f473ea97d3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
+ * @margins: desired margin size
+ * @GTF_[MCKJ]: extended GTF formula parameters
*
* LOCKING.
* none.
*
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * Returns the modeline based on the full GTF algorithm.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{
+	/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether the interlace is supported
+ * @margins: whether the margin is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * Returns the modeline based on the GTF algorithm.
+ *
+ * This function creates the modeline using the Generalized Timing
+ * Formula, which is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
+ *
+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, rewritten
+ * to use integer arithmetic, with reference to fb_get_mode() in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
EXPORT_SYMBOL(drm_gtf_mode);
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
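Since drm_gtf_mode() now just feeds the standard parameters (M=600, C=40, K=128, J=20) into drm_gtf_mode_complex() with C and J pre-doubled, the two calls below should yield identical modelines; EDID secondary-GTF consumers pass their own 2C/2J values the same way:

struct drm_display_mode *a, *b;

a = drm_gtf_mode(dev, 1024, 768, 60, false, 0);
b = drm_gtf_mode_complex(dev, 1024, 768, 60, false, 0,
			 600, 80, 128, 40);	/* M, 2C, K, 2J */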
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e68ebf92fa2a..2ea9ad4a8d69 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -37,6 +37,7 @@
*/
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "drmP.h"
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index d379c4f2892f..a9ba6b69ad35 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -38,6 +38,7 @@
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
/***************************************************
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index c7823c863d4f..9034c4c6100d 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -32,6 +32,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include "drmP.h"
#define DEBUG_SCATTER 0
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index ad73e141afdb..a0c365f2e521 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm_core.h"
@@ -515,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
}
driver = dev->driver;
- drm_vblank_cleanup(dev);
-
drm_lastclose(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -536,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
dev->agp = NULL;
}
+ drm_vblank_cleanup(dev);
+
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 7e42b7e9d43a..101d381e9d86 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/kdev_t.h>
+#include <linux/gfp.h>
#include <linux/err.h>
#include "drm_sysfs.h"
@@ -70,19 +71,17 @@ static int drm_class_resume(struct device *dev)
return 0;
}
-/* Display the version of drm_core. This doesn't work right in current design */
-static ssize_t version_show(struct class *dev, char *buf)
-{
- return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
- CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
-}
-
static char *drm_devnode(struct device *dev, mode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
-static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+static CLASS_ATTR_STRING(version, S_IRUGO,
+ CORE_NAME " "
+ __stringify(CORE_MAJOR) "."
+ __stringify(CORE_MINOR) "."
+ __stringify(CORE_PATCHLEVEL) " "
+ CORE_DATE);
/**
* drm_sysfs_create - create a struct drm_sysfs_class structure
@@ -109,7 +108,7 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
class->suspend = drm_class_suspend;
class->resume = drm_class_resume;
- err = class_create_file(class, &class_attr_version);
+ err = class_create_file(class, &class_attr_version.attr);
if (err)
goto err_out_class;
@@ -132,7 +131,7 @@ void drm_sysfs_destroy(void)
{
if ((drm_class == NULL) || (IS_ERR(drm_class)))
return;
- class_remove_file(drm_class, &class_attr_version);
+ class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
}
@@ -194,8 +193,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -334,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
@@ -355,7 +355,10 @@ static struct bin_attribute edid_attr = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- int ret = 0, i, j;
+ int attr_cnt = 0;
+ int opt_cnt = 0;
+ int i;
+ int ret = 0;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
@@ -378,8 +381,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
/* Standard attributes */
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+ for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@@ -395,8 +398,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+ for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@@ -415,10 +418,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return 0;
err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
- device_remove_file(&connector->kdev,
- &connector_attrs[i]);
+ for (i = 0; i < opt_cnt; i++)
+ device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+ for (i = 0; i < attr_cnt; i++)
+ device_remove_file(&connector->kdev, &connector_attrs[i]);
device_unregister(&connector->kdev);
out:
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 4ac900f4647f..c3b13fb41d0c 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
+#include <linux/slab.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index de32d22a8c39..997d91707ad2 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -36,6 +36,7 @@
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 06bd732e6463..65759a9a85c8 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/pagemap.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
#define I830_BUF_FREE 2
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e1..da78f2c0d909 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_ringbuffer.o \
intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
@@ -33,3 +34,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
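The per-object CFLAGS line is needed because the tracepoint machinery re-includes the trace header by name from the defining compilation unit, so that unit needs the source directory on its include path; roughly, i915_trace_points.c is just:

#include "i915_drv.h"

#define CREATE_TRACE_POINTS
#include "i915_trace.h"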
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e2..0d6ff640e1c6 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
- */
- void (*restore)(struct intel_dvo_device *dvo);
-
- /*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87d..14d59804acd7 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b2..6f1944b24441 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
-};
-
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
}
}
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0f..a2ec3f487202 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
-}
-
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a80..9b8e6765cf26 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116a..56f66426207f 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -216,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
- if (ctl2 & TFP410_CTL_2_HTPLG)
+ if (ctl2 & TFP410_CTL_2_RSEN)
ret = connector_status_connected;
else
ret = connector_status_disconnected;
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -76,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->mm.active_list;
+ head = &dev_priv->render_ring.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
@@ -95,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
- struct drm_gem_object *obj = obj_priv->obj;
-
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
+ &obj_priv->base,
get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
+ obj_priv->base.size,
+ obj_priv->base.read_domains,
+ obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->base.name)
+ seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_i915_gem_request *gem_request;
seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+ list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -162,7 +163,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -225,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
} else {
struct drm_i915_gem_object *obj_priv;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
seq_printf(m, "Fenced object[%2d] = %p: %s "
"%08x %08zx %08x %s %08x %08x %d",
i, obj, get_pin_flag(obj_priv),
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
int i;
volatile u32 *hws;
- hws = (volatile u32 *)dev_priv->hw_status_page;
+ hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
if (hws == NULL)
return 0;
@@ -287,8 +288,9 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
+ list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+ list) {
+ obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
u8 *virt;
uint32_t *ptr, off;
- if (!dev_priv->ring.ring_obj) {
+ if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
return 0;
}
- virt = dev_priv->ring.virtual_start;
+ virt = dev_priv->render_ring.virtual_start;
- for (off = 0; off < dev_priv->ring.Size; off += 4) {
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -344,12 +346,42 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
return 0;
}
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +389,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
+ int i, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -368,6 +401,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +413,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
+ seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+ if (error->active_bo_count) {
+ seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+ for (i = 0; i < error->active_bo_count; i++) {
+ seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
+ error->active_bo[i].gtt_offset,
+ error->active_bo[i].size,
+ error->active_bo[i].read_domains,
+ error->active_bo[i].write_domain,
+ error->active_bo[i].seqno,
+ pin_flag(error->active_bo[i].pinned),
+ tiling_flag(error->active_bo[i].tiling),
+ dirty_flag(error->active_bo[i].dirty),
+ purgeable_flag(error->active_bo[i].purgeable));
+
+ if (error->active_bo[i].name)
+ seq_printf(m, " (name: %d)", error->active_bo[i].name);
+ if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+ seq_printf(m, "\n");
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+ if (error->batchbuffer[i]) {
+ struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+ }
+
+ if (error->ringbuffer) {
+ struct drm_i915_error_object *obj = error->ringbuffer;
+
+ seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +473,199 @@ out:
return 0;
}
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+ return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+
+ return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 delayfreq;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
+ }
+
+ return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+ return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 inttoext;
+ int i;
+
+ for (i = 1; i <= 32; i++) {
+ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+ }
+
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+ u16 crstandvid = I915_READ16(CRSTANDVID);
+
+ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ "yes" : "no");
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+ seq_printf(m, "SW control enabled: %s\n",
+ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+ seq_printf(m, "Gated voltage change: %s\n",
+ rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!I915_HAS_FBC(dev)) {
+ seq_printf(m, "FBC unsupported on this chipset\n");
+ return 0;
+ }
+
+ if (intel_fbc_enabled(dev)) {
+ seq_printf(m, "FBC enabled\n");
+ } else {
+ seq_printf(m, "FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
+ case FBC_STOLEN_TOO_SMALL:
+ seq_printf(m, "not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ seq_printf(m, "mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ seq_printf(m, "mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ seq_printf(m, "FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ seq_printf(m, "scanout buffer not tiled");
+ break;
+ case FBC_MULTIPLE_PIPES:
+ seq_printf(m, "multiple pipes are enabled");
+ break;
+ default:
+ seq_printf(m, "unknown reason");
+ }
+ seq_printf(m, "\n");
+ }
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+ if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+ seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+ "disabled");
+
+ return 0;
+}
+
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
+
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
+
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+ return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -503,6 +783,15 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
+ {"i915_rstdby_delays", i915_rstdby_delays, 0},
+ {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+ {"i915_delayfreq_table", i915_delayfreq_table, 0},
+ {"i915_inttoext_table", i915_inttoext_table, 0},
+ {"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_emon_status", i915_emon_status, 0},
+ {"i915_gfxec", i915_gfxec, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
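The to_intel_bo() conversions above work because struct drm_gem_object is now embedded in the driver object as obj_priv->base (rather than reached through obj->driver_private); the helper is roughly:

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)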
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..2305a1234f1e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,84 +35,10 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/vgaarb.h>
-
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
- u32 last_acthd = I915_READ(acthd_reg);
- u32 acthd;
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
- trace_i915_ring_wait_begin (dev);
-
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
- return 0;
- }
-
- if (dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
-
- if (ring->head != last_head)
- i = 0;
- if (acthd != last_acthd)
- i = 0;
-
- last_head = ring->head;
- last_acthd = acthd;
- msleep_interruptible(10);
-
- }
-
- trace_i915_ring_wait_end (dev);
- return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- volatile unsigned int *virt;
- int rem;
-
- rem = dev_priv->ring.Size - dev_priv->ring.tail;
- if (dev_priv->ring.space < rem) {
- int ret = i915_wait_ring(dev, rem, __func__);
- if (ret)
- return ret;
- }
- dev_priv->ring.space -= rem;
-
- virt = (unsigned int *)
- (dev_priv->ring.virtual_start + dev_priv->ring.tail);
- rem /= 4;
- while (rem--)
- *virt++ = MI_NOOP;
-
- dev_priv->ring.tail = 0;
-
- return 0;
-}
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
/**
* Sets up the hardware status page for devices that need a physical address
@@ -129,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
if (IS_I965G(dev))
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -155,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
+ if (dev_priv->render_ring.status_page.gfx_addr) {
+ dev_priv->render_ring.status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -168,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
/*
* We should never lose context on the ring with modesetting
@@ -181,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
- ring->space += ring->Size;
+ ring->space += ring->size;
if (!dev->primary->master)
return;
@@ -201,12 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- dev_priv->ring.map.handle = NULL;
- dev_priv->ring.map.size = 0;
- }
+ mutex_lock(&dev->struct_mutex);
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@@ -229,24 +155,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->ring.ring_obj != NULL) {
+ if (dev_priv->render_ring.gem_object != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->ring.Size = init->ring_size;
+ dev_priv->render_ring.size = init->ring_size;
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ dev_priv->render_ring.map.offset = init->ring_start;
+ dev_priv->render_ring.map.size = init->ring_size;
+ dev_priv->render_ring.map.type = 0;
+ dev_priv->render_ring.map.flags = 0;
+ dev_priv->render_ring.map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+ drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
+ if (dev_priv->render_ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -254,7 +180,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -274,26 +200,29 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (dev_priv->ring.map.handle == NULL) {
+ ring = &dev_priv->render_ring;
+
+ if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
+ if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
- dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ ring->setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -403,9 +332,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- RING_LOCALS;
- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+ if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
return -EINVAL;
BEGIN_LP_RING((dwords+1)&~1);
@@ -438,9 +366,7 @@ i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box = boxes[i];
- RING_LOCALS;
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -477,7 +403,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -531,10 +456,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i = 0, count;
- RING_LOCALS;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -583,7 +506,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
- RING_LOCALS;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -636,7 +558,8 @@ static int i915_quiescent(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+ return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+ dev_priv->render_ring.size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -823,6 +746,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
/* depends on GEM */
value = dev_priv->has_gem;
break;
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -878,6 +804,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -894,7 +821,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
@@ -905,19 +832,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap_wc(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ ring->status_page.page_addr = dev_priv->hws_map.handle;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
+ ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
+ ring->status_page.page_addr);
return 0;
}
@@ -933,6 +860,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret = 0;
+
+ if (IS_I965G(dev))
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+ ret = 0;
+ goto out;
+ }
+#endif
+
+ /* Get some space for it */
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ goto out;
+ }
+
+ if (IS_I965G(dev))
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+out:
+ return ret;
+}
+
+/* Setup MCHBAR if possible; note if we should disable it again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ temp &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
/**
* i915_probe_agp - get AGP bootup configuration
* @pdev: PCI device
@@ -978,59 +1019,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
+ if (IS_GEN6(dev)) {
+ /* SNB has memory control reg at 0x50.w */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+
+ switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case SNB_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ stolen = 192 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ stolen = 288 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ stolen = 320 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ stolen = 384 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ stolen = 416 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ stolen = 448 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ stolen = 480 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ stolen = 512 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & SNB_GMCH_GMS_STOLEN_MASK);
+ return -1;
+ }
+ } else {
+ switch (tmp & INTEL_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ stolen = 1 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ stolen = 4 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ stolen = 8 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ stolen = 16 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ stolen = 48 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_GMCH_GMS_MASK);
+ return -1;
+ }
}
+
*preallocated_size = stolen - overhead;
*start = overhead;
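For illustration (the aperture size is hypothetical, not from the patch): on a pre-G4X part the per-MB GTT cost dominates the fixed 4K popup, e.g. with a 256MB aperture:

	unsigned long aperture = 256ul << 20;		/* 256MB aperture */
	unsigned long overhead = aperture / 1024 + 4096;	/* 1K per MB + 4K popup */
	/* overhead == 266240 bytes (~260K); usable = stolen - overhead */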
@@ -1064,7 +1169,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1126,13 +1231,14 @@ static void i915_warn_stolen(struct drm_device *dev)
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *compressed_llb;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
if (!compressed_fb) {
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
return;
}
@@ -1140,6 +1246,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb) {
i915_warn_stolen(dev);
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
return;
}
@@ -1173,19 +1280,30 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ intel_disable_fbc(dev);
+ dev_priv->compressed_fb = compressed_fb;
+
if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1199,6 +1317,34 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "i915: switched on\n");
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+ drm_kms_helper_poll_enable(dev);
+ } else {
+ printk(KERN_ERR "i915: switched off\n");
+ drm_kms_helper_poll_disable(dev);
+ i915_suspend(dev, pmm);
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
static int i915_load_modeset_init(struct drm_device *dev,
unsigned long prealloc_start,
unsigned long prealloc_size,
@@ -1258,13 +1404,23 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_ringbuffer;
+
+ ret = vga_switcheroo_register_client(dev->pdev,
+ i915_switcheroo_set_state,
+ i915_switcheroo_can_switch);
+ if (ret)
+ goto cleanup_vga_client;
+
+ /* IIR "flip pending" bit means done if this bit is set */
+ if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
+ dev_priv->flip_pending_is_done = true;
intel_modeset_init(dev);
ret = drm_irq_install(dev);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_vga_switcheroo;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1276,12 +1432,23 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- drm_helper_initial_config(dev);
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_irq;
+ drm_kms_helper_poll_init(dev);
return 0;
-destroy_ringbuffer:
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_vga_switcheroo:
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+cleanup_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@@ -1310,14 +1477,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_PINEVIEW(dev))
- return;
-
tmp = I915_READ(CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
@@ -1346,8 +1510,525 @@ static void i915_get_mem_freq(struct drm_device *dev)
dev_priv->mem_freq = 800;
break;
}
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
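A worked decode under assumed register values (illustrative only), showing how this function feeds the cparams[] lookup below:

	/* DDRMPLL1 low byte 0x10    -> mem_freq = 1066, r_t = 1066
	 * CSIPLL0 & 0x3ff == 0x012  -> fsb_freq = 4800
	 * 3200 < 4800 <= 4800       -> c_m = 1
	 * (c_m, r_t) = (1, 1066) selects cparams[] row { 1, 1066, 294, 24460 }
	 */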
+
+struct v_table {
+ u8 vid;
+ unsigned long vd; /* in .1 mil */
+ unsigned long vm; /* in .1 mil */
+ u8 pvid;
+};
+
+static struct v_table v_table[] = {
+ { 0, 16125, 15000, 0x7f, },
+ { 1, 16000, 14875, 0x7e, },
+ { 2, 15875, 14750, 0x7d, },
+ { 3, 15750, 14625, 0x7c, },
+ { 4, 15625, 14500, 0x7b, },
+ { 5, 15500, 14375, 0x7a, },
+ { 6, 15375, 14250, 0x79, },
+ { 7, 15250, 14125, 0x78, },
+ { 8, 15125, 14000, 0x77, },
+ { 9, 15000, 13875, 0x76, },
+ { 10, 14875, 13750, 0x75, },
+ { 11, 14750, 13625, 0x74, },
+ { 12, 14625, 13500, 0x73, },
+ { 13, 14500, 13375, 0x72, },
+ { 14, 14375, 13250, 0x71, },
+ { 15, 14250, 13125, 0x70, },
+ { 16, 14125, 13000, 0x6f, },
+ { 17, 14000, 12875, 0x6e, },
+ { 18, 13875, 12750, 0x6d, },
+ { 19, 13750, 12625, 0x6c, },
+ { 20, 13625, 12500, 0x6b, },
+ { 21, 13500, 12375, 0x6a, },
+ { 22, 13375, 12250, 0x69, },
+ { 23, 13250, 12125, 0x68, },
+ { 24, 13125, 12000, 0x67, },
+ { 25, 13000, 11875, 0x66, },
+ { 26, 12875, 11750, 0x65, },
+ { 27, 12750, 11625, 0x64, },
+ { 28, 12625, 11500, 0x63, },
+ { 29, 12500, 11375, 0x62, },
+ { 30, 12375, 11250, 0x61, },
+ { 31, 12250, 11125, 0x60, },
+ { 32, 12125, 11000, 0x5f, },
+ { 33, 12000, 10875, 0x5e, },
+ { 34, 11875, 10750, 0x5d, },
+ { 35, 11750, 10625, 0x5c, },
+ { 36, 11625, 10500, 0x5b, },
+ { 37, 11500, 10375, 0x5a, },
+ { 38, 11375, 10250, 0x59, },
+ { 39, 11250, 10125, 0x58, },
+ { 40, 11125, 10000, 0x57, },
+ { 41, 11000, 9875, 0x56, },
+ { 42, 10875, 9750, 0x55, },
+ { 43, 10750, 9625, 0x54, },
+ { 44, 10625, 9500, 0x53, },
+ { 45, 10500, 9375, 0x52, },
+ { 46, 10375, 9250, 0x51, },
+ { 47, 10250, 9125, 0x50, },
+ { 48, 10125, 9000, 0x4f, },
+ { 49, 10000, 8875, 0x4e, },
+ { 50, 9875, 8750, 0x4d, },
+ { 51, 9750, 8625, 0x4c, },
+ { 52, 9625, 8500, 0x4b, },
+ { 53, 9500, 8375, 0x4a, },
+ { 54, 9375, 8250, 0x49, },
+ { 55, 9250, 8125, 0x48, },
+ { 56, 9125, 8000, 0x47, },
+ { 57, 9000, 7875, 0x46, },
+ { 58, 8875, 7750, 0x45, },
+ { 59, 8750, 7625, 0x44, },
+ { 60, 8625, 7500, 0x43, },
+ { 61, 8500, 7375, 0x42, },
+ { 62, 8375, 7250, 0x41, },
+ { 63, 8250, 7125, 0x40, },
+ { 64, 8125, 7000, 0x3f, },
+ { 65, 8000, 6875, 0x3e, },
+ { 66, 7875, 6750, 0x3d, },
+ { 67, 7750, 6625, 0x3c, },
+ { 68, 7625, 6500, 0x3b, },
+ { 69, 7500, 6375, 0x3a, },
+ { 70, 7375, 6250, 0x39, },
+ { 71, 7250, 6125, 0x38, },
+ { 72, 7125, 6000, 0x37, },
+ { 73, 7000, 5875, 0x36, },
+ { 74, 6875, 5750, 0x35, },
+ { 75, 6750, 5625, 0x34, },
+ { 76, 6625, 5500, 0x33, },
+ { 77, 6500, 5375, 0x32, },
+ { 78, 6375, 5250, 0x31, },
+ { 79, 6250, 5125, 0x30, },
+ { 80, 6125, 5000, 0x2f, },
+ { 81, 6000, 4875, 0x2e, },
+ { 82, 5875, 4750, 0x2d, },
+ { 83, 5750, 4625, 0x2c, },
+ { 84, 5625, 4500, 0x2b, },
+ { 85, 5500, 4375, 0x2a, },
+ { 86, 5375, 4250, 0x29, },
+ { 87, 5250, 4125, 0x28, },
+ { 88, 5125, 4000, 0x27, },
+ { 89, 5000, 3875, 0x26, },
+ { 90, 4875, 3750, 0x25, },
+ { 91, 4750, 3625, 0x24, },
+ { 92, 4625, 3500, 0x23, },
+ { 93, 4500, 3375, 0x22, },
+ { 94, 4375, 3250, 0x21, },
+ { 95, 4250, 3125, 0x20, },
+ { 96, 4125, 3000, 0x1f, },
+ { 97, 4125, 3000, 0x1e, },
+ { 98, 4125, 3000, 0x1d, },
+ { 99, 4125, 3000, 0x1c, },
+ { 100, 4125, 3000, 0x1b, },
+ { 101, 4125, 3000, 0x1a, },
+ { 102, 4125, 3000, 0x19, },
+ { 103, 4125, 3000, 0x18, },
+ { 104, 4125, 3000, 0x17, },
+ { 105, 4125, 3000, 0x16, },
+ { 106, 4125, 3000, 0x15, },
+ { 107, 4125, 3000, 0x14, },
+ { 108, 4125, 3000, 0x13, },
+ { 109, 4125, 3000, 0x12, },
+ { 110, 4125, 3000, 0x11, },
+ { 111, 4125, 3000, 0x10, },
+ { 112, 4125, 3000, 0x0f, },
+ { 113, 4125, 3000, 0x0e, },
+ { 114, 4125, 3000, 0x0d, },
+ { 115, 4125, 3000, 0x0c, },
+ { 116, 4125, 3000, 0x0b, },
+ { 117, 4125, 3000, 0x0a, },
+ { 118, 4125, 3000, 0x09, },
+ { 119, 4125, 3000, 0x08, },
+ { 120, 1125, 0, 0x07, },
+ { 121, 1000, 0, 0x06, },
+ { 122, 875, 0, 0x05, },
+ { 123, 750, 0, 0x04, },
+ { 124, 625, 0, 0x03, },
+ { 125, 500, 0, 0x02, },
+ { 126, 375, 0, 0x01, },
+ { 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+ int i;
+ int t;
+ int m;
+ int c;
+};
+
+static struct cparams cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+	diff = div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ return ret;
+}
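With the quotients assigned (see the div_u64 fix above), the estimate reduces to ret = (m * (delta counts / delta ms) + c) / 10. A hypothetical sample calculation:

	/* delta counts = 100000 over delta ms = 1000 -> rate = 100
	 * with cparams row m = 294, c = 24460 (c_m = 1, r_t = 1066):
	 * ret = (294 * 100 + 24460) / 10 = 53860 / 10 = 5386
	 */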
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ unsigned long val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+ if (v_table[i].pvid == pxvid) {
+ if (IS_MOBILE(dev_priv->dev))
+ val = v_table[i].vm;
+ else
+ val = v_table[i].vd;
+ }
+ }
+
+ return val;
+}
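An illustrative lookup, assuming the table's ".1 mil" annotation means 0.1mV units:

	/* pxvid == 0x7f matches v_table[0] (vid 0), so
	 * IS_MOBILE -> vm = 15000 (1.5000V), else vd = 16125 (1.6125V)
	 */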
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+	diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
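Given the assignment fix above, gfx_power works out to count delta * 1181 / (diffms * 10). With assumed numbers:

	/* delta count = 50000 over diffms = 500:
	 * diff = 50000 * 1181 = 59050000; 59050000 / (500 * 10) = 11810
	 */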
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
}
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busy status to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
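These five exports form the interface an IPS thermal driver is expected to poll. A minimal hypothetical consumer (ips_adjust and POWER_BUDGET are illustrative; only the i915_* symbols come from this patch) might look like:

	static void ips_adjust(void)
	{
		unsigned long val = i915_read_mch_val();	/* chipset + gfx power */

		if (val < POWER_BUDGET && i915_gpu_busy())
			i915_gpu_raise();	/* headroom: allow a higher GPU limit */
		else if (val > POWER_BUDGET)
			i915_gpu_lower();	/* near the limit: throttle back */
	}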
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1361,11 +2042,10 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1443,13 +2123,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->has_gem = 0;
}
+ if (dev_priv->has_gem == 0 &&
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
i915_gem_load(dev);
/* Init HWS */
@@ -1459,7 +2149,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_workqueue_free;
}
- i915_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_IRONLAKE(dev))
+ i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1477,7 +2170,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->user_irq_refcount = 0;
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1490,6 +2182,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
+ intel_detect_pch(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
@@ -1504,6 +2198,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
+
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
return 0;
out_workqueue_free:
@@ -1523,6 +2223,12 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_destroy_error_state(dev);
+
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
@@ -1534,6 +2240,8 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_modeset_cleanup(dev);
+
/*
* free the memory space allocated for the child device
* config parsed from VBT
@@ -1544,6 +2252,7 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev_num = 0;
}
drm_irq_uninstall(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1556,19 +2265,21 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
intel_cleanup_overlay(dev);
}
+ intel_teardown_mchbar(dev);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
@@ -1611,6 +2322,7 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_fb_helper_restore();
+ vga_switcheroo_process_delayed_switch();
return;
}
@@ -1655,29 +2367,29 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..423dc90c1e20 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
static struct drm_driver driver;
+extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
@@ -59,88 +60,99 @@ static struct drm_driver driver;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+static const struct intel_device_info intel_i85x_info = {
+ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+static const struct intel_device_info intel_i915gm_info = {
+ .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+static const struct intel_device_info intel_i945gm_info = {
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct pci_device_id pciidlist[] = {
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1, .is_gen6 = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1, .is_gen6 = 1,
+};
+
+static const struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -167,6 +179,8 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
{0, 0, 0}
};
@@ -174,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch;
+
+ /*
+	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+	 * make graphics device passthrough easy for VMMs, which only need
+	 * to expose the ISA bridge so the driver can identify the real
+	 * hardware underneath. This is a requirement from the virtualization team.
+ */
+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (pch) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ int id;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ }
+ }
+ pci_dev_put(pch);
+ }
+}
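For example (the device ID is hypothetical), an Intel ISA bridge reporting device 0x1c44 would be classified as follows:

	/* 0x1c44 & INTEL_PCH_DEVICE_ID_MASK == 0x1c00
	 * == INTEL_PCH_CPT_DEVICE_ID_TYPE -> pch_type = PCH_CPT
	 */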
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -201,7 +244,7 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+int i915_suspend(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -255,7 +298,7 @@ static int i915_drm_thaw(struct drm_device *dev)
return error;
}
-static int i915_resume(struct drm_device *dev)
+int i915_resume(struct drm_device *dev)
{
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -297,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
/*
* Clear request list
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
if (need_display)
i915_save_display(dev);
@@ -327,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
}
} else {
DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ mutex_unlock(&dev->struct_mutex);
return -ENODEV;
}
@@ -345,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
- struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ !dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
-
+ ring->init(dev, ring);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -546,6 +567,11 @@ static struct drm_driver driver = {
static int __init i915_init(void)
{
+ if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return -ENODEV;
+ }
+
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
@@ -571,6 +597,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
+ if (!(driver.driver_features & DRIVER_MODESET)) {
+ driver.suspend = i915_suspend;
+ driver.resume = i915_resume;
+ }
+
return drm_init(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..2e1744d37ad5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
#include "i915_reg.h"
#include "intel_bios.h"
+#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
/* General customization:
@@ -55,6 +56,8 @@ enum plane {
#define I915_NUM_PIPE 2
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
/* Interface history:
*
* 1.1: Original.
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object {
struct drm_gem_object *cur_obj;
};
-typedef struct _drm_i915_ring_buffer {
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
- struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
@@ -128,6 +121,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
+ struct list_head lru_list;
};
struct sdvo_device_mapping {
@@ -135,6 +129,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 initialized;
+ u8 ddc_pin;
};
struct drm_i915_error_state {
@@ -150,12 +145,32 @@ struct drm_i915_error_state {
u32 instps;
u32 instdone1;
u32 seqno;
+ u64 bbaddr;
struct timeval time;
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer[2];
+ struct drm_i915_error_buffer {
+ size_t size;
+ u32 name;
+ u32 seqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ u32 fence_reg;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ } *active_bo;
+ u32 active_bo_count;
};
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
- bool (*fbc_enabled)(struct drm_crtc *crtc);
+ bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
@@ -175,6 +190,7 @@ struct intel_overlay;
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
+ u8 is_i85x : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
@@ -185,6 +201,7 @@ struct intel_device_info {
u8 is_g4x : 1;
u8 is_pineview : 1;
u8 is_ironlake : 1;
+ u8 is_gen6 : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
@@ -192,6 +209,24 @@ struct intel_device_info {
u8 cursor_needs_physical : 1;
};
+enum no_fbc_reason {
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+};
+
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+
+struct intel_fbdev;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -202,15 +237,16 @@ typedef struct drm_i915_private {
void __iomem *regs;
struct pci_dev *bridge_dev;
- drm_i915_ring_buffer_t ring;
+ struct intel_ring_buffer render_ring;
+ struct intel_ring_buffer bsd_ring;
drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
+ void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int status_gfx_addr;
+ unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *hws_obj;
+ struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -225,8 +261,6 @@ typedef struct drm_i915_private {
atomic_t irq_received;
/** Protects user_irq_refcount and irq_mask_reg */
spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
- int user_irq_refcount;
u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
@@ -247,6 +281,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ int num_pipe;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -292,7 +327,7 @@ typedef struct drm_i915_private {
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
- unsigned int fsb_freq, mem_freq;
+ unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
@@ -302,6 +337,11 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
+ unsigned long quirks;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@@ -452,6 +492,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
struct {
struct drm_mm gtt_space;
@@ -468,18 +509,7 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
spinlock_t active_list_lock;
- struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
@@ -517,12 +547,6 @@ typedef struct drm_i915_private {
struct list_head fence_list;
/**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -577,10 +601,13 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
wait_queue_head_t pending_flip_queue;
+ bool flip_pending_is_done;
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
+ /* indicate whether the LVDS EDID is OK */
+ bool lvds_edid_good;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
@@ -590,11 +617,37 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
+
+ bool mchbar_need_disable;
+
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
+ spinlock_t *mchdev_lock;
+
+ enum no_fbc_reason no_fbc_reason;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ /* list of fbdev register on this device */
+ struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -604,27 +657,69 @@ struct drm_i915_gem_object {
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on the fenced object LRU */
- struct list_head fence_list;
-
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
- int active;
+ unsigned int active : 1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- int dirty;
+ unsigned int dirty : 1;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ *
+ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+ */
+ int fence_reg : 5;
+
+ /**
+ * Used for checking the object doesn't appear more than once
+ * in an execbuffer object list.
+ */
+ unsigned int in_execbuffer : 1;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv : 2;
+
+ /**
+ * Refcount for the pages array. With the current locking scheme, there
+ * are at most two concurrent users: Binding a bo to the gtt and
+ * pwrite/pread using physical addresses. So two bits for a maximum
+ * of two users are enough.
+ */
+ unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+ /**
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode : 2;
+
+ /** How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
struct page **pages;
- int pages_refcount;
/**
* Current offset of the object in GTT space.
@@ -633,26 +728,18 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
+	/* Which ring is referring to this object */
+ struct intel_ring_buffer *ring;
+
/**
* Fake offset for use by mmap(2)
*/
uint64_t mmap_offset;
- /**
- * Fence register bits (if any) for this object. Will be set
- * as needed when mapped into the GTT.
- * Protected by dev->struct_mutex.
- */
- int fence_reg;
-
- /** How many users have pinned this object in GTT space */
- int pin_count;
-
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
- /** Current tiling mode for the object. */
- uint32_t tiling_mode;
+ /** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
/** Record of address bit 17 of each page at last unbind. */
@@ -675,17 +762,6 @@ struct drm_i915_gem_object {
struct drm_i915_gem_phys_object *phys_obj;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- int in_execbuffer;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- int madv;
-
- /**
* Number of crtcs where this object is currently the fb, but
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -693,6 +769,8 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
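Since drm_gem_object is now embedded as "base", lookups that previously chased driver_private go through container_of instead. A minimal before/after sketch:

	struct drm_i915_gem_object *obj_priv;

	/* before this patch: */
	obj_priv = obj->driver_private;	/* and obj_priv->obj == obj */
	/* after this patch: */
	obj_priv = to_intel_bo(obj);	/* and &obj_priv->base == obj */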
/**
* Request queue structure.
*
@@ -704,6 +782,9 @@ struct drm_i915_gem_object {
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
+ /** On Which ring this request was generated */
+ struct intel_ring_buffer *ring;
+
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -736,6 +817,8 @@ extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
extern unsigned int i915_lvds_downclock;
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -758,16 +841,20 @@ extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
extern int i965_reset(struct drm_device *dev, u8 flags);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -785,6 +872,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -850,17 +942,21 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -871,9 +967,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno, int interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -897,7 +997,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+ int tiling_mode);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -945,6 +1046,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -953,7 +1060,8 @@ extern void g4x_disable_fbc(struct drm_device *dev);
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+ == NULL) \
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
@@ -966,35 +1074,31 @@ extern void g4x_disable_fbc(struct drm_device *dev);
#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
#define I915_READ64(reg) readq(dev_priv->regs + (reg))
#define POSTING_READ(reg) (void)I915_READ(reg)
+#define POSTING_READ16(reg) (void)I915_READ16(reg)
#define I915_VERBOSE 0
-#define RING_LOCALS volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do { \
- int bytes__ = 4*(n); \
- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- /* a wrap must occur between instructions so pad beforehand */ \
- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
- i915_wrap_ring(dev); \
- if (unlikely (dev_priv->ring.space < bytes__)) \
- i915_wait_ring(dev, bytes__, __func__); \
- ring_virt__ = (unsigned int *) \
- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
- dev_priv->ring.tail += bytes__; \
- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
- dev_priv->ring.space -= bytes__; \
+#define BEGIN_LP_RING(n) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
+ intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
} while (0)
-#define OUT_RING(n) do { \
- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *ring_virt__++ = (n); \
+
+#define OUT_RING(x) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
+ intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
+ DRM_DEBUG("ADVANCE_LP_RING %x\n", \
+ dev_priv->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0)
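
[Note: the legacy LP_RING macros are kept as a compatibility veneer: each now picks up dev_priv from the enclosing scope and forwards to the intel_ring_buffer operations on the render ring. A minimal sketch of the emit sequence the macros preserve; the helper name is illustrative, not from the patch:

static void example_emit_flush(struct drm_device *dev)
{
        BEGIN_LP_RING(2);       /* reserve two dwords on the render ring */
        OUT_RING(MI_FLUSH);     /* command dword */
        OUT_RING(MI_NOOP);      /* pad to an even dword count */
        ADVANCE_LP_RING();      /* commit the new tail to the hardware */
}

Callers are unchanged; only the plumbing underneath moved to the ring abstraction.]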
/**
@@ -1012,21 +1116,19 @@ extern void g4x_disable_fbc(struct drm_device *dev);
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
+ (dev_priv->render_ring.status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
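
[Note: READ_BREADCRUMB now reads dword 0x21 of the render ring's status page rather than the old dev_priv->hw_status_page. A sketch of the wrap-safe completion test built on it, mirroring the i915_seqno_passed() logic used elsewhere in this file; example_* is an illustrative name:

static bool example_breadcrumb_passed(drm_i915_private_t *dev_priv,
                                      uint32_t seqno)
{
        uint32_t completed = READ_BREADCRUMB(dev_priv);

        /* Signed subtraction keeps the test correct across seqno wraparound. */
        return (int32_t)(completed - seqno) >= 0;
}]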
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1043,8 +1145,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_GEN3(dev) (IS_I915G(dev) || \
+ IS_I915GM(dev) || \
+ IS_I945G(dev) || \
+ IS_I945GM(dev) || \
+ IS_G33(dev) || \
+ IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E42)
+
+#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
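
[Note: these predicates replace raw PCI-ID tests scattered through the driver; HAS_BSD() in particular gates every per-ring code path added for the bit-stream decoder ring. A hedged sketch of the typical call-site pattern, matching the retire-work handler later in this patch:

static void example_retire_all_rings(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_gem_retire_requests(dev, &dev_priv->render_ring);
        if (HAS_BSD(dev))       /* the second ring exists only where HAS_BSD holds */
                i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
}]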
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
@@ -1057,7 +1180,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+ !IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1067,6 +1191,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
+ IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa39..5aa747fc25a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,11 +31,10 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -123,14 +122,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
+ obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
if (ret)
return ret;
@@ -164,13 +161,13 @@ fast_shmem_read(struct page **pages,
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj_priv->tiling_mode != I915_TILING_NONE;
}
-static inline int
+static inline void
slow_shmem_copy(struct page *dst_page,
int dst_offset,
struct page *src_page,
@@ -179,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
{
char *dst_vaddr, *src_vaddr;
- dst_vaddr = kmap_atomic(dst_page, KM_USER0);
- if (dst_vaddr == NULL)
- return -ENOMEM;
-
- src_vaddr = kmap_atomic(src_page, KM_USER1);
- if (src_vaddr == NULL) {
- kunmap_atomic(dst_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ dst_vaddr = kmap(dst_page);
+ src_vaddr = kmap(src_page);
memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
- kunmap_atomic(src_vaddr, KM_USER1);
- kunmap_atomic(dst_vaddr, KM_USER0);
-
- return 0;
+ kunmap(src_page);
+ kunmap(dst_page);
}
-static inline int
+static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
int gpu_offset,
struct page *cpu_page,
@@ -217,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
cpu_page, cpu_offset, length);
}
- gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
- if (gpu_vaddr == NULL)
- return -ENOMEM;
-
- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
- if (cpu_vaddr == NULL) {
- kunmap_atomic(gpu_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ gpu_vaddr = kmap(gpu_page);
+ cpu_vaddr = kmap(cpu_page);
/* Copy the data, XORing A6 with A17 (1). The user already knows he's
* XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -249,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
length -= this_length;
}
- kunmap_atomic(cpu_vaddr, KM_USER1);
- kunmap_atomic(gpu_vaddr, KM_USER0);
-
- return 0;
+ kunmap(cpu_page);
+ kunmap(gpu_page);
}
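
[Note: the helper above applies the bit-17 swizzle in 64-byte (cacheline) chunks. A sketch of the underlying address transform, under the assumption stated in the comment that only A6 needs fixing up here (userspace handles A9/A10); the helper name is illustrative:

static int example_bit17_swizzled_offset(struct page *page, int offset)
{
        /* When physical address bit 17 of the backing page is set,
         * bit 6 of every offset within the page must be flipped,
         * i.e. adjacent 64-byte halves of each 128-byte unit swap.
         */
        if (page_to_phys(page) & (1 << 17))
                offset ^= 64;
        return offset;
}]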
/**
@@ -265,7 +244,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -286,7 +265,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -355,7 +334,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -404,7 +383,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -428,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- ret = slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
- page_length);
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 1);
+ } else {
+ slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -480,7 +457,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check source.
*
@@ -488,7 +465,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -501,7 +478,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -532,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
* page faults
*/
-static inline int
+static inline void
slow_kernel_write(struct io_mapping *mapping,
loff_t gtt_base, int gtt_offset,
struct page *user_page, int user_offset,
int length)
{
- char *src_vaddr, *dst_vaddr;
- unsigned long unwritten;
+ char __iomem *dst_vaddr;
+ char *src_vaddr;
- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
- src_vaddr = kmap_atomic(user_page, KM_USER1);
- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
- kunmap_atomic(src_vaddr, KM_USER1);
- io_mapping_unmap_atomic(dst_vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+ src_vaddr = kmap(user_page);
+
+ memcpy_toio(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+
+ kunmap(user_page);
+ io_mapping_unmap(dst_vaddr);
}
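
[Note: with kmap() and memcpy_toio() the slow path can no longer fail mid-copy, which is why it (and its shmem counterparts) become void. The fast/slow split itself survives; a sketch of the dispatch the pwrite ioctl performs, both callees being the static helpers in this file:

static int example_gtt_pwrite(struct drm_device *dev,
                              struct drm_gem_object *obj,
                              struct drm_i915_gem_pwrite *args,
                              struct drm_file *file_priv)
{
        int ret;

        /* Optimistic path: may fault on the user page and bail out. */
        ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
        if (ret == -EFAULT)
                /* Pin the user pages first, then copy without faulting. */
                ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv);
        return ret;
}]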
static inline int
@@ -582,7 +558,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -606,7 +582,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto fail;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -656,7 +632,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -700,7 +676,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto out_unpin_object;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -723,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- goto out_unpin_object;
+ slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
remain -= page_length;
offset += page_length;
@@ -762,7 +731,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -782,7 +751,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -830,7 +799,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -878,7 +847,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -903,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
- page_length);
+ page_length,
+ 0);
+ } else {
+ slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -953,7 +920,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check destination.
*
@@ -961,7 +928,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -974,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0) {
+ dev->gtt_total != 0 &&
+ obj->write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
if (ret == -EFAULT) {
ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -995,7 +963,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -1035,7 +1003,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1052,7 +1020,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* about to occur.
*/
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list,
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
@@ -1097,7 +1067,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Pinned buffers may be scanout, so flush the cache */
if (obj_priv->pin_count)
@@ -1138,9 +1108,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1170,7 +1138,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1237,7 +1205,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
@@ -1308,7 +1276,7 @@ void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev->dev_mapping)
unmap_mapping_range(dev->dev_mapping,
@@ -1319,7 +1287,7 @@ static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
@@ -1350,7 +1318,7 @@ static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int start, i;
/*
@@ -1409,7 +1377,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1453,7 +1421,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size / PAGE_SIZE;
int i;
@@ -1470,9 +1438,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->pages[i] == NULL)
- break;
-
if (obj_priv->dirty)
set_page_dirty(obj_priv->pages[i]);
@@ -1488,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ BUG_ON(ring == NULL);
+ obj_priv->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1501,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
}
/* Move from whatever list we were on to the tail of execution. */
spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->list, &ring->active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1512,7 +1479,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1523,7 +1490,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
inode = obj->filp->f_path.dentry->d_inode;
@@ -1544,7 +1511,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
@@ -1555,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
obj_priv->last_rendering_seqno = 0;
+ obj_priv->ring = NULL;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -1562,24 +1530,52 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
i915_verify_inactive(dev, __FILE__, __LINE__);
}
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+ uint32_t flush_domains, uint32_t seqno,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain &&
+ obj_priv->ring->ring_flag == ring->ring_flag) {
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_active(obj, seqno, ring);
+
+ /* update the fence lru list */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+ }
+ }
+}
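
[Note: the subset test in the helper above is easy to misread: an object leaves the gpu_write_list only when the flush covers every domain it is still dirty in, and only when it was written by the ring being flushed. Spelled out as an illustrative helper:

static inline int example_flush_covers(uint32_t write_domain,
                                       uint32_t flush_domains)
{
        /* True iff every bit of write_domain is also set in flush_domains. */
        return (write_domain & flush_domains) == write_domain;
}]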
+
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains)
+ uint32_t flush_domains, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_file_private *i915_file_priv = NULL;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
- RING_LOCALS;
if (file_priv != NULL)
i915_file_priv = file_priv->driver_priv;
@@ -1588,28 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (request == NULL)
return 0;
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
-
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- DRM_DEBUG_DRIVER("%d\n", seqno);
+ seqno = ring->add_request(dev, ring, file_priv, flush_domains);
request->seqno = seqno;
+ request->ring = ring;
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list);
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
if (i915_file_priv) {
list_add_tail(&request->client_list,
&i915_file_priv->mm.request_list);
@@ -1620,29 +1602,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
- if (flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
- gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- }
-
- }
+ if (flush_domains != 0)
+ i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1659,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* before signalling the CPU
*/
static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
uint32_t flush_domains = 0;
- RING_LOCALS;
/* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev))
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
+
+ ring->flush(dev, ring,
+ I915_GEM_DOMAIN_COMMAND, flush_domains);
return flush_domains;
}
@@ -1692,14 +1649,14 @@ i915_gem_retire_request(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&dev_priv->mm.active_list)) {
+ while (!list_empty(&request->ring->active_list)) {
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ obj_priv = list_first_entry(&request->ring->active_list,
struct drm_i915_gem_object,
list);
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@@ -1743,32 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ return ring->get_gem_seqno(dev, ring);
}
/**
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+ if (!ring->status_page.page_addr
+ || list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev);
+ seqno = i915_get_gem_seqno(dev, ring);
- while (!list_empty(&dev_priv->mm.request_list)) {
+ while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
uint32_t retiring_seqno;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
retiring_seqno = request->seqno;
@@ -1786,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- i915_user_irq_put(dev);
+
+ ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
}
@@ -1802,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
if (!dev_priv->mm.suspended &&
- !list_empty(&dev_priv->mm.request_list))
+ (!list_empty(&dev_priv->render_ring.request_list) ||
+ (HAS_BSD(dev) &&
+ !list_empty(&dev_priv->bsd_ring.request_list))))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ int interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1821,8 +1787,8 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IRONLAKE(dev))
+ if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+ if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1835,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
trace_i915_gem_request_wait_begin(dev, seqno);
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_get(dev);
+ ring->waiting_gem_seqno = seqno;
+ ring->user_irq_get(dev, ring);
if (interruptible)
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
else
- wait_event(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- i915_user_irq_put(dev);
- dev_priv->mm.waiting_gem_seqno = 0;
+ ring->user_irq_put(dev, ring);
+ ring->waiting_gem_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
@@ -1856,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
+ __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1864,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, ring);
return ret;
}
@@ -1874,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* request and object lists appropriately for that event.
*/
static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
- return i915_do_wait_request(dev, seqno, 1);
+ return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
@@ -1885,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev,
uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
- invalidate_domains, flush_domains);
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
+ dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+ invalidate_domains,
+ flush_domains);
+
+ if (HAS_BSD(dev))
+ dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+ invalidate_domains,
+ flush_domains);
+}
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+{
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+ ring->flush(dev, ring,
+ invalidate_domains,
+ flush_domains);
}
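
[Note: i915_gem_flush() now fans out to every ring, while the new i915_gem_flush_ring() targets just one; callers that know which ring last wrote an object can use the per-object ring pointer. A sketch of that targeted form, mirroring the evict path later in this patch; the helper name is illustrative:

static void example_flush_object_ring(struct drm_device *dev,
                                      struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        if (obj->write_domain & I915_GEM_GPU_DOMAINS)
                i915_gem_flush_ring(dev, obj->write_domain,
                                    obj->write_domain, obj_priv->ring);
}]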
/**
@@ -1960,7 +1887,7 @@ static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -1976,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ ret = i915_wait_request(dev,
+ obj_priv->last_rendering_seqno, obj_priv->ring);
if (ret != 0)
return ret;
}
@@ -1991,7 +1919,8 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
#if WATCH_BUF
@@ -2046,8 +1975,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
}
/* Remove ourselves from the LRU list if present. */
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2067,7 +1998,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2085,29 +2016,66 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
}
static int
+i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
+ uint32_t seqno1, seqno2;
+ int ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev) ||
+ list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return 0;
+
+ /* Flush everything onto the inactive list. */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->render_ring);
+ if (seqno1 == 0)
+ return -ENOMEM;
+ ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+ if (ret)
+ return ret;
+
+ if (HAS_BSD(dev)) {
+ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->bsd_ring);
+ if (seqno2 == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
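
[Note: i915_gpu_idle() drains each ring with the same three-step idiom: flush, tag the flush with a request, wait for the request's seqno to pass. Factored for one ring as a sketch; the helper name is not from the patch:

static int example_drain_ring(struct drm_device *dev,
                              struct intel_ring_buffer *ring)
{
        uint32_t seqno;

        seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ring);
        if (seqno == 0)         /* request allocation failed */
                return -ENOMEM;
        return i915_wait_request(dev, seqno, ring);
}]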
+
+static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
@@ -2120,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
@@ -2134,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
struct drm_gem_object *obj;
int ret;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
for (;;) {
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, bsd_ring);
/* If there's an inactive buffer available now, grab it
* and be done.
@@ -2147,7 +2122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
@@ -2159,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
* things, wait for the next to finish and hopefully leave us
* a buffer to evict.
*/
- if (!list_empty(&dev_priv->mm.request_list)) {
+ if (!list_empty(&render_ring->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&render_ring->request_list,
struct drm_i915_gem_request,
list);
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&bsd_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
if (ret)
return ret;
@@ -2183,7 +2174,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@@ -2193,17 +2184,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
if (obj != NULL) {
uint32_t seqno;
- i915_gem_flush(dev,
+ i915_gem_flush_ring(dev,
+ obj->write_domain,
obj->write_domain,
- obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ obj_priv->ring);
+ seqno = i915_add_request(dev, NULL,
+ obj->write_domain,
+ obj_priv->ring);
if (seqno == 0)
return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
-
continue;
}
}
@@ -2223,12 +2212,14 @@ int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count, i;
struct address_space *mapping;
struct inode *inode;
struct page *page;
- int ret;
+
+ BUG_ON(obj_priv->pages_refcount
+ == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2248,14 +2239,13 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
page = read_cache_page_gfp(mapping, i,
- mapping_gfp_mask (mapping) |
+ GFP_HIGHUSER |
__GFP_COLD |
+ __GFP_RECLAIMABLE |
gfpmask);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- i915_gem_object_put_pages(obj);
- return ret;
- }
+ if (IS_ERR(page))
+ goto err_pages;
+
obj_priv->pages[i] = page;
}
@@ -2263,6 +2253,37 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj_priv->pages[i]);
+
+ drm_free_large(obj_priv->pages);
+ obj_priv->pages = NULL;
+ obj_priv->pages_refcount--;
+ return PTR_ERR(page);
+}
+
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int regnum = obj_priv->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj_priv->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}
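
[Note: Sandybridge keeps the 965-style 64-bit fence layout but moves the pitch field and measures it in 128-byte units. The packing above, restated as a pure function over hypothetical inputs: a Y-tiled object of 2 MiB at GTT offset 0x100000 with a 512-byte stride gives end = 0x2ff000 and pitch field (512/128)-1 = 3.

static uint64_t example_snb_fence_val(uint32_t gtt_offset, uint32_t size,
                                      uint32_t stride, int is_tiling_y)
{
        uint64_t val;

        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)((stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT;
        if (is_tiling_y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        return val | I965_FENCE_REG_VALID;
}]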
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2270,7 +2291,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2290,7 +2311,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
int tile_width;
uint32_t fence_reg, val;
@@ -2313,6 +2334,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2332,7 +2359,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
@@ -2361,6 +2388,59 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_gem_object *obj_priv = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = NULL;
+ int i, avail, ret;
+
+ /* First try to find a free reg */
+ avail = 0;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return i;
+
+ obj_priv = to_intel_bo(reg->obj);
+ if (!obj_priv->pin_count)
+ avail++;
+ }
+
+ if (avail == 0)
+ return -ENOSPC;
+
+ /* None available, try to steal one or wait for a user to finish */
+ i = I915_FENCE_REG_NONE;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list,
+ lru_list) {
+ obj = reg->obj;
+ obj_priv = to_intel_bo(obj);
+
+ if (obj_priv->pin_count)
+ continue;
+
+ /* found one! */
+ i = obj_priv->fence_reg;
+ break;
+ }
+
+ BUG_ON(i == I915_FENCE_REG_NONE);
+
+ /* We only have a reference on obj from the active list; put_fence_reg
+ * might drop that one, causing a use-after-free of obj. So hold a
+ * private reference to obj like the other callers of put_fence_reg
+ * (the set_tiling ioctl) do. */
+ drm_gem_object_reference(obj);
+ ret = i915_gem_object_put_fence_reg(obj);
+ drm_gem_object_unreference(obj);
+ if (ret != 0)
+ return ret;
+
+ return i;
+}
+
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
@@ -2379,14 +2459,14 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *old_obj_priv = NULL;
- int i, ret, avail;
+ int ret;
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0;
}
@@ -2410,86 +2490,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- /* First try to find a free reg */
- avail = 0;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- break;
-
- old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
- avail++;
- }
-
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- struct drm_gem_object *old_obj = NULL;
-
- if (avail == 0)
- return -ENOSPC;
-
- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- old_obj = old_obj_priv->obj;
-
- if (old_obj_priv->pin_count)
- continue;
-
- /* Take a reference, as otherwise the wait_rendering
- * below may cause the object to get freed out from
- * under us.
- */
- drm_gem_object_reference(old_obj);
-
- /* i915 uses fences for GPU access to tiled buffers */
- if (IS_I965G(dev) || !old_obj_priv->active)
- break;
-
- /* This brings the object to the head of the LRU if it
- * had been written to. The only way this should
- * result in us waiting longer than the expected
- * optimal amount of time is if there was a
- * fence-using buffer later that was read-only.
- */
- i915_gem_object_flush_gpu_write_domain(old_obj);
- ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0) {
- drm_gem_object_unreference(old_obj);
- return ret;
- }
-
- break;
- }
-
- /*
- * Zap this virtual mapping so we can set up a fence again
- * for this object next time we need it.
- */
- i915_gem_release_mmap(old_obj);
-
- i = old_obj_priv->fence_reg;
- reg = &dev_priv->fence_regs[i];
-
- old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&old_obj_priv->fence_list);
-
- drm_gem_object_unreference(old_obj);
- }
+ ret = i915_find_fence_reg(dev);
+ if (ret < 0)
+ return ret;
- obj_priv->fence_reg = i;
- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ obj_priv->fence_reg = ret;
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev))
+ sandybridge_write_fence_reg(reg);
+ else if (IS_I965G(dev))
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+ trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+ obj_priv->tiling_mode);
return 0;
}
@@ -2506,11 +2527,16 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev))
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+ (obj_priv->fence_reg * 8), 0);
+ } else if (IS_I965G(dev)) {
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else {
+ } else {
uint32_t fence_reg;
if (obj_priv->fence_reg < 8)
@@ -2522,9 +2548,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, 0);
}
- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+ reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&obj_priv->fence_list);
+ list_del_init(&reg->lru_list);
}
/**
@@ -2539,11 +2565,17 @@ int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
+ /* If we've changed tiling, GTT-mappings of the object
+ * need to re-fault to ensure that the correct fence register
+ * setup is in place.
+ */
+ i915_gem_release_mmap(obj);
+
/* On the i915, GPU access to tiled buffers is via a fence,
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
@@ -2552,12 +2584,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
- i915_gem_object_flush_gtt_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_gtt_write_domain(obj);
i915_gem_clear_fence_reg (obj);
return 0;
@@ -2571,7 +2603,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
@@ -2588,6 +2620,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return -EINVAL;
}
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->size > dev->gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
@@ -2678,7 +2718,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
@@ -2697,8 +2737,8 @@ static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- uint32_t seqno;
uint32_t old_write_domain;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
@@ -2706,9 +2746,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
BUG_ON(obj->write_domain);
- i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -2781,7 +2820,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2831,7 +2870,7 @@ int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2847,23 +2886,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ 0,
+ obj_priv->ring);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_cpu_write_domain(obj);
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
@@ -3044,7 +3084,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
@@ -3129,7 +3169,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (!obj_priv->page_cpu_valid)
return;
@@ -3169,7 +3209,7 @@ static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
@@ -3238,7 +3278,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int i, ret;
void __iomem *reloc_page;
bool need_fence;
@@ -3247,8 +3287,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_obj_fenceable(dev, obj))
- i915_gem_object_unbind(obj);
+ if (need_fence &&
+ !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3262,9 +3307,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
if (need_fence) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
i915_gem_object_unpin(obj);
return ret;
}
@@ -3288,7 +3330,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EBADF;
}
- target_obj_priv = target_obj->driver_private;
+ target_obj_priv = to_intel_bo(target_obj);
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3317,6 +3359,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -3427,62 +3479,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return 0;
}
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- RING_LOCALS;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
-
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
-
- /* XXX breadcrumb */
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3511,7 +3507,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev, request->seqno, request->ring);
if (ret != 0)
break;
}
@@ -3630,7 +3626,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
prepare_to_wait(&dev_priv->pending_flip_queue,
&wait, TASK_INTERRUPTIBLE);
for (i = 0; i < count; i++) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (atomic_read(&obj_priv->pending_flip) > 0)
break;
}
@@ -3651,6 +3647,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
return ret;
}
+
int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv,
@@ -3668,10 +3665,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
+ struct intel_ring_buffer *ring = NULL;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
+ if (args->flags & I915_EXEC_BSD) {
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with wrong flag\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->bsd_ring;
+ } else {
+ ring = &dev_priv->render_ring;
+ }
+
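Execbuffer now picks its target ring up front and, further down, dispatches through function pointers on that ring. A trimmed, hedged sketch of the struct intel_ring_buffer surface this function relies on (the full definition is assumed to live in intel_ringbuffer.h; only the hooks exercised in this file are shown):

    struct intel_ring_buffer {
            struct list_head active_list;   /* objects busy on this ring */
            struct list_head request_list;  /* outstanding requests */
            wait_queue_head_t irq_queue;    /* woken from the IRQ handler */
            u32 irq_gem_seqno;              /* last seqno seen at IRQ time */

            u32 (*get_gem_seqno)(struct drm_device *dev,
                                 struct intel_ring_buffer *ring);
            int (*dispatch_gem_execbuffer)(struct drm_device *dev,
                                           struct intel_ring_buffer *ring,
                                           struct drm_i915_gem_execbuffer2 *exec,
                                           struct drm_clip_rect *cliprects,
                                           uint64_t exec_offset);
            void (*user_irq_get)(struct drm_device *dev,
                                 struct intel_ring_buffer *ring);
            void (*user_irq_put)(struct drm_device *dev,
                                 struct intel_ring_buffer *ring);
    };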
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3739,7 +3748,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
@@ -3784,11 +3793,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != -ENOSPC || pin_tries >= 1) {
if (ret != -ERESTARTSYS) {
unsigned long long total_size = 0;
- for (i = 0; i < args->buffer_count; i++)
+ int num_fences = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ obj_priv = to_intel_bo(object_list[i]);
+
total_size += object_list[i]->size;
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ num_fences +=
+ exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+ }
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
pinned+1, args->buffer_count,
- total_size, ret);
+ total_size, num_fences,
+ ret);
DRM_ERROR("%d objects [%d pinned], "
"%d object bytes [%d pinned], "
"%d/%d gtt bytes\n",
@@ -3858,14 +3875,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
(void)i915_add_request(dev, file_priv,
- dev->flush_domains);
+ dev->flush_domains,
+ &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
+ }
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
@@ -3897,7 +3921,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
#endif
/* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+ ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+ cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -3907,7 +3932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev);
+ flush_domains = i915_retire_commands(dev, ring);
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3918,12 +3943,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
- seqno = i915_add_request(dev, file_priv, flush_domains);
+ seqno = i915_add_request(dev, file_priv, flush_domains, ring);
BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno);
+ i915_gem_object_move_to_active(obj, seqno, ring);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -3940,7 +3966,7 @@ err:
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
obj_priv->in_execbuffer = false;
}
drm_gem_object_unreference(object_list[i]);
@@ -4035,7 +4061,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = 0;
+ exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
@@ -4118,10 +4144,23 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
+ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+
i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+ alignment = i915_gem_get_gtt_alignment(obj);
+ if (obj_priv->gtt_offset & (alignment - 1)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
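Pinning an already-bound object now rebinds it if its current offset violates the requested alignment; the bitwise test assumes alignment is a power of two:

    /* Worked example: alignment = 4096, so (alignment - 1) = 0xfff.
     *   gtt_offset = 0x0001f000 -> 0x0001f000 & 0xfff == 0x000  (keep)
     *   gtt_offset = 0x0001f800 -> 0x0001f800 & 0xfff == 0x800  (unbind,
     *                                                            rebind) */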
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret)
@@ -4151,7 +4190,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
@@ -4191,7 +4230,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4248,7 +4287,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -4274,6 +4313,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = dev->dev_private;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
@@ -4288,9 +4328,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* actually unmasked, and our working set ends up being larger than
* required.
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
- obj_priv = obj->driver_private;
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
+ obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
@@ -4336,7 +4379,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
@@ -4362,34 +4405,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->agp_type = AGP_USER_MEMORY;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(&obj->base);
+
+ return &obj->base;
+}
- trace_i915_gem_object_create(obj);
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
return 0;
}
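With allocation moved into the driver, i915_gem_init_object() survives only to BUG() if the core allocation path is ever taken. Callers switch over as in i915_gem_init_pipe_control() below:

    obj = i915_gem_alloc_object(dev, 4096);  /* was drm_gem_object_alloc() */
    if (obj == NULL)
            return -ENOMEM;
    obj_priv = to_intel_bo(obj);             /* was obj->driver_private */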
@@ -4397,7 +4444,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
trace_i915_gem_object_destroy(obj);
@@ -4412,9 +4459,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@@ -4427,9 +4476,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@@ -4445,307 +4494,152 @@ int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
+ int ret;
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+ if (dev_priv->mm.suspended ||
+ (dev_priv->render_ring.gem_object == NULL) ||
+ (HAS_BSD(dev) &&
+ dev_priv->bsd_ring.gem_object == NULL)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
- mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- mutex_lock(&dev->struct_mutex);
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
- if (seqno == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret) {
mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ return ret;
}
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- atomic_set(&dev_priv->mm.wedged, 1);
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!atomic_read(&dev_priv->mm.wedged)) {
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- WARN_ON(!list_empty(&dev_priv->mm.active_list));
- WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- WARN_ON(!list_empty(&dev_priv->mm.request_list));
}
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And it must not be conflated with mm.suspended!
*/
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_inactive_list(dev);
- WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
+ i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
+
mutex_unlock(&dev->struct_mutex);
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
return 0;
}
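The removed flush-and-poll loop is replaced by i915_gpu_idle(), defined elsewhere in this series. A hedged sketch of its assumed shape, composed from the per-ring primitives used throughout this file:

    static int i915_gpu_idle(struct drm_device *dev)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            uint32_t seqno;
            int ret;

            /* Flush everything onto the inactive lists. */
            i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
            seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
                                     &dev_priv->render_ring);
            if (seqno == 0)
                    return -ENOMEM;
            ret = i915_do_wait_request(dev, seqno, 0, &dev_priv->render_ring);

            if (ret == 0 && HAS_BSD(dev)) {
                    seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
                                             &dev_priv->bsd_ring);
                    if (seqno == 0)
                            return -ENOMEM;
                    ret = i915_do_wait_request(dev, seqno, 0,
                                               &dev_priv->bsd_ring);
            }
            return ret;
    }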
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
static int
-i915_gem_init_hws(struct drm_device *dev)
+i915_gem_init_pipe_control(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
-
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
- }
+ if (ret)
+ goto err_unref;
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+ if (dev_priv->seqno_page == NULL)
+ goto err_unpin;
- dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
- if (dev_priv->hw_status_page == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
- dev_priv->hws_obj = obj;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
}
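With PIPE_CONTROL in use, the GPU writes each completed seqno into the pinned page kmapped above, so reading the current seqno becomes a plain load rather than a status-page register read. An illustrative accessor (name assumed; the hardware is taken to write the seqno into the first dword of the scratch page):

    static u32 i915_pipe_control_seqno(drm_i915_private_t *dev_priv)
    {
            return ((volatile u32 *)dev_priv->seqno_page)[0];
    }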
+
static void
-i915_gem_cleanup_hws(struct drm_device *dev)
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- if (dev_priv->hws_obj == NULL)
- return;
-
- obj = dev_priv->hws_obj;
- obj_priv = obj->driver_private;
-
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
- dev_priv->hws_obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- dev_priv->hw_status_page = NULL;
+ dev_priv->seqno_obj = NULL;
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
+ dev_priv->seqno_page = NULL;
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
int ret;
- u32 head;
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
+ dev_priv->render_ring = render_ring;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- i915_gem_cleanup_hws(dev);
- return -ENOMEM;
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
}
- obj_priv = obj->driver_private;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return ret;
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ return ret;
}
- /* Set up the kernel mapping for the ring. */
- ring->Size = obj->size;
-
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
- ring->map.size = obj->size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return -EINVAL;
- }
- ring->ring_obj = obj;
- ring->virtual_start = ring->map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
- DRM_ERROR("Ring head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- I915_WRITE(PRB0_HEAD, 0);
-
- DRM_ERROR("Ring head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* If the head is still not zero, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return -EIO;
- }
+ ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ if (ret)
+ goto cleanup_pipe_control;
- /* Update our cache of the ring state */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
+ if (HAS_BSD(dev)) {
+ dev_priv->bsd_ring = bsd_ring;
+ ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (ret)
+ goto cleanup_render_ring;
}
return 0;
+
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+ return ret;
}
void
@@ -4753,17 +4647,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
- i915_gem_cleanup_hws(dev);
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
}
int
@@ -4791,12 +4679,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
spin_lock(&dev_priv->mm.active_list_lock);
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
@@ -4835,22 +4725,37 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+ if (HAS_BSD(dev)) {
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+ }
+ for (i = 0; i < 16; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (IS_GEN3(dev)) {
+ u32 tmp = I915_READ(MI_ARB_STATE);
+ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
+ /* arb state is a masked write, so set bit + bit in mask */
+ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
+ I915_WRITE(MI_ARB_STATE, tmp);
+ }
+ }
+
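MI_ARB_STATE is a masked register: the upper 16 bits select which of the lower 16 bits the write actually modifies, which is why the write above sets the bit in both halves. The idiom, as a generic helper (name illustrative; MI_ARB_MASK_SHIFT assumed to be 16):

    static inline u32 masked_bit_enable(u32 bit)
    {
            /* set the bit and also unmask it for this write */
            return bit | (bit << MI_ARB_MASK_SHIFT);
    }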
/* Old X drivers will take 0-2 for front, back, depth buffers */
- dev_priv->fence_reg_start = 3;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
@@ -4946,7 +4851,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int ret;
int page_count;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!obj_priv->phys_obj)
return;
@@ -4985,7 +4890,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->phys_obj) {
if (obj_priv->phys_obj->id == id)
@@ -5036,7 +4941,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
void *obj_addr;
int ret;
char __user *user_data;
@@ -5068,7 +4973,23 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
}
static int
-i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+i915_gpu_is_active(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list);
+ if (HAS_BSD(dev))
+ lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ return !lists_empty;
+}
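The new struct shrinker parameter below matches this kernel generation's shrinker callback signature; a hedged sketch of how such a callback is registered with the VM (registration site and names illustrative, not part of this hunk):

    static struct shrinker i915_shrinker = {
            .shrink = i915_gem_shrink,  /* the callback below */
            .seeks  = DEFAULT_SEEKS,
    };

    /* at init time */
    register_shrinker(&i915_shrinker);
    /* and unregister_shrinker(&i915_shrinker) at teardown */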
+
+static int
+i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
drm_i915_private_t *dev_priv, *next_dev;
struct drm_i915_gem_object *obj_priv, *next_obj;
@@ -5096,6 +5017,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&shrink_list_lock);
+rescan:
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
@@ -5105,14 +5027,16 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
continue;
spin_unlock(&shrink_list_lock);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
- i915_gem_retire_requests(dev);
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@@ -5141,7 +5065,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
@@ -5153,6 +5077,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
would_deadlock = 0;
}
+ if (nr_to_scan) {
+ int active = 0;
+
+ /*
+ * We are desperate for pages, so as a last resort, wait
+ * for the GPU to finish and discard whatever we can.
+ * This dramatically reduces the number of OOM-killer
+ * events while running the GPU aggressively.
+ */
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ if (i915_gpu_is_active(dev)) {
+ i915_gpu_idle(dev);
+ active++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (active)
+ goto rescan;
+ }
+
spin_unlock(&shrink_list_lock);
if (would_deadlock)
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f8..80f380b1d951 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
@@ -72,7 +72,7 @@ void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685bf..4b7c49d4257d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
*
*/
-#include <linux/acpi.h>
-#include <linux/pnp.h>
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
@@ -83,120 +81,6 @@
* to match what the GPU expects.
*/
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret = 0;
-
- if (IS_I965G(dev))
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
-#endif
-
- /* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- goto out;
- }
-
- if (IS_I965G(dev))
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool need_disable = false, enabled;
-
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- goto out;
-
- if (intel_alloc_mchbar_resource(dev))
- goto out;
-
- need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-out:
- return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
-
- if (disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- bool need_disable;
- if (IS_IRONLAKE(dev)) {
+ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* Try to make sure MCHBAR is enabled before poking at it */
- need_disable = intel_setup_mchbar(dev);
-
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
-
- intel_teardown_mchbar(dev, need_disable);
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
-
-/**
- * Returns whether an object is currently fenceable. If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
- if (obj->size & 4095)
- return false;
- return true;
- } else if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
-
- /* Power of two sized... */
- if (obj->size & (obj->size - 1))
- return false;
-
- /* Objects must be size aligned as well */
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
- return true;
-}
-
/* Check pitch constraints for all chips & tiling formats */
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -357,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
* reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (stride > 8192)
return false;
- } else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
- return false;
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
/* 965+ just needs multiples of tile width */
@@ -391,11 +232,11 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-static bool
+bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->gtt_space == NULL)
return true;
@@ -435,15 +276,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EBUSY;
+ }
+
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
@@ -482,9 +326,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* need to ensure that any fence register is cleared.
*/
if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj);
+ else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ ret = i915_gem_object_put_fence_reg(obj);
else
- ret = i915_gem_object_put_fence_reg(obj);
+ i915_gem_release_mmap(obj);
+
if (ret != 0) {
WARN(ret != -ERESTARTSYS,
"failed to reset object for tiling switch");
@@ -493,12 +340,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
goto err;
}
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
}
@@ -524,7 +365,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -587,7 +428,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int i;
@@ -616,7 +457,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int i;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..dba53d4b9fb3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -27,6 +27,7 @@
*/
#include <linux/sysrq.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -52,7 +53,7 @@
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
PIPE_VBLANK_INTERRUPT_STATUS)
@@ -73,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -114,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -166,11 +167,15 @@ void intel_enable_asle (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
- else
+ else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
+ if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, 0,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ }
}
/**
@@ -255,18 +260,49 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
+ if (mode_config->num_encoder) {
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- if (intel_output->hot_plug)
- (*intel_output->hot_plug) (intel_output);
+ if (intel_encoder->hot_plug)
+ (*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
+}
+
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_delay = dev_priv->cur_delay;
+
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->min_delay)
+ new_delay = dev_priv->min_delay;
+ }
+
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->cur_delay = new_delay;
+
+ return;
}
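Note the inverted scale in this handler: a smaller delay value means a higher frequency, so max_delay is numerically the lowest permitted value and the clamps read the opposite of how they first look.

    /* Worked example (values illustrative): max_delay = 3, min_delay = 9,
     * cur_delay = 5.
     *   busy_up   > max_avg -> new_delay = 4 (speed up), clamped to >= 3
     *   busy_down < min_avg -> new_delay = 6 (slow down), clamped to <= 9 */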
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
@@ -275,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
struct drm_i915_master_private *master_priv;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -297,14 +334,17 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ if (gt_iir & GT_PIPE_NOTIFY) {
+ u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
@@ -331,6 +371,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+ if (de_iir & DE_PCU_EVENT) {
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ i915_handle_rps_change(dev);
+ }
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +421,126 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+ struct drm_gem_object *src)
+{
+ struct drm_i915_error_object *dst;
+ struct drm_i915_gem_object *src_priv;
+ int page, page_count;
+
+ if (src == NULL)
+ return NULL;
+
+ src_priv = to_intel_bo(src);
+ if (src_priv->pages == NULL)
+ return NULL;
+
+ page_count = src->size / PAGE_SIZE;
+
+ dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ for (page = 0; page < page_count; page++) {
+ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ unsigned long flags;
+
+ if (d == NULL)
+ goto unwind;
+ local_irq_save(flags);
+ s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s, KM_IRQ0);
+ local_irq_restore(flags);
+ dst->pages[page] = d;
+ }
+ dst->page_count = page_count;
+ dst->gtt_offset = src_priv->gtt_offset;
+
+ return dst;
+
+unwind:
+ while (page--)
+ kfree(dst->pages[page]);
+ kfree(dst);
+ return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ i915_error_object_free(error->batchbuffer[0]);
+ i915_error_object_free(error->batchbuffer[1]);
+ i915_error_object_free(error->ringbuffer);
+ kfree(error->active_bo);
+ kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+ u32 cmd;
+
+ if (IS_I830(dev) || IS_845G(dev))
+ cmd = MI_BATCH_BUFFER;
+ else if (IS_I965G(dev))
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ else
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+ return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 head, bbaddr;
+ u32 *ring;
+
+ /* Locate the current position in the ringbuffer and walk back
+ * to find the most recently dispatched batch buffer.
+ */
+ bbaddr = 0;
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+
+ if (bbaddr == 0) {
+ ring = (u32 *)(dev_priv->render_ring.virtual_start
+ + dev_priv->render_ring.size);
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+ }
+
+ return bbaddr;
+}
+
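The backwards scan above matches the batch-start encodings that the removed i915_dispatch_gem_execbuffer used to emit; for reference, the 965 two-dword form it is looking for:

    /* ring[0] = MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965;
     * ring[1] = exec_start;  <- the GTT address i915_get_bbaddr() returns */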
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -388,19 +553,26 @@ static void i915_error_work_func(struct work_struct *work)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
struct drm_i915_error_state *error;
+ struct drm_gem_object *batchbuffer[2];
unsigned long flags;
+ u32 bbaddr;
+ int count;
spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error)
- goto out;
+ error = dev_priv->first_error;
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ if (error)
+ return;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
- goto out;
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
}
+ error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +583,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
} else {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
@@ -418,34 +591,114 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instps = I915_READ(INSTPS);
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
+ error->bbaddr = I915_READ64(BB_ADDR);
}
- do_gettimeofday(&error->time);
+ bbaddr = i915_ringbuffer_last_batch(dev);
+
+ /* Grab the current batchbuffer, most likely to have crashed. */
+ batchbuffer[0] = NULL;
+ batchbuffer[1] = NULL;
+ count = 0;
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
+
+ struct drm_gem_object *obj = &obj_priv->base;
- dev_priv->first_error = error;
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
-out:
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size &&
+ batchbuffer[0] != obj)
+ batchbuffer[1] = obj;
+
+ count++;
+ }
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+ /* Record the ringbuffer */
+ error->ringbuffer = i915_error_object_create(dev,
+ dev_priv->render_ring.gem_object);
+
+ /* Record buffers on the active list. */
+ error->active_bo = NULL;
+ error->active_bo_count = 0;
+
+ if (count)
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+ GFP_ATOMIC);
+
+ if (error->active_bo) {
+ int i = 0;
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ error->active_bo[i].size = obj->size;
+ error->active_bo[i].name = obj->name;
+ error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+ error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+ error->active_bo[i].read_domains = obj->read_domains;
+ error->active_bo[i].write_domain = obj->write_domain;
+ error->active_bo[i].fence_reg = obj_priv->fence_reg;
+ error->active_bo[i].pinned = 0;
+ if (obj_priv->pin_count > 0)
+ error->active_bo[i].pinned = 1;
+ if (obj_priv->user_pin_count > 0)
+ error->active_bo[i].pinned = -1;
+ error->active_bo[i].tiling = obj_priv->tiling_mode;
+ error->active_bo[i].dirty = obj_priv->dirty;
+ error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+ if (++i == count)
+ break;
+ }
+ error->active_bo_count = i;
+ }
+
+ do_gettimeofday(&error->time);
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (dev_priv->first_error == NULL) {
+ dev_priv->first_error = error;
+ error = NULL;
+ }
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ if (error)
+ i915_error_state_free(dev, error);
}
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of regsiter state at error interrupt time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+
+ spin_lock(&dev_priv->error_lock);
+ error = dev_priv->first_error;
+ dev_priv->first_error = NULL;
+ spin_unlock(&dev_priv->error_lock);
+
+ if (error)
+ i915_error_state_free(dev, error);
+}
+
+static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
- i915_capture_error_state(dev);
+ if (!eir)
+ return;
printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
eir);
@@ -491,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
+ u32 pipea_stats = I915_READ(PIPEASTAT);
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
+
printk(KERN_ERR "memory refresh error\n");
printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
pipea_stats);
@@ -547,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+static void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
if (wedged) {
atomic_set(&dev_priv->mm.wedged, 1);
@@ -554,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -573,10 +847,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
atomic_inc(&dev_priv->irq_received);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -653,33 +928,46 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ u32 seqno =
+ render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
+ if (dev_priv->flip_pending_is_done)
+ intel_finish_page_flip_plane(dev, 0);
+ }
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 1);
+ if (dev_priv->flip_pending_is_done)
+ intel_finish_page_flip_plane(dev, 1);
+ }
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
- intel_finish_page_flip(dev, 0);
+ if (!dev_priv->flip_pending_is_done)
+ intel_finish_page_flip(dev, 0);
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
- intel_finish_page_flip(dev, 1);
+ if (!dev_priv->flip_pending_is_done)
+ intel_finish_page_flip(dev, 1);
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
@@ -708,7 +996,6 @@ static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
i915_kernel_lost_context(dev);
@@ -730,43 +1017,13 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_user_irq_get(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IRONLAKE(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IRONLAKE(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
if (dev_priv->trace_irq_seqno == 0)
- i915_user_irq_get(dev);
+ render_ring->user_irq_get(dev, render_ring);
dev_priv->trace_irq_seqno = seqno;
}
@@ -776,6 +1033,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -789,10 +1047,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+ render_ring->user_irq_get(dev, render_ring);
+ DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_put(dev);
+ render_ring->user_irq_put(dev, render_ring);
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -811,7 +1069,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->ring.virtual_start) {
+ if (!dev_priv || !dev_priv->render_ring.virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -861,7 +1119,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (IS_I965G(dev))
@@ -883,7 +1141,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
@@ -897,7 +1155,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -957,9 +1215,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+ return list_entry(dev_priv->render_ring.request_list.prev,
+ struct drm_i915_gem_request, list);
}
/**
@@ -973,15 +1234,21 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd;
-
+
+ /* No reset support on this chip yet. */
+ if (IS_GEN6(dev))
+ return;
+
if (!IS_I965G(dev))
acthd = I915_READ(ACTHD);
else
acthd = I915_READ(ACTHD_I965);
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->mm.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+ if (list_empty(&dev_priv->render_ring.request_list) ||
+ i915_seqno_passed(i915_get_gem_seqno(dev,
+ &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
return;
}
@@ -1034,7 +1301,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_USER_INTERRUPT;
+ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
@@ -1048,7 +1315,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
(void) I915_READ(DEIER);
/* user interrupt should be enabled, but masked initial */
- dev_priv->gt_irq_mask_reg = 0xffffffff;
+ dev_priv->gt_irq_mask_reg = ~render_mask;
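+ /* a set bit in GTIMR masks its source, so ~render_mask leaves
+ * only the sources in render_mask unmasked */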
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1064,6 +1331,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
(void) I915_READ(SDEIER);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Clear & enable PCU event interrupts */
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ }
+
return 0;
}
@@ -1076,7 +1350,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_preinstall(dev);
return;
}
@@ -1104,11 +1378,14 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+ if (HAS_BSD(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
@@ -1118,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[1] = 0;
if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
- hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Ignore TV since it's buggy */
-
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
- i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@@ -1158,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- /* Disable pipe interrupt enables, clear pending pipe status */
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
- /* Clear pending interrupt status */
- I915_WRITE(IIR, I915_READ(IIR));
-
- I915_WRITE(IER, enable_mask);
I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, enable_mask);
(void) I915_READ(IER);
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+ /* Programming the CRT detection parameters tends
+ * to generate a spurious hotplug event about three
+ * seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
opregion_enable_asle(dev);
return 0;
@@ -1196,7 +1479,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
int i = 0;
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
opregion->acpi->didl[i] |= (1<<31) | output_type | i;
i++;
}
-
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- opregion->acpi->didl[i] = 0;
+ goto end;
}
int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..cf41c672defe 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
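+/* Decode sketch (illustrative): gms = (snb_gmch_ctrl &
+ * SNB_GMCH_GMS_STOLEN_MASK) >> 3 indexes the table above, i.e.
+ * gms * 32MB of stolen memory for gms >= 1.
+ */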
+
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -61,6 +80,7 @@
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -158,6 +178,7 @@
#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
@@ -210,6 +231,16 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
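+/* Typical command layout (a sketch; other combinations are possible):
+ * GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH,
+ * followed by an address dword (PIPE_CONTROL_GLOBAL_GTT for GTT space)
+ * and the qword of data written once the flush completes.
+ */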
/*
* Fence registers
@@ -221,7 +252,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
@@ -234,6 +265,9 @@
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+
/*
* Instruction and interrupt control regs
*/
@@ -265,6 +299,7 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -274,6 +309,10 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+
+#define MI_MODE 0x0209c
+# define VS_TIMER_DISPATCH (1 << 6)
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -282,7 +321,7 @@
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -296,6 +335,7 @@
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1<<25)
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
@@ -306,16 +346,83 @@
#define I915_ERROR_MEMORY_REFRESH (1<<1)
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
-#define FW_BLC_SELF_EN (1<<15)
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
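+/* MI_ARB_STATE is a masked-write register: a bit only changes when its
+ * write enable (the same bit shifted by MI_ARB_MASK_SHIFT) is set, e.g.
+ * (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT) |
+ * MI_ARB_C3_LP_WRITE_ENABLE enables that single arbiter bit.
+ */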
+
+/* Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default.
+ */
+#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
+#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
+#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
+#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
+#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2 TLB fetches.
+ */
+#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
+
+/* Enables non-sequential data reads through arbiter
+ */
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define MI_ARB_TIME_SLICE_MASK (7 << 5)
+#define MI_ARB_TIME_SLICE_1 (0 << 5)
+#define MI_ARB_TIME_SLICE_2 (1 << 5)
+#define MI_ARB_TIME_SLICE_4 (2 << 5)
+#define MI_ARB_TIME_SLICE_6 (3 << 5)
+#define MI_ARB_TIME_SLICE_8 (4 << 5)
+#define MI_ARB_TIME_SLICE_10 (5 << 5)
+#define MI_ARB_TIME_SLICE_14 (6 << 5)
+#define MI_ARB_TIME_SLICE_16 (7 << 5)
+
+/* Low priority grace period page size */
+#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
+#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
+
+/* Set display plane priority */
+#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
+#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
+
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
@@ -324,8 +431,42 @@
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define ECOSKPD 0x021d0
+#define ECO_GATING_CX_ONLY (1<<3)
+#define ECO_FLIP_DONE (1<<0)
+
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM 0x2098
+#define GEN6_RENDER_IMR 0x20a8
+#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
+#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
+#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
+#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
+#define GEN6_RENDER_SYNC_STATUS (1 << 2)
+#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
+
+#define GEN6_BLITTER_HWSTAM 0x22098
+#define GEN6_BLITTER_IMR 0x220a8
+#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
+#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
+#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
+#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+#define BSD_RING_TAIL 0x04030
+#define BSD_RING_HEAD 0x04034
+#define BSD_RING_START 0x04038
+#define BSD_RING_CTL 0x0403c
+#define BSD_RING_ACTHD 0x04074
+#define BSD_HWS_PGA 0x04080
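+/* These mirror the render ring registers (TAIL/HEAD/START/CTL at
+ * base + 0x30..0x3c, ACTHD at base + 0x74 as on 965+, HWS_PGA at
+ * base + 0x80), with the BSD ring based at 0x4000 instead of 0x2000.
+ */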
/*
* Framebuffer compression (915+ only)
@@ -338,7 +479,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_C3_IDLE (1<<13)
+#define FBC_CTL_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
@@ -763,6 +904,10 @@
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
@@ -784,10 +929,185 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
+#define TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+ when the command completes */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
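+/* Expected command flow (a sketch): write the command, jitter and target
+ * VID fields together with MEMCTL_CMD_STS set, then poll MEMSWCTL until
+ * MEMCTL_CMD_STS reads back clear to confirm completion.
+ */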
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define MCHBAR_RENDER_STANDBY 0x111b8
#define RCX_SW_EXIT (1<<23)
#define RSX_STATUS_MASK 0x00700000
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -878,8 +1198,6 @@
#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
-#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -1588,6 +1906,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1748,7 +2074,10 @@
/* Display & cursor control */
/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_DITHER_TYPE_MASK (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1812,15 +2141,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -1847,6 +2185,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+
+/* define the watermark registers on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+/* define the FIFO sizes on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2010,6 +2385,14 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+#define PCH_DSPCLK_GATE_D 0x42020
+# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
+# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+
+#define PCH_3DCGDIS0 0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
+
#define FDI_PLL_FREQ_CTL 0x46030
#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
@@ -2111,16 +2494,26 @@
#define DEIER 0x4400c
/* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
+#define GT_BSD_USER_INTERRUPT (1 << 5)
+
#define GTISR 0x44010
#define GTIMR 0x44014
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DSPCLK_GATE 0x42020
+#define ILK_DPARB_CLK_GATE (1<<5)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@@ -2131,6 +2524,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2222,6 +2620,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@@ -2308,6 +2717,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2340,6 +2762,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@@ -2405,12 +2834,18 @@
#define SDVO_ENCODING (0)
#define TMDS_ENCODING (2 << 10)
#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+/* CPT */
+#define HDMI_MODE_SELECT (1 << 9)
+#define DVI_MODE_SELECT (0)
#define SDVOB_BORDER_ENABLE (1 << 7)
#define AUDIO_ENABLE (1 << 6)
#define VSYNC_ACTIVE_HIGH (1 << 4)
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB HDMIB
+
#define HDMIC 0xe1150
#define HDMID 0xe1160
@@ -2434,6 +2869,7 @@
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
@@ -2468,4 +2904,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..60a5800fba6e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
}
/* VGA state */
@@ -682,6 +684,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -700,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ /* Only restore FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
}
-
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
@@ -745,11 +750,16 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveGTIMR = I915_READ(GTIMR);
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveMCHBAR_RENDER_STANDBY =
+ I915_READ(MCHBAR_RENDER_STANDBY);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -820,6 +830,9 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_drps(dev);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38f..fab21760dd57 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind,
__entry->obj, __entry->gtt_offset)
);
-TRACE_EVENT(i915_gem_object_clflush,
-
- TP_PROTO(struct drm_gem_object *obj),
-
- TP_ARGS(obj),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- ),
-
- TP_printk("obj=%p", __entry->obj)
-);
-
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -115,7 +98,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@@ -132,21 +115,25 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
TP_PROTO(struct drm_gem_object *obj),
- TP_ARGS(obj),
+ TP_ARGS(obj)
+);
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
- TP_fast_assign(
- __entry->obj = obj;
- ),
+ TP_PROTO(struct drm_gem_object *obj),
- TP_printk("obj=%p", __entry->obj)
+ TP_ARGS(obj)
+);
+
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj)
);
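+/* DECLARE_EVENT_CLASS factors the shared TP_STRUCT__entry, TP_fast_assign
+ * and TP_printk out of each event, so the DEFINE_EVENTs above supply only
+ * a name and prototype and share the class's generated code.
+ */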
/* batch tracing */
@@ -197,8 +184,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -217,64 +203,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, seqno)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@@ -291,26 +248,23 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
+ TP_ARGS(dev)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
+ TP_PROTO(struct drm_device *dev),
- TP_printk("dev=%u", __entry->dev)
+ TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..96f75d7f6633 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -247,6 +257,7 @@ static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
+ struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
/* Set sensible defaults in case we can't find the general block */
@@ -263,7 +274,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -365,6 +376,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->initialized = 1;
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
@@ -416,8 +428,9 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
- assume 18bpp panel color depth.\n");
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+ "supported, assume 18bpp panel color "
+ "depth.\n");
dev_priv->edp_bpp = 18;
}
return;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..ee0732b222a1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -25,6 +25,7 @@
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -39,7 +40,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -113,7 +114,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -122,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -135,12 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
- adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -151,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa;
+ u32 adpa, temp;
bool ret;
- adpa = I915_READ(PCH_ADPA);
+ temp = adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -175,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
+ if (HAS_PCH_CPT(dev)) {
+ I915_WRITE(PCH_ADPA, temp);
+ (void)I915_READ(PCH_ADPA);
+ }
+
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -199,10 +217,11 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hotplug_en;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
int i, tries = 0;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
/*
@@ -214,15 +233,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
tries = 2;
else
tries = 1;
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= CRT_FORCE_HOTPLUG_MASK;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
-
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
-
for (i = 0; i < tries ; i++) {
unsigned long timeout;
/* turn on the FORCE_DETECT */
@@ -237,28 +250,34 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
} while (time_after(timeout, jiffies));
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
- CRT_HOTPLUG_MONITOR_NONE)
- return true;
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
- return false;
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
}
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
- if (intel_output->type != INTEL_OUTPUT_ANALOG)
+ if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
return false;
- return intel_ddc_probe(intel_output);
+ return intel_ddc_probe(intel_encoder);
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -386,8 +405,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -399,18 +418,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(connector))
+ if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_output);
+ status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_output,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
- status = intel_crt_load_detect(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
+ status = intel_crt_load_detect(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder,
+ connector, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -420,9 +440,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
- intel_i2c_destroy(intel_output->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -431,29 +448,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_output *intel_output = to_intel_output(connector);
- struct i2c_adapter *ddcbus;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_output);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_output->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_output->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_output->ddc_bus) {
- intel_output->ddc_bus = ddcbus;
+ if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_output);
- intel_i2c_destroy(ddcbus);
+ ret = intel_ddc_get_modes(connector, ddc_bus);
+ intel_i2c_destroy(ddc_bus);
end:
return ret;
@@ -490,12 +505,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -505,26 +524,33 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+ intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
return;
- connector = &intel_output->base;
- drm_connector_init(dev, &intel_output->base,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_encoder->enc);
/* Set up the DDC bus. */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
@@ -532,25 +558,30 @@ void intel_crt_init(struct drm_device *dev)
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
- intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_output->ddc_bus) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+ if (!intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
return;
}
- intel_output->type = INTEL_OUTPUT_ANALOG;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 0;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23ebc..5e21b3119824 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -28,6 +28,7 @@
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
@@ -232,7 +233,7 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* Ironlake */
+/* Ironlake / Sandybridge */
/* We calculate the clock using (register_value + 2) for
N/M1/M2, so the range value for them here is (actual_value - 2).
*/
@@ -690,7 +691,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
@@ -741,36 +742,18 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
+ struct drm_encoder *l_entry;
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_output *intel_output = to_intel_output(l_entry);
- if (intel_output->type == type)
+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+ if (l_entry && l_entry->crtc == crtc) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
+ if (intel_encoder->type == type)
return true;
}
}
return false;
}
-struct drm_connector *
-intel_pipe_get_output (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
-}
-
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors is valid for a given refclk with
@@ -879,14 +862,14 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t clock;
int max_n;
bool found;
- /* approximately equals target * 0.00488 */
- int err_most = (target >> 8) + (target >> 10);
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
@@ -904,9 +887,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1002,7 +985,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
@@ -1032,7 +1015,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
- fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1046,28 +1029,36 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
return;
+ if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+ return; /* Already off, just return */
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
- ; /* nothing */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("FBC idle timed out\n");
+ break;
+ }
+ cpu_relax(); /* spin until the compressing bit clears */
+ }
intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1079,7 +1070,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
DPFC_CTL_PLANEB);
@@ -1124,14 +1115,43 @@ void g4x_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+}
+
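
The three wrappers above centralize the NULL checks that were previously open-coded at every FBC call site, so callers can use intel_fbc_enabled()/intel_enable_fbc()/intel_disable_fbc() without knowing which generation's hooks were populated. A reduced sketch of the guarded-vtable pattern they implement; the struct mirrors the dev_priv->display hooks used in this diff, while the init helper is hypothetical (the real assignments happen at driver init, outside this hunk):

struct display_funcs {
        bool (*fbc_enabled)(struct drm_device *dev);
        void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
        void (*disable_fbc)(struct drm_device *dev);
};

static void init_fbc_hooks(struct display_funcs *disp, bool is_g4x)
{
        if (is_g4x) {
                disp->fbc_enabled = g4x_fbc_enabled;
                disp->enable_fbc = g4x_enable_fbc;
                disp->disable_fbc = g4x_disable_fbc;
        } else {
                disp->fbc_enabled = i8xx_fbc_enabled;
                disp->enable_fbc = i8xx_enable_fbc;
                disp->disable_fbc = i8xx_disable_fbc;
        }
}

Hooks a platform leaves NULL simply turn the wrapper into a no-op (or a "not enabled" answer), which is why intel_update_fbc() below can drop its triple NULL test in favour of a single I915_HAS_FBC() check.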
/**
* intel_update_fbc - enable/disable FBC as needed
* @crtc: CRTC to point the compressor at
@@ -1160,84 +1180,100 @@ static void intel_update_fbc(struct drm_crtc *crtc,
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
+ struct drm_crtc *tmp_crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
+ int crtcs_enabled = 0;
+
+ DRM_DEBUG_KMS("\n");
if (!i915_powersave)
return;
- if (!dev_priv->display.fbc_enabled ||
- !dev_priv->display.enable_fbc ||
- !dev_priv->display.disable_fbc)
+ if (!I915_HAS_FBC(dev))
return;
if (!crtc->fb)
return;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = intel_fb->obj->driver_private;
+ obj_priv = to_intel_bo(intel_fb->obj);
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
+ * - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled)
+ crtcs_enabled++;
+ }
+ DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+ if (crtcs_enabled > 1) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
- if (dev_priv->display.fbc_enabled(crtc)) {
+ if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
- if (fb->pitch > dev_priv->cfb_pitch)
- dev_priv->display.disable_fbc(dev);
- if (obj_priv->fence_reg != dev_priv->cfb_fence)
- dev_priv->display.disable_fbc(dev);
- if (plane != dev_priv->cfb_plane)
- dev_priv->display.disable_fbc(dev);
+ if ((fb->pitch > dev_priv->cfb_pitch) ||
+ (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+ (plane != dev_priv->cfb_plane))
+ intel_disable_fbc(dev);
}
- if (!dev_priv->display.fbc_enabled(crtc)) {
- /* Now try to turn it back on if possible */
- dev_priv->display.enable_fbc(crtc, 500);
- }
+ /* Now try to turn it back on if possible */
+ if (!intel_fbc_enabled(dev))
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (dev_priv->display.fbc_enabled(crtc))
- dev_priv->display.disable_fbc(dev);
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
}
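
Every early-out in intel_update_fbc() now stamps dev_priv->no_fbc_reason before jumping to out_disable, so the cause of a refusal survives for later reporting (e.g. via debugfs). A hypothetical consumer of those codes, using only the FBC_* reasons assigned in this hunk:

static const char *fbc_reason_str(int reason)
{
        switch (reason) {
        case FBC_MULTIPLE_PIPES:   return "more than one pipe active";
        case FBC_STOLEN_TOO_SMALL: return "framebuffer too large for stolen memory";
        case FBC_UNSUPPORTED_MODE: return "interlaced/doublescan mode";
        case FBC_MODE_TOO_LARGE:   return "mode larger than 2048x1536";
        case FBC_BAD_PLANE:        return "FBC limited to plane A here";
        case FBC_NOT_TILED:        return "framebuffer not X-tiled";
        default:                   return "unknown";
        }
}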
-static int
+int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
@@ -1317,7 +1353,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_fb = to_intel_framebuffer(crtc->fb);
obj = intel_fb->obj;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1366,7 +1402,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1375,7 +1411,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, crtc->fb->pitch);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -1395,7 +1432,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = intel_fb->obj->driver_private;
+ obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
intel_increase_pllclock(crtc, true);
@@ -1427,7 +1464,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -1504,6 +1541,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, tries = 0;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask the FDI RX Interrupt symbol_lock and bit_lock bits
+ so the training result can be read back */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ break;
+ }
+ }
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ tries = 0;
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
+
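
Both training stages above poll FDI_RX_IIR until the relevant lock bit sets and give up after six reads. The loop condenses naturally into a bounded-poll helper; the sketch below is illustrative only (wait_for_fdi_lock() is hypothetical, while the registers and bits are the ones used above):

static bool wait_for_fdi_lock(struct drm_i915_private *dev_priv,
                              int iir_reg, u32 lock_bit, int max_tries)
{
        int tries;

        for (tries = 0; tries < max_tries; tries++) {
                u32 temp = I915_READ(iir_reg);

                if (temp & lock_bit) {
                        /* write the bit back to clear the sticky status */
                        I915_WRITE(iir_reg, temp | lock_bit);
                        return true;
                }
        }
        return false;
}

Each stage would then reduce to wait_for_fdi_lock(dev_priv, fdi_rx_iir_reg, FDI_RX_BIT_LOCK, 6) (or FDI_RX_SYMBOL_LOCK for pattern 2) plus a debug message on failure.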
+static int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, i;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask the FDI RX Interrupt symbol_lock and bit_lock bits
+ so the training result can be read back */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
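
On SNB the retry loop also re-programs the TX voltage swing / pre-emphasis from snb_b_fdi_train_param[] between attempts, walking through the four drive levels until the receiver reports lock. A condensed sketch of that sweep, with a hypothetical helper name (registers, masks, and delays as used above):

static bool snb_fdi_sweep_params(struct drm_i915_private *dev_priv,
                                 int tx_reg, int iir_reg, u32 lock_bit)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(snb_b_fdi_train_param); i++) {
                u32 temp = I915_READ(tx_reg);

                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];   /* next drive level */
                I915_WRITE(tx_reg, temp);
                udelay(500);

                temp = I915_READ(iir_reg);
                if (temp & lock_bit) {
                        I915_WRITE(iir_reg, temp | lock_bit);
                        return true;
                }
        }
        return false;
}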
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1517,8 +1767,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1535,8 +1783,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int tries = 5, j, n;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1563,12 +1812,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1578,9 +1821,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
I915_READ(fdi_rx_reg);
udelay(200);
@@ -1623,91 +1872,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
+ /* For PCH output, train the FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(200);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0 &&
+ (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (trans_dpll_sel == 1 &&
+ (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
}
- DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1718,6 +1908,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+ /* enable normal train */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ reg |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ reg |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+ }
+
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
@@ -1732,23 +1976,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
- /* enable normal */
-
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
-
- /* wait one idle pattern time */
- udelay(100);
-
}
intel_crtc_load_lut(crtc);
@@ -1799,6 +2026,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pf_ctl_reg);
}
I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1819,11 +2048,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(fdi_tx_reg, temp);
+ POSTING_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(fdi_rx_reg);
udelay(100);
@@ -1853,6 +2089,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
+
temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
@@ -1861,35 +2098,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(transconf_reg);
udelay(100);
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+
+ }
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
+ /* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
+ /* Disable CPU FDI TX PLL */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
-
/* Wait for the clocks to turn off. */
udelay(100);
break;
@@ -2015,6 +2270,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_wait_for_vblank(dev);
}
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipeconf_reg == PIPEACONF &&
+ (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ goto skip_pipe_off;
+
/* Next, disable display pipes */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -2030,7 +2290,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
}
-
+ skip_pipe_off:
/* Wait for the clocks to turn off. */
udelay(150);
break;
@@ -2111,7 +2371,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2325,6 +2585,30 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -2376,6 +2660,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
struct cxsr_latency {
int is_desktop;
+ int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
@@ -2385,33 +2670,45 @@ struct cxsr_latency {
};
static struct cxsr_latency cxsr_latency_table[] = {
- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
-
- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
-
- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
-
- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
-
- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
-
- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
- int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+ int fsb, int mem)
{
int i;
struct cxsr_latency *latency;
@@ -2422,6 +2719,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
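
With the DDR3 column added, a row matches only when all four keys agree; unknown combinations return NULL and the caller falls back to disabling CxSR. An illustrative lookup against the table above (the wrapper function is hypothetical):

static void example_cxsr_lookup(struct drm_device *dev)
{
        struct cxsr_latency *lat;

        /* mobile part (is_desktop == 0), DDR3-800 on a 667 FSB:
         * matches the {0, 1, 667, 800, 5998, 35998, 6460, 36460}
         * row above, so lat->display_sr == 5998 and
         * lat->cursor_sr == 6460.
         */
        lat = intel_get_cxsr_latency(0, 1, 667, 800);
        if (!lat)
                pineview_disable_cxsr(dev);     /* unknown combination */
}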
@@ -2443,66 +2741,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
DRM_INFO("Big FIFO is disabled\n");
}
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
-
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -2587,6 +2825,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+ int sr_clock;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
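
pineview_update_wm() arms self-refresh only while a single plane is scanning out; with two active planes the FIFO cannot be drained deeply enough. The gate above reduces to the following table (the both-zero row is unreachable in practice, since intel_update_watermarks() returns early when no pipe is enabled):

/*
 *  planea_clock  planeb_clock   result
 *  ------------  ------------   ----------------------------------
 *      > 0            0         enable SR, sr_clock = planea_clock
 *       0            > 0        enable SR, sr_clock = planeb_clock
 *      > 0           > 0        pineview_disable_cxsr()
 *       0             0         (not reached via the caller)
 */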
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size)
{
@@ -2685,11 +2988,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
srwm &= 0x3f;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ if (IS_I965GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ if (IS_I965GM(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2757,11 +3062,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev)) {
+ /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ }
} else {
/* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ } else if (IS_I915GM(dev)) {
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+ }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2796,6 +3112,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
I915_WRITE(FW_BLC, fwater_lo);
}
+#define ILK_LP0_PLANE_LATENCY 700
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int sr_wm, cursor_wm;
+ unsigned long line_time_us;
+ int sr_clock, entries_required;
+ u32 reg_value;
+
+ /* Calculate and update the watermark for plane A */
+ if (planea_clock) {
+ entries_required = ((planea_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planea_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+ planea_wm = ironlake_display_wm_info.max_wm;
+
+ cursora_wm = 16;
+ reg_value = I915_READ(WM0_PIPEA_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursora_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+ "cursor: %d\n", planea_wm, cursora_wm);
+ }
+ /* Calculate and update the watermark for plane B */
+ if (planeb_clock) {
+ entries_required = ((planeb_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planeb_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+ planeb_wm = ironlake_display_wm_info.max_wm;
+
+ cursorb_wm = 16;
+ reg_value = I915_READ(WM0_PIPEB_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+ "cursor: %d\n", planeb_wm, cursorb_wm);
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ if (!planea_clock || !planeb_clock) {
+ int line_count;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+ / 1000;
+
+ /* calculate the self-refresh watermark for display plane */
+ entries_required = line_count * sr_hdisplay * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_srwm_info.cacheline_size);
+ sr_wm = entries_required +
+ ironlake_display_srwm_info.guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries_required = line_count * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_srwm_info.cacheline_size);
+ cursor_wm = entries_required +
+ ironlake_cursor_srwm_info.guard_size;
+
+ /* configure watermark and enable self-refresh */
+ reg_value = I915_READ(WM1_LP_ILK);
+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+ WM1_LP_CURSOR_MASK);
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+ I915_WRITE(WM1_LP_ILK, reg_value);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_wm, cursor_wm);
+
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ }
+}
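
The LP0 plane watermark above works out to "bytes fetched during the latency window, expressed in cachelines, plus a guard". A worked example with illustrative numbers, assuming a 64-byte ILK_FIFO_LINE_SIZE:

/* planea_clock = 148500 kHz (1080p), pixel_size = 4 bytes,
 * ILK_LP0_PLANE_LATENCY = 700 ns:
 *
 *   entries   = ((148500 / 1000) * 4 * 700) / 1000 = 414 bytes
 *   lines     = DIV_ROUND_UP(414, 64)              = 7 cachelines
 *   planea_wm = 7 + guard_size (2)                 = 9
 *
 * which is then clamped against ironlake_display_wm_info.max_wm.
 */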
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -2865,12 +3283,6 @@ static void intel_update_watermarks(struct drm_device *dev)
if (enabled <= 0)
return;
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
-
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
}
@@ -2900,14 +3312,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int refclk, num_outputs = 0;
+ int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -2918,6 +3331,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
int lvds_reg = LVDS;
u32 temp;
int sdvo_pixel_multiply;
@@ -2925,20 +3340,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
- switch (intel_output->type) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (intel_output->needs_tv_clock)
+ if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
@@ -2958,16 +3374,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
break;
}
- num_outputs++;
+ num_connectors++;
}
- if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+ if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
refclk = 120000; /* 120MHz refclk */
} else {
refclk = 48000;
@@ -3025,15 +3441,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IRONLAKE(dev)) {
- int lane, link_bw, bpp;
+ if (HAS_PCH_SPLIT(dev)) {
+ int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
- struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_output(crtc);
- intel_edp_link_config(to_intel_output(edp),
+ intel_edp_link_config(intel_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3042,7 +3456,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- lane = 4;
link_bw = 270000;
}
@@ -3094,6 +3507,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
+ }
+
+ intel_crtc->fdi_lanes = lane;
+
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
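
When no encoder has fixed the lane count, the FDI link is sized from the payload itself: pixel bandwidth with 5% spread-spectrum headroom (the 21/20 factor), divided by the per-lane capacity of link_bw symbols x 8 bits, plus one. Worked with illustrative 1080p numbers:

/* target_clock = 148500 kHz, bpp = 24, link_bw = 270000:
 *
 *   bps  = 148500 * 24 * 21 / 20 = 3742200
 *   lane = 3742200 / (270000 * 8) + 1
 *        = 3742200 / 2160000 + 1 = 1 + 1 = 2 lanes
 */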
@@ -3102,7 +3527,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3574,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -3162,7 +3587,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IRONLAKE(dev))
+ else if (HAS_PCH_SPLIT(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
@@ -3174,7 +3599,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3618,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3214,7 +3639,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
- else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+ else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3227,7 +3652,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3254,14 +3679,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
@@ -3275,6 +3700,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+ udelay(150);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3282,11 +3719,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ lvds |= PORT_TRANS_B_SEL_CPT;
+ else
+ lvds |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ lvds &= ~PORT_TRANS_SEL_MASK;
+ else
+ lvds &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3304,14 +3752,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- else
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ pipeconf |= PIPE_DITHER_TYPE_ST01;
+ } else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf &= ~PIPE_ENABLE_DITHER;
- else
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ } else
lvds &= ~LVDS_ENABLE_DITHER;
}
}
@@ -3320,6 +3771,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting.*/
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+ I915_WRITE(TRANSA_DATA_N1, 0);
+ I915_WRITE(TRANSA_DP_LINK_M1, 0);
+ I915_WRITE(TRANSA_DP_LINK_N1, 0);
+ } else {
+ I915_WRITE(TRANSB_DATA_M1, 0);
+ I915_WRITE(TRANSB_DATA_N1, 0);
+ I915_WRITE(TRANSB_DP_LINK_M1, 0);
+ I915_WRITE(TRANSB_DP_LINK_N1, 0);
+ }
+ }
if (!is_edp) {
I915_WRITE(fp_reg, fp);
@@ -3328,7 +3793,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3360,6 +3825,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vdisplay -= 1;
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_start -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ adjusted_mode->crtc_vsync_end -= 1;
+ adjusted_mode->crtc_vsync_start -= 1;
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+
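
Because the pipe appends two halflines (one full line per frame) to interlaced modes on its own, each programmed vertical timing is written one line short of its nominal value. For an illustrative 1080i mode:

/* nominal:    vdisplay = 1080, vtotal = 1125
 * programmed: crtc_vdisplay = 1079, crtc_vtotal = 1124,
 *             with vblank/vsync start and end likewise reduced by one.
 */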
I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -3375,14 +3852,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3394,6 +3871,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* enable FDI TX PLL too */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ /* enable FDI RX PCDCLK */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+ I915_READ(fdi_rx_reg);
udelay(200);
}
}
@@ -3438,7 +3927,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
/* use legacy palette for Ironlake */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3494,7 +3983,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!bo)
return -ENOENT;
- obj_priv = bo->driver_private;
+ obj_priv = to_intel_bo(bo);
if (bo->size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
@@ -3510,6 +3999,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
+
+ ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
addr = obj_priv->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
@@ -3553,11 +4049,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;
return 0;
-fail:
- mutex_lock(&dev->struct_mutex);
+fail_unpin:
+ i915_gem_object_unpin(bo);
fail_locked:
- drm_gem_object_unreference(bo);
mutex_unlock(&dev->struct_mutex);
+fail:
+ drm_gem_object_unreference_unlocked(bo);
return ret;
}
@@ -3639,9 +4136,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
* detection.
*
* It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements. The pipe will be connected to no other outputs.
+ * its requirements. The pipe will be connected to no other encoders.
*
- * Currently this code will only succeed if there is a pipe with no outputs
+ * Currently this code will only succeed if there is a pipe with no encoders
* configured for it. In the future, it could choose to temporarily disable
* some outputs to free up a pipe for its use.
*
@@ -3654,14 +4151,15 @@ static struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode)
{
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_crtc *supported_crtc = NULL;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3713,8 +4211,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
}
encoder->crtc = crtc;
- intel_output->base.encoder = encoder;
- intel_output->load_detect_temp = true;
+ connector->encoder = encoder;
+ intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
*dpms_mode = intel_crtc->dpms_mode;
@@ -3739,23 +4237,24 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (intel_output->load_detect_temp) {
+ if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_output->base.encoder = NULL;
- intel_output->load_detect_temp = false;
+ connector->encoder = NULL;
+ intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
}
- /* Switch crtc and output back off if necessary */
+ /* Switch crtc and encoder back off if necessary */
if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
if (encoder->crtc == crtc)
encoder_funcs->dpms(encoder, dpms_mode);
@@ -3922,7 +4421,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3932,7 +4431,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -3961,7 +4461,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3975,7 +4475,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ PANEL_UNLOCK_REGS);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -4005,22 +4506,31 @@ static void intel_idle_update(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
+ int enabled = 0;
if (!i915_powersave)
return;
mutex_lock(&dev->struct_mutex);
+ i915_update_gfx_val(dev_priv);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
+ enabled++;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
+ if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
+ DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ }
+
mutex_unlock(&dev->struct_mutex);
}
@@ -4044,9 +4554,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -4058,6 +4576,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc, true);
intel_crtc->busy = true;
@@ -4100,10 +4626,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
+static void do_intel_finish_page_flip(struct drm_device *dev,
+ struct drm_crtc *crtc)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj_priv;
@@ -4118,12 +4644,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
- if (work && !work->pending) {
- obj_priv = work->pending_flip_obj->driver_private;
- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
- obj_priv,
- atomic_read(&obj_priv->pending_flip));
- }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4144,7 +4664,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->pending_flip_obj->driver_private;
+ obj_priv = to_intel_bo(work->pending_flip_obj);
/* Initial scanout buffer will have a 0 pending flip count */
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4153,6 +4673,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
schedule_work(&work->work);
}
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4180,17 +4716,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags;
+ unsigned long flags, offset;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
- RING_LOCALS;
+ u32 flip_mask;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- mutex_lock(&dev->struct_mutex);
-
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4200,10 +4734,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
@@ -4212,13 +4746,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- obj->driver_private);
- kfree(work);
- intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ kfree(work);
+
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ to_intel_bo(obj));
return ret;
}
@@ -4229,20 +4769,37 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
+ if (intel_crtc->plane)
+ flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ else
+ flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+
+ /* Wait for any previous flip to finish */
+ if (IS_GEN3(dev))
+ while (I915_READ(ISR) & flip_mask)
+ ;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = obj_priv->gtt_offset;
+ offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+
BEGIN_LP_RING(4);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
} else {
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(offset);
OUT_RING(MI_NOOP);
}
ADVANCE_LP_RING();
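The offset computed before the ring emission selects this CRTC's viewport within a framebuffer that can be shared between CRTCs. A standalone arithmetic check with made-up numbers (a 3840-wide 32 bpp fb split into two 1920-wide viewports):

#include <stdio.h>

int main(void)
{
	unsigned long gtt_offset = 0x100000;	/* hypothetical bo placement */
	int x = 1920, y = 0;			/* right-hand viewport */
	int pitch = 3840 * 4;			/* bytes per scanline, 32 bpp */
	int bpp = 32;
	unsigned long offset;

	offset = gtt_offset + (unsigned long)(y * pitch) + x * bpp / 8;
	printf("flip base = 0x%lx\n", offset);	/* 0x100000 + 0x1e00 */
	return 0;
}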
@@ -4355,15 +4912,15 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
return crtc;
}
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- if (type_mask & intel_output->clone_mask)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
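possible_clones is a bitmask over positions in the encoder list, not over encoder types: an entry's bit is set when the caller's type_mask intersects that encoder's clone_mask. A standalone sketch with hypothetical masks:

#include <stdio.h>

int main(void)
{
	/* hypothetical clone_mask of each encoder, in list order */
	unsigned clone_masks[] = { 0x1, 0x6, 0x6, 0x8 };
	unsigned type_mask = 0x6;	/* types this encoder can clone with */
	unsigned index_mask = 0;
	int entry;

	for (entry = 0; entry < 4; entry++)
		if (type_mask & clone_masks[entry])
			index_mask |= 1u << entry;	/* bit = list position */

	printf("possible_clones = 0x%x\n", index_mask);	/* 0x6 */
	return 0;
}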
@@ -4374,7 +4931,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
intel_crt_init(dev);
@@ -4382,16 +4939,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
intel_dp_init(dev, DP_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4451,34 +5007,27 @@ static void intel_setup_outputs(struct drm_device *dev)
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
}
- } else if (IS_I8XX(dev))
+ } else if (IS_GEN2(dev))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- encoder->possible_crtcs = intel_output->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
- intel_output->clone_mask);
+ encoder->possible_crtcs = intel_encoder->crtc_mask;
+ encoder->possible_clones = intel_encoder_clones(dev,
+ intel_encoder->clone_mask);
}
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(intel_fb->obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(intel_fb->obj);
kfree(intel_fb);
}
@@ -4498,18 +5047,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4517,42 +5061,41 @@ int intel_framebuffer_create(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
intel_fb->obj = obj;
-
- *fb = &intel_fb->base;
-
return 0;
}
-
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
return NULL;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ ret = intel_framebuffer_init(dev, intel_fb,
+ mode_cmd, obj);
if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(intel_fb);
return NULL;
}
- return fb;
+ return &intel_fb->base;
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
+ .output_poll_changed = intel_fb_output_poll_changed,
};
static struct drm_gem_object *
@@ -4561,7 +5104,7 @@ intel_alloc_power_context(struct drm_device *dev)
struct drm_gem_object *pwrctx;
int ret;
- pwrctx = drm_gem_object_alloc(dev, 4096);
+ pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
@@ -4591,6 +5134,202 @@ err_unref:
return NULL;
}
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+ int i = 0;
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ fstart = fmax;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fmax;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+ fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+ if (i++ > 100) {
+ DRM_ERROR("stuck trying to change perf mode\n");
+ break;
+ }
+ msleep(1);
+ }
+ msleep(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
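So the decoded value is div * 133333 / ((1 << post) * pre). A standalone check with a hypothetically encoded register value:

#include <stdio.h>

static unsigned long pxfreq(unsigned int vidfreq)	/* same math as above */
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;
	return (div * 133333) / ((1 << post) * pre);
}

int main(void)
{
	/* hypothetical encoding: div = 12, post = 1, pre = 2 */
	unsigned int vidfreq = (12 << 16) | (1 << 12) | 2;

	printf("%lu\n", pxfreq(vidfreq));	/* 12 * 133333 / 4 = 399999 */
	return 0;
}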
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
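The P-state weight above reduces to vid^2 * (freq / 1000) * 255 / (127 * 127 * 900), with the error check catching anything that does not fit in a byte. A standalone check at one made-up operating point:

#include <stdio.h>

int main(void)
{
	/* made-up operating point: vid code 100 at a decoded freq of 400000 */
	unsigned long vid = 100, freq = 400000;
	unsigned long val = vid * vid;		/* 10000 */

	val *= (freq / 1000);			/* * 400  -> 4000000 */
	val *= 255;				/* -> 1020000000 */
	val /= (127 * 127 * 900);		/* / 14516100 -> 70 */
	printf("pxw byte = %lu\n", val);	/* comfortably within 0..255 */
	return 0;
}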
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4599,7 +5338,40 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ if (IS_IRONLAKE(dev)) {
+ /* Required for FBC */
+ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ }
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4647,14 +5419,14 @@ void intel_init_clock_gating(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
- obj_priv = dev_priv->pwrctx->driver_private;
+ obj_priv = to_intel_bo(dev_priv->pwrctx);
} else {
struct drm_gem_object *pwrctx;
pwrctx = intel_alloc_power_context(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
+ obj_priv = to_intel_bo(pwrctx);
}
}
@@ -4672,18 +5444,17 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
- /* Only mobile has FBC, leave pointers NULL for other chips */
- if (IS_MOBILE(dev)) {
+ if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+ } else if (IS_I965GM(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4715,30 +5486,114 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IRONLAKE(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3": "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ else if (IS_I9XX(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
} else {
- if (IS_I85X(dev))
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- else if (IS_845G(dev))
+ dev_priv->display.update_wm = i830_update_wm;
+ if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
- dev_priv->display.update_wm = i830_update_wm;
+ }
+}
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+struct intel_quirk intel_quirks[] = {
+ /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+ { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Thinkpad R31 needs pipe A force quirk */
+ { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+ { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
+ /* ThinkPad X40 needs pipe A force quirk */
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* 855 & before need to leave pipe A & dpll A up */
+ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+ struct pci_dev *d = dev->pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
}
}
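The match requires an exact device id but lets PCI_ANY_ID act as a wildcard for either subsystem field. A standalone sketch of the same predicate with simplified types:

#include <stdbool.h>
#include <stdio.h>

#define ANY_ID (~0u)	/* stands in for PCI_ANY_ID */

struct id { unsigned device, sub_vendor, sub_device; };

static bool quirk_matches(const struct id *d, const struct id *q)
{
	return d->device == q->device &&
	       (q->sub_vendor == ANY_ID || d->sub_vendor == q->sub_vendor) &&
	       (q->sub_device == ANY_ID || d->sub_device == q->sub_device);
}

int main(void)
{
	struct id dev = { 0x3582, 0x1028, 0x0123 };	/* hypothetical 855 system */
	struct id q   = { 0x3582, ANY_ID, ANY_ID };	/* the 855 catch-all entry */

	printf("%s\n", quirk_matches(&dev, &q) ? "quirk applies" : "no match");
	return 0;
}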
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int num_pipe;
int i;
drm_mode_config_init(dev);
@@ -4748,6 +5603,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ intel_init_quirks(dev);
+
intel_init_display(dev);
if (IS_I965G(dev)) {
@@ -4768,18 +5625,13 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
if (IS_MOBILE(dev) || IS_I9XX(dev))
- num_pipe = 2;
+ dev_priv->num_pipe = 2;
else
- num_pipe = 1;
+ dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- num_pipe, num_pipe > 1 ? "s" : "");
-
- if (IS_I85X(dev))
- pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
- else if (IS_I9XX(dev) || IS_G4X(dev))
- pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
- for (i = 0; i < num_pipe; i++) {
+ for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
}
@@ -4787,18 +5639,16 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
intel_setup_overlay(dev);
-
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4809,6 +5659,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
+ drm_kms_helper_poll_fini(dev);
+ intel_fbdev_fini(dev);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4827,27 +5680,45 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
- obj_priv = dev_priv->pwrctx->driver_private;
+ obj_priv = to_intel_bo(dev_priv->pwrctx);
I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
I915_READ(PWRCTXA);
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(dev_priv->pwrctx);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
mutex_unlock(&dev->struct_mutex);
drm_mode_config_cleanup(dev);
}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return which encoder is currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- return &intel_output->enc;
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
}
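The scan stops at the first zero slot (the id array is zero-terminated) and returns the first id the mode-object lookup resolves. A standalone sketch with the lookup simulated:

#include <stdio.h>

#define MAX_ENCODER 3	/* stands in for DRM_CONNECTOR_MAX_ENCODER */

static int resolves(unsigned id)	/* stands in for drm_mode_object_find() */
{
	return id == 42;		/* pretend only id 42 exists */
}

static unsigned first_attached(const unsigned ids[MAX_ENCODER])
{
	int i;

	for (i = 0; i < MAX_ENCODER; i++) {
		if (ids[i] == 0)
			break;			/* array is zero-terminated */
		if (resolves(ids[i]))
			return ids[i];		/* first id that resolves wins */
	}
	return 0;
}

int main(void)
{
	unsigned ids[MAX_ENCODER] = { 7, 42, 0 };

	printf("attached encoder id: %u\n", first_attached(ids));
	return 0;
}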
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc14..5dde80f9e652 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -26,6 +26,7 @@
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -47,30 +48,28 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
};
static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
void
-intel_edp_link_config (struct intel_output *intel_output,
+intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
*lane_num = dp_priv->lane_count;
if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -80,9 +79,9 @@ intel_edp_link_config (struct intel_output *intel_output,
}
static int
-intel_dp_max_lane_count(struct intel_output *intel_output)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
if (dp_priv->dpcd[0] >= 0x11) {
@@ -98,9 +97,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
}
static int
-intel_dp_max_link_bw(struct intel_output *intel_output)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_link_bw = dp_priv->dpcd[1];
switch (max_link_bw) {
@@ -126,26 +125,36 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
intel_dp_link_required(struct drm_device *dev,
- struct intel_output *intel_output, int pixel_clock)
+ struct intel_encoder *intel_encoder, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_eDP(intel_output))
+ if (IS_eDP(intel_encoder))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
}
static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 8) / 10;
+}
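The * 8 / 10 factor models 8b/10b channel coding, under which only eight of every ten link bits carry payload. A standalone check using the driver's 270000 (kHz-scale) value for the 2.7 GHz rate:

#include <stdio.h>

static int max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;	/* same math as above */
}

int main(void)
{
	int rate = max_data_rate(270000, 4);	/* 2.7 GHz link, 4 lanes */

	/* 8b/10b leaves 80% of the raw bandwidth for payload */
	printf("max payload rate: %d (raw %d)\n", rate, 270000 * 4);
	return 0;
}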
+
+static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
- int max_lanes = intel_dp_max_lane_count(intel_output);
-
- if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
- > max_link_clock * max_lanes)
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+ int max_lanes = intel_dp_max_lane_count(intel_encoder);
+
+ /* only refuse the mode on non-eDP since we have seen some weird eDP panels
+ which are outside spec tolerances but somehow work by magic */
+ if (!IS_eDP(intel_encoder) &&
+ (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
+ > intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -208,13 +217,13 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_output *intel_output,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -223,19 +232,27 @@ intel_dp_aux_ch(struct intel_output *intel_output,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int try, precharge;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_output))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IRONLAKE(dev))
+ if (IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400 MHz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450 MHz */
+ } else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
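Per the comment above, the divider is just the source clock in MHz halved, targeting a ~2 MHz AUX bit clock. A standalone check against the three fixed values in this hunk:

#include <stdio.h>

int main(void)
{
	/* source clocks in MHz, taken from the comments in this hunk */
	int clocks[] = { 450 /* eDP */, 400 /* SNB eDP */, 125 /* PCH */ };
	int i;

	for (i = 0; i < 3; i++)	/* halving targets a ~2 MHz AUX bit clock */
		printf("%d MHz -> divider %d\n", clocks[i], clocks[i] / 2);
	return 0;
}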
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -248,7 +265,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -312,7 +329,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_output *intel_output,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -329,7 +346,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -344,15 +361,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_output *intel_output,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_output *intel_output,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -371,7 +388,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -397,7 +414,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
- struct intel_output *intel_output = dp_priv->intel_output;
+ struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -436,7 +453,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_output,
+ ret = intel_dp_aux_ch(intel_encoder,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -464,9 +481,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+ struct intel_connector *intel_connector, const char *name)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false;
@@ -479,7 +497,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -488,18 +506,18 @@ static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_output);
- int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+ int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
- int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
<= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
@@ -512,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
}
+
+ if (IS_eDP(intel_encoder)) {
+ /* okay we failed just pick the highest */
+ dp_priv->lane_count = max_lane_count;
+ dp_priv->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ dp_priv->link_bw, dp_priv->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
return false;
}
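The fixup searches lane counts 1, 2, 4 (outer loop) and the two link rates (inner loop) for the first configuration whose post-8b/10b bandwidth covers the mode; only eDP falls through to the forced maximum. A standalone sketch of that search with a hypothetical 1080p requirement:

#include <stdio.h>

static int link_clock(int bw_idx)	/* mirrors the bws[] table, kHz scale */
{
	return bw_idx ? 270000 : 162000;
}

int main(void)
{
	int required = 148500 * 3;	/* hypothetical 1080p mode at 24 bpp */
	int lanes, clock;

	for (lanes = 1; lanes <= 4; lanes <<= 1)
		for (clock = 0; clock <= 1; clock++) {
			int avail = link_clock(clock) * lanes * 8 / 10;

			if (required <= avail) {
				printf("pick %d lane(s), rate idx %d\n",
				       lanes, clock);
				return 0;
			}
		}
	puts("no configuration fits");	/* eDP forces the maximum instead */
	return 0;
}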
@@ -554,23 +584,26 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
/*
- * Find the lane count in the intel_output private
+ * Find the lane count in the intel_encoder private
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
- if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
}
@@ -584,7 +617,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -625,16 +658,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct drm_device *dev = encoder->dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@@ -655,18 +696,18 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
- * Check for DPCD version > 1.1,
- * enable enahanced frame stuff in that case
+ * Check for DPCD version > 1.1 and enhanced framing support
*/
- if (dp_priv->dpcd[0] >= 0x11) {
+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
- if (intel_crtc->pipe == 1)
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
/* don't miss the required settings for eDP */
dp_priv->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
@@ -676,6 +717,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+static void ironlake_edp_panel_on(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
+
+ pp_status = I915_READ(PCH_PP_STATUS);
+ if (pp_status & PP_ON)
+ return;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
+
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+
+ pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
+static void ironlake_edp_panel_off(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
+
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel off wait timed out\n");
+
+ /* Make sure VDD is enabled so DP AUX will work */
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
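Both helpers poll PCH_PP_STATUS against a jiffies deadline instead of sleeping a fixed worst-case time. A standalone sketch of the same poll-with-deadline shape, with the clock and the status read simulated:

#include <stdbool.h>
#include <stdio.h>

static int ticks;		/* simulated jiffies */

static bool panel_on(void)	/* simulated PCH_PP_STATUS read */
{
	return ++ticks >= 5;	/* pretend the panel needs five polls */
}

int main(void)
{
	int deadline = ticks + 100;	/* a deadline, not a sleep length */
	bool on;

	do {
		on = panel_on();
	} while (!on && ticks < deadline);

	if (on)
		printf("panel came up after %d polls\n", ticks);
	else
		printf("timed out after %d polls\n", ticks);
	return 0;
}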
static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -701,23 +787,27 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- struct drm_device *dev = intel_output->base.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
- intel_dp_link_down(intel_output, dp_priv->DP);
- if (IS_eDP(intel_output))
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (IS_eDP(intel_encoder)) {
ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
+ }
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_output))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (IS_eDP(intel_encoder)) {
+ ironlake_edp_panel_on(dev);
ironlake_edp_backlight_on(dev);
+ }
}
}
dp_priv->dpms_mode = mode;
@@ -728,12 +818,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_output *intel_output,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_output,
+ ret = intel_dp_aux_native_read(intel_encoder,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -748,20 +838,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
-static void
-intel_dp_save(struct drm_connector *connector)
-{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
-}
-
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -824,7 +900,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_output *intel_output,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -891,6 +967,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -941,15 +1036,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_output *intel_output,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -957,11 +1052,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
if (first)
intel_wait_for_vblank(dev);
- intel_dp_aux_native_write_1(intel_output,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_output,
+ ret = intel_dp_aux_native_write(intel_encoder,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -970,12 +1065,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
}
static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
@@ -984,30 +1079,45 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
+ u32 reg;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_output, 0x100,
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1032,7 +1142,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
}
/* channel equalization */
@@ -1040,17 +1150,29 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_output, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1063,56 +1185,55 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
- intel_dp_aux_native_write_1(intel_output,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(dp_priv->output_reg, DP);
POSTING_READ(dp_priv->output_reg);
udelay(100);
}
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
+ }
udelay(17000);
- if (IS_eDP(intel_output))
+ if (IS_eDP(intel_encoder))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(dp_priv->output_reg);
}
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_output, dp_priv->save_DP);
-}
-
/*
* According to DP spec
* 5.1.2:
@@ -1123,38 +1244,41 @@ intel_dp_restore(struct drm_connector *connector)
*/
static void
-intel_dp_check_link_status(struct intel_output *intel_output)
+intel_dp_check_link_status(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_output->enc.crtc)
+ if (!intel_encoder->enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_output, link_status)) {
- intel_dp_link_down(intel_output, dp_priv->DP);
+ if (!intel_dp_get_link_status(intel_encoder, link_status)) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
return;
}
if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
+ if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
return status;
}
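
ironlake_dp_detect() reduces detection to one question: does a four-byte native AUX read of DPCD address 0x000 return data, and is the revision byte non-zero? A minimal stand-alone sketch of that decision, with a stubbed aux_native_read() returning made-up DPCD bytes (0x11 would mean DPCD revision 1.1):

/* dpcd_probe_demo.c -- connected iff the DPCD revision byte reads non-zero */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* invented stand-in for the AUX-channel native read in the patch */
static int aux_native_read(uint16_t addr, uint8_t *buf, int len)
{
        static const uint8_t fake_dpcd[4] = { 0x11, 0x0a, 0x84, 0x01 };
        if (addr != 0x000 || len > 4)
                return -1;
        memcpy(buf, fake_dpcd, len);
        return len;
}

int main(void)
{
        uint8_t dpcd[4];
        bool connected = false;

        if (aux_native_read(0x000, dpcd, sizeof(dpcd)) == sizeof(dpcd))
                connected = (dpcd[0] != 0);     /* rev 0 => nothing there */
        printf("DPCD: %02x%02x%02x%02x connected=%d\n",
               dpcd[0], dpcd[1], dpcd[2], dpcd[3], connected);
        return 0;
}
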
@@ -1167,28 +1291,19 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
dp_priv->has_audio = false;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@@ -1209,7 +1324,7 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_output,
+ if (intel_dp_aux_native_read(intel_encoder,
0x000, dp_priv->dpcd,
sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
@@ -1221,20 +1336,21 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_output);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_output)) {
+ if (IS_eDP(intel_encoder)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1248,13 +1364,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1267,8 +1379,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@@ -1277,12 +1387,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1290,12 +1405,34 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
};
void
-intel_dp_hot_plug(struct intel_output *intel_output)
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_output);
+ intel_dp_check_link_status(intel_encoder);
+}
+
+/* Return which DP port should be selected for transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
+ }
+ return -1;
}
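
intel_trans_dp_port_sel() is a straight scan: among all encoders currently bound to the CRTC, return the output register of the DisplayPort one, or -1 if there is none. The same logic over a plain array standing in for the kernel's mode_config encoder list (type and register values here are illustrative):

/* port_sel_demo.c -- find the DP output register driving a given CRTC */
#include <stdio.h>

#define INTEL_OUTPUT_DISPLAYPORT 7      /* illustrative value */

struct encoder { int crtc_id; int type; int output_reg; };

static int trans_dp_port_sel(const struct encoder *encs, int n, int crtc_id)
{
        for (int i = 0; i < n; i++) {
                if (encs[i].crtc_id != crtc_id)
                        continue;       /* not bound to this CRTC */
                if (encs[i].type == INTEL_OUTPUT_DISPLAYPORT)
                        return encs[i].output_reg;
        }
        return -1;      /* no DP encoder on this CRTC */
}

int main(void)
{
        struct encoder encs[] = {
                { .crtc_id = 0, .type = 1, .output_reg = 0 },
                { .crtc_id = 1, .type = INTEL_OUTPUT_DISPLAYPORT,
                  .output_reg = 0xe4200 },      /* illustrative offset */
        };
        printf("reg=0x%x\n", trans_dp_port_sel(encs, 2, 1));
        return 0;
}
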
void
@@ -1303,53 +1440,62 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
- intel_output = kcalloc(sizeof(struct intel_output) +
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
- if (!intel_output)
+ if (!intel_encoder)
return;
- dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
- connector = &intel_output->base;
+ dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
if (output_reg == DP_A)
- intel_output->type = INTEL_OUTPUT_EDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
else
- intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (output_reg == DP_B || output_reg == PCH_DP_B)
- intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
- intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
else if (output_reg == DP_D || output_reg == PCH_DP_D)
- intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_output))
- intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ if (IS_eDP(intel_encoder))
+ intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- dp_priv->intel_output = intel_output;
+ dp_priv->intel_encoder = intel_encoder;
dp_priv->output_reg = output_reg;
dp_priv->has_audio = false;
dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
- intel_output->dev_priv = dp_priv;
+ intel_encoder->dev_priv = dp_priv;
- drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_encoder->enc);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
@@ -1377,10 +1523,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_output, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
- intel_output->ddc_bus = &dp_priv->adapter;
- intel_output->hot_plug = intel_dp_hot_plug;
+ intel_encoder->ddc_bus = &dp_priv->adapter;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A) {
/* initialize panel mode from VBT if available for eDP */
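
Throughout these init paths the encoder and its variant-specific private data come from a single allocation: kcalloc() sizes the block as sizeof(struct intel_encoder) plus the priv size, and the priv pointer is simply (priv_type *)(intel_encoder + 1). A userspace sketch of the same layout trick, with simplified struct definitions:

/* tail_alloc_demo.c -- one allocation for the object plus its private data */
#include <stdio.h>
#include <stdlib.h>

struct encoder { int type; void *dev_priv; };
struct dp_priv { int output_reg; int lane_count; };

int main(void)
{
        /* mirrors kcalloc(sizeof(encoder) + sizeof(dp_priv), 1, GFP_KERNEL) */
        struct encoder *enc = calloc(1, sizeof(struct encoder) +
                                        sizeof(struct dp_priv));
        if (!enc)
                return 1;

        /* the private block starts right past the base struct */
        struct dp_priv *priv = (struct dp_priv *)(enc + 1);
        enc->dev_priv = priv;
        priv->output_reg = 0x64100;     /* illustrative register offset */

        printf("enc=%p priv=%p (adjacent)\n", (void *)enc, (void *)priv);
        free(enc);      /* a single free releases both */
        return 0;
}
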
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..2f7970be9051 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,9 +95,7 @@ struct intel_framebuffer {
};
-struct intel_output {
- struct drm_connector base;
-
+struct intel_encoder {
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@@ -105,11 +103,16 @@ struct intel_output {
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
- void (*hot_plug)(struct intel_output *);
+ void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
};
+struct intel_connector {
+ struct drm_connector base;
+ void *dev_priv;
+};
+
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@@ -149,18 +152,19 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_output(x) container_of(x, struct intel_output, base)
-#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
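
The rewritten macros are all container_of() casts: given a pointer to an embedded member, recover the enclosing structure. A stand-alone illustration with a local container_of definition mirroring the kernel's; the struct bodies are invented stand-ins, far smaller than the real ones:

/* container_of_demo.c -- recover the wrapper struct from an embedded member */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder { int id; };
struct intel_encoder {
        struct drm_encoder enc;         /* embedded base object */
        int type;
};

int main(void)
{
        struct intel_encoder ie = { .enc = { .id = 7 }, .type = 3 };

        /* the DRM core only ever hands back the embedded base pointer... */
        struct drm_encoder *base = &ie.enc;

        /* ...and enc_to_intel_encoder() recovers the full wrapper */
        struct intel_encoder *full =
                container_of(base, struct intel_encoder, enc);
        printf("id=%d type=%d\n", full->enc.id, full->type);
        return 0;
}
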
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_output *intel_output);
-extern bool intel_ddc_probe(struct intel_output *intel_output);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -175,7 +179,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern int intel_panel_fitter_pipe (struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@@ -191,32 +195,39 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
-extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
+extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_init_clock_gating(struct drm_device *dev);
+extern void ironlake_enable_drps(struct drm_device *dev);
+extern void ironlake_disable_drps(struct drm_device *dev);
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_gem_object *obj);
+
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
@@ -227,4 +238,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a4d2606de778..227feca7cf8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -25,6 +25,7 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -79,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
u32 dvo_reg = dvo->dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -95,40 +96,12 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_dvo_save(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -149,8 +122,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -185,8 +158,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -240,23 +213,25 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_output);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -274,39 +249,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
-}
-#endif
-
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
@@ -317,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -327,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -351,8 +310,9 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -382,24 +342,31 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
- intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
+ intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
return;
+ }
/* Set up the DDC bus */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_output->ddc_bus)
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+ if (!intel_encoder->ddc_bus)
goto free_intel;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_output->base;
+ struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -434,11 +401,11 @@ void intel_dvo_init(struct drm_device *dev)
if (!ret)
continue;
- intel_output->type = INTEL_OUTPUT_DVO;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
- intel_output->clone_mask =
+ intel_encoder->clone_mask =
(1 << INTEL_DVO_TMDS_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
drm_connector_init(dev, connector,
@@ -447,7 +414,7 @@ void intel_dvo_init(struct drm_device *dev)
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
- intel_output->clone_mask =
+ intel_encoder->clone_mask =
(1 << INTEL_DVO_LVDS_CLONE_BIT);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
@@ -462,16 +429,16 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- intel_output->dev_priv = dvo;
- intel_output->i2c_bus = i2cbus;
+ intel_encoder->dev_priv = dvo;
+ intel_encoder->i2c_bus = i2cbus;
- drm_encoder_init(dev, &intel_output->enc,
+ drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_output->enc,
+ drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -489,10 +456,11 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- intel_i2c_destroy(intel_output->ddc_bus);
+ intel_i2c_destroy(intel_encoder->ddc_bus);
/* Didn't find a chip, so tear down. */
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_output);
+ kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcbe5905..3e18c9e7729b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -30,11 +30,11 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
-#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intelfb_par {
+struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
- struct intelfb_par *par;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@@ -126,76 +76,72 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
+ fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
- obj_priv = fbo->driver_private;
+ obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, 64*1024);
+ ret = intel_pin_and_fence_fb_obj(dev, fbo);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
}
/* Flush everything out, we'll be doing GTT only from now on */
- i915_gem_object_set_to_gtt_domain(fbo, 1);
-
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
+ ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
+ DRM_ERROR("failed to bind fb: %d.\n", ret);
goto out_unpin;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
- par = info->par;
+ info->par = ifbdev;
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
-
info->fbops = &intelfb_ops;
-
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
- info->aperture_size = pci_resource_len(dev->pdev, 2);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
- info->aperture_size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
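
The pitch and size arithmetic at the top of intelfb_create() is worth seeing with numbers plugged in; for a 1366x768 32bpp surface, the 64-byte pitch padding is what makes the odd width safe. A small check program using the same ALIGN math (values chosen for illustration):

/* fb_size_demo.c -- pitch and size math used when allocating the fb object */
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE       4096u

int main(void)
{
        unsigned width = 1366, height = 768, bpp = 32;

        /* bytes per pixel, rounded up; pitch padded to a 64-byte multiple */
        unsigned pitch = ALIGN(width * ((bpp + 1) / 8), 64);
        unsigned size  = ALIGN(pitch * height, PAGE_SIZE);

        printf("pitch=%u size=%u\n", pitch, size);      /* 5504, 4227072 */
        return 0;
}
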
@@ -208,12 +154,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,16 +177,13 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
+ fb->width, fb->height,
+ obj_priv->gtt_offset, fbo);
+
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
@@ -246,35 +195,92 @@ out:
return ret;
}
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
int ret;
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
}
-EXPORT_SYMBOL(intelfb_probe);
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
-
- if (info) {
- struct intelfb_par *par = info->par;
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj)
+ drm_gem_object_unreference(ifb->obj);
+
+ return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ dev_priv->num_pipe,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ kfree(ifbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
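
intel_fbdev_init() and intel_fbdev_fini() follow the usual publish-then-configure and NULL-guarded teardown pairing: the pointer is stored in dev_priv before configuration, and fini bails out quietly if init never ran. A toy model of that ownership pattern; the struct names here are invented for illustration:

/* fbdev_lifecycle_demo.c -- the init/fini ownership pattern above */
#include <stdio.h>
#include <stdlib.h>

struct fbdev { int configured; };
struct device { struct fbdev *fbdev; };

static int fbdev_init(struct device *dev)
{
        struct fbdev *f = calloc(1, sizeof(*f));
        if (!f)
                return -1;
        dev->fbdev = f;         /* publish before configuring */
        f->configured = 1;      /* stands in for the initial-config step */
        return 0;
}

static void fbdev_fini(struct device *dev)
{
        if (!dev->fbdev)        /* init may have failed or never run */
                return;
        free(dev->fbdev);
        dev->fbdev = NULL;      /* makes fini safe to call again */
}

int main(void)
{
        struct device dev = { 0 };
        if (fbdev_init(&dev) == 0)
                printf("configured=%d\n", dev.fbdev->configured);
        fbdev_fini(&dev);
        fbdev_fini(&dev);       /* idempotent */
        return 0;
}
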
MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed761..83bd764b000e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -27,6 +27,7 @@
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
@@ -38,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
- u32 save_SDVOX;
bool has_hdmi_sink;
};
@@ -50,8 +50,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI |
@@ -59,11 +59,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
SDVO_VSYNC_ACTIVE_HIGH |
SDVO_HSYNC_ACTIVE_HIGH;
- if (hdmi_priv->has_hdmi_sink)
+ if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
+ if (HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+ }
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
@@ -73,8 +80,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 temp;
temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -82,7 +89,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround: the enable bit must be toggled off and on for 12bpc, but
* we do this unconditionally, as it proves more stable in testing.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,33 +106,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround: write this twice, as the first write can otherwise
* be masked.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
-
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
-
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -150,21 +136,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(connector,
+ intel_encoder->ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_output->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -173,24 +160,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_output);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -203,8 +187,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@@ -213,12 +195,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -229,63 +216,72 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
- intel_output = kcalloc(sizeof(struct intel_output) +
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
- if (!intel_output)
+ if (!intel_encoder)
return;
- hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1);
- connector = &intel_output->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_output->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
- intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
- intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
- intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
- intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+ intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
- intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+ intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
- if (!intel_output->ddc_bus)
+ if (!intel_encoder->ddc_bus)
goto err_connector;
hdmi_priv->sdvox_reg = sdvox_reg;
- intel_output->dev_priv = hdmi_priv;
+ intel_encoder->dev_priv = hdmi_priv;
- drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_encoder->enc);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -301,7 +297,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(intel_encoder);
+ kfree(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8ab..c2649c7df14c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -26,6 +26,7 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
@@ -128,7 +129,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_GMBUS0, 0);
} else {
I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..0eab8df5bf7e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -30,6 +30,7 @@
#include <acpi/button.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -56,7 +57,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +75,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -89,17 +90,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg;
+ u32 pp_status, ctl_reg, status_reg, lvds_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
+ lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
status_reg = PP_STATUS;
+ lvds_reg = LVDS;
}
if (on) {
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
+
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
do {
@@ -115,6 +121,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
do {
pp_status = I915_READ(status_reg);
} while (pp_status & PP_ON);
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
}
}
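
intel_lvds_set_power() now brackets the power-sequencer handshake with the port enable bit: set the target state in the control register, then poll the status register until PP_ON reflects it. A compact simulation of that write-then-poll loop; the register accessors are invented models of I915_READ/I915_WRITE, and the fake hardware settles instantly:

/* pp_sequence_demo.c -- write control, poll status until the panel settles */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POWER_TARGET_ON (1u << 0)
#define PP_ON           (1u << 31)

static uint32_t pp_control, pp_status;

static void reg_write(uint32_t *r, uint32_t v) { *r = v; }
static uint32_t reg_read(const uint32_t *r)
{
        /* hardware model: status follows control immediately */
        if (r == &pp_status)
                return (pp_control & POWER_TARGET_ON) ? PP_ON : 0;
        return *r;
}

static void lvds_set_power(bool on)
{
        if (on) {
                reg_write(&pp_control, pp_control | POWER_TARGET_ON);
                while ((reg_read(&pp_status) & PP_ON) == 0)
                        ;       /* busy-wait until the panel reports on */
        } else {
                reg_write(&pp_control, pp_control & ~POWER_TARGET_ON);
                while (reg_read(&pp_status) & PP_ON)
                        ;
        }
}

int main(void)
{
        lvds_set_power(true);
        printf("status=0x%08x\n", reg_read(&pp_status));
        lvds_set_power(false);
        printf("status=0x%08x\n", reg_read(&pp_status));
        return 0;
}
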
@@ -130,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
-static void intel_lvds_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (IS_IRONLAKE(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (IS_IRONLAKE(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -230,8 +170,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0;
int left_border = 0, right_border = 0, top_border = 0;
int bottom_border = 0;
@@ -297,7 +237,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +267,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +488,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -578,8 +518,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -587,7 +527,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
/*
@@ -599,53 +539,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
-/* Some lid devices report incorrect lid status, assume they're connected */
-static const struct dmi_system_id bad_lid_status[] = {
- {
- .ident = "Compaq nx9020",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "3084"),
- },
- },
- {
- .ident = "Samsung SX20S",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
- DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
- },
- },
- {
- .ident = "Aspire One",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
- },
- },
- {
- .ident = "Aspire 1810T",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
- },
- },
- {
- .ident = "PC-81005",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
- },
- },
- {
- .ident = "Clevo M5x0N",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
- DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
- },
- },
- { }
-};
-
/**
* Detect the LVDS connection.
*
@@ -655,10 +548,14 @@ static const struct dmi_system_id bad_lid_status[] = {
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
- if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
- status = connector_status_disconnected;
+ /* ACPI lid methods were generally unreliable in this generation, so
+ * don't even bother.
+ */
+ if (IS_GEN2(dev) || IS_GEN3(dev))
+ return connector_status_connected;
return status;
}
@@ -669,14 +566,17 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = intel_ddc_get_modes(intel_output);
+ if (dev_priv->lvds_edid_good) {
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
/* Didn't get an EDID, so
* set wide sync ranges so we get all modes
@@ -699,6 +599,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
+
+/* The GPU hangs on these systems if a modeset is performed on lid open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
/*
* Lid events. Note the use of 'modeset_on_lid':
* - we set it on lid close, and reset it on open
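The intel_no_modeset_on_lid table above uses the kernel's standard DMI
quirk idiom. A minimal, self-contained sketch of that idiom (not part of
this commit; the table, callback, and helper names are hypothetical, while
struct dmi_system_id, DMI_MATCH() and dmi_check_system() are real kernel
API):

#include <linux/dmi.h>

static int example_quirk_callback(const struct dmi_system_id *id)
{
	pr_debug("applying quirk for %s\n", id->ident);
	return 1;	/* non-zero: stop scanning, count as a match */
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_quirk_callback,
		.ident = "Some Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SOMEVENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SOMEPRODUCT"),
		},
	},
	{ }	/* terminating entry */
};

static bool example_needs_quirk(void)
{
	/* dmi_check_system() returns how many entries matched */
	return dmi_check_system(example_quirk_table) != 0;
}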
@@ -722,6 +642,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
if (connector)
connector->status = connector->funcs->detect(connector);
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ return NOTIFY_OK;
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
@@ -749,11 +672,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -766,13 +686,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output =
- to_intel_output(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -806,13 +727,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -822,7 +741,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -892,6 +816,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
{ } /* terminating entry */
};
@@ -1002,7 +934,8 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1020,7 +953,7 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
@@ -1030,43 +963,51 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output) +
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_lvds_priv), GFP_KERNEL);
- if (!intel_output) {
+ if (!intel_encoder) {
return;
}
- connector = &intel_output->base;
- encoder = &intel_output->enc;
- drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ encoder = &intel_encoder->enc;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- intel_output->type = INTEL_OUTPUT_LVDS;
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
- intel_output->crtc_mask = (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
- intel_output->dev_priv = lvds_priv;
+ lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
+ intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_output->base,
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_FULLSCREEN);
- lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
+ DRM_MODE_SCALE_ASPECT);
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1078,8 +1019,8 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_output->ddc_bus) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
+ if (!intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed;
@@ -1089,7 +1030,10 @@ void intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_ddc_get_modes(intel_output);
+ dev_priv->lvds_edid_good = true;
+
+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
+ dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
mutex_lock(&dev->mode_config.mutex);
@@ -1123,7 +1067,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Ironlake: FIXME: if this still fails, don't try to read the pipe mode for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1144,7 +1088,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1167,9 +1111,10 @@ out:
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_output);
+ kfree(intel_encoder);
+ kfree(intel_connector);
}
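With intel_output split into separate intel_encoder and intel_connector
objects, intel_lvds_init() above now has two allocations to unwind on
failure, freed in reverse order. A minimal sketch of the pattern in
isolation (the foo_* names are hypothetical, not the driver's types):

#include <linux/slab.h>

struct foo_encoder { int id; /* ... */ };
struct foo_connector { int id; /* ... */ };

static int foo_init(void)
{
	struct foo_encoder *enc;
	struct foo_connector *con;

	enc = kzalloc(sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con) {
		kfree(enc);	/* unwind the earlier allocation */
		return -ENOMEM;
	}

	/* ... register both objects with the modesetting core; any
	 * later failure path must free both, as the "failed:" label
	 * above does ...
	 */
	return 0;
}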
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 67e2f4632a24..4b1fd3d9c73c 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -23,6 +23,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include "drmP.h"
@@ -33,7 +34,7 @@
* intel_ddc_probe
*
*/
-bool intel_ddc_probe(struct intel_output *intel_output)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder)
{
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
@@ -53,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
}
};
- intel_i2c_quirk_set(intel_output->base.dev, true);
- ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_output->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, true);
+ ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@@ -65,22 +66,23 @@ bool intel_ddc_probe(struct intel_output *intel_output)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_output *intel_output)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->base.dev, true);
- edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
- intel_i2c_quirk_set(intel_output->base.dev, false);
+ intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+ intel_i2c_quirk_set(connector->dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_output->base,
- edid);
- ret = drm_add_edid_modes(&intel_output->base, edid);
- intel_output->base.display_info.raw_edid = NULL;
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
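The refactored intel_ddc_get_modes() takes the connector and an explicit
i2c adapter, so a caller no longer needs an intel_output to name its DDC
bus. A sketch of a typical ->get_modes() hook built on it, mirroring the
LVDS caller above (foo_get_modes is a hypothetical name; the helpers are
the ones introduced in this series):

static int foo_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	/* probe EDID on the encoder's DDC bus and add the modes found */
	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}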
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e9..d7ad5139d17c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
#define OFC_UPDATE 0x1
#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
if (OVERLAY_NONPHYSICAL(overlay->dev))
io_mapping_unmap_atomic(overlay->virt_addr);
overlay->virt_addr = NULL;
- I915_READ(OVADD); /* flush wc cashes */
-
return;
}
@@ -216,29 +211,28 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
+ drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
overlay->active = 1;
overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -255,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -267,14 +260,13 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(4);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(2);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
}
static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -283,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
u32 tmp;
- RING_LOCALS;
if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret == 0) {
overlay->last_flip_req = 0;
@@ -305,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -323,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -338,40 +331,40 @@ static int intel_overlay_off(struct intel_overlay *overlay)
/* wait for overlay to go idle */
overlay->hw_wedged = SWITCH_OFF_STAGE_1;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
/* turn overlay off */
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -386,7 +379,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
+ obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@@ -403,28 +396,29 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
int interruptible)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr;
int ret;
- RING_LOCALS;
if (overlay->hw_wedged == HW_WEDGED)
return -EIO;
if (overlay->last_flip_req == 0) {
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
}
- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -435,21 +429,20 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req = i915_add_request(dev, NULL,
+ 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible);
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -482,7 +475,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -739,7 +732,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+ struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -824,7 +817,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
intel_overlay_continue(overlay, scale_changed);
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = new_bo->driver_private;
+ overlay->vid_bo = to_intel_bo(new_bo);
return 0;
@@ -1083,14 +1076,18 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj)
- return -ENOENT;
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+ }
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
new_bo = drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle);
- if (!new_bo)
- return -ENOENT;
+ if (!new_bo) {
+ ret = -ENOENT;
+ goto out_free;
+ }
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
@@ -1179,7 +1176,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference(new_bo);
+ drm_gem_object_unreference_unlocked(new_bo);
+out_free:
kfree(params);
return ret;
@@ -1351,10 +1349,10 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = reg_bo->driver_private;
+ overlay->reg_bo = to_intel_bo(reg_bo);
if (OVERLAY_NONPHYSICAL(dev)) {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
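Every request in the overlay code now names the ring it targets. The
recurring sequence — emit commands, queue a request, wait for it — reduces
to the following skeleton (a sketch only; the real call sites above add
the overlay-specific commands and wedge-state handling):

static int foo_emit_and_wait(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	BEGIN_LP_RING(2);
	OUT_RING(MI_NOOP);	/* stand-in for the real commands */
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();

	seqno = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
	if (seqno == 0)		/* request allocation failed */
		return -ENOMEM;

	/* block (interruptibly here) until the ring passes the breadcrumb */
	return i915_do_wait_request(dev, seqno, 1, &dev_priv->render_ring);
}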
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 000000000000..26362f8495a8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+static void
+render_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+ trace_i915_gem_request_flush(dev, ring->next_seqno,
+ invalidate_domains, flush_domains);
+
+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, cmd);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ obj_priv = to_intel_bo(ring->gem_object);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(ring->regs.ctl, 0);
+ I915_WRITE(ring->regs.head, 0);
+ I915_WRITE(ring->regs.tail, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+ head = ring->get_head(dev, ring);
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_ERROR("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+
+ I915_WRITE(ring->regs.head, 0);
+
+ DRM_ERROR("%s head forced to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ }
+
+ I915_WRITE(ring->regs.ctl,
+ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_NO_REPORT | RING_VALID);
+
+ head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ /* If the head is still not zero, the ring is dead */
+ if (head != 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ return -EIO;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(dev, ring);
+ if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ }
+ return ret;
+}
+
+#define PIPE_CONTROL_FLUSH(addr) \
+do { \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL | 2); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+} while (0)
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static u32
+render_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ seqno = intel_ring_get_seqno(dev, ring);
+
+ if (IS_GEN6(dev)) {
+ BEGIN_LP_RING(6);
+ OUT_RING(GFX_OP_PIPE_CONTROL | 3);
+ OUT_RING(PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+ return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+ if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
+}
+
+void
+bsd_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_FLUSH);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return init_ring_common(dev, ring);
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ seqno = intel_ring_get_seqno(dev, ring);
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+ return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+ (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+ return 0;
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, cliprects, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+ intel_ring_emit(dev, ring,
+ exec_start | MI_BATCH_NON_SECURE);
+ intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+ intel_ring_emit(dev, ring, 0);
+ } else {
+ intel_ring_begin(dev, ring, 4);
+ if (IS_I965G(dev)) {
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | (2 << 6)
+ | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ } else {
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+ | (2 << 6));
+ intel_ring_emit(dev, ring, exec_start |
+ MI_BATCH_NON_SECURE);
+ }
+ }
+ intel_ring_advance(dev, ring);
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+ obj_priv = to_intel_bo(obj);
+
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ring->status_page.obj = NULL;
+
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj_priv->gtt_offset;
+ ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ if (ring->status_page.page_addr == NULL) {
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ ring->setup_status_page(dev, ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ int ret;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ ring->dev = dev;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(dev, ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ring->gem_object = obj;
+
+ ret = i915_gem_object_pin(obj, ring->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ goto cleanup;
+ }
+
+ obj_priv = to_intel_bo(obj);
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ring->virtual_start = ring->map.handle;
+ ret = ring->init(dev, ring);
+ if (ret != 0) {
+ intel_cleanup_ring_buffer(dev, ring);
+ return ret;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ return ret;
+cleanup:
+ cleanup_status_page(dev, ring);
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->gem_object == NULL)
+ return;
+
+ drm_core_ioremapfree(&ring->map, dev);
+
+ i915_gem_object_unpin(ring->gem_object);
+ drm_gem_object_unreference(ring->gem_object);
+ ring->gem_object = NULL;
+ cleanup_status_page(dev, ring);
+}
+
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ unsigned int *virt;
+ int rem;
+ rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = intel_wait_ring_buffer(dev, ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = (unsigned int *)(ring->virtual_start + ring->tail);
+ rem /= 4;
+ while (rem--)
+ *virt++ = MI_NOOP;
+
+ ring->tail = 0;
+ ring->space = ring->head - 8;
+
+ return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n)
+{
+ unsigned long end;
+
+ trace_i915_ring_wait_begin(dev);
+ end = jiffies + 3 * HZ;
+ do {
+ ring->head = ring->get_head(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end(dev);
+ return 0;
+ }
+
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+ yield();
+ } while (!time_after(jiffies, end));
+ trace_i915_ring_wait_end(dev);
+ return -EBUSY;
+}
+
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int num_dwords)
+{
+ int n = 4*num_dwords;
+ if (unlikely(ring->tail + n > ring->size))
+ intel_wrap_ring_buffer(dev, ring);
+ if (unlikely(ring->space < n))
+ intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+ ring->tail &= ring->size - 1;
+ ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ BUG_ON((len & (4 - 1)) != 0); /* len must be a multiple of 4 */
+ intel_ring_begin(dev, ring, len/4);
+ memcpy(virt, data, len);
+ ring->tail += len;
+ ring->tail &= ring->size - 1;
+ ring->space -= len;
+ intel_ring_advance(dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ seqno = ring->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++ring->next_seqno == 0)
+ ring->next_seqno = 1;
+ return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+ .name = "render ring",
+ .regs = {
+ .ctl = PRB0_CTL,
+ .head = PRB0_HEAD,
+ .tail = PRB0_TAIL,
+ .start = PRB0_START
+ },
+ .ring_flag = I915_EXEC_RENDER,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = render_setup_status_page,
+ .init = init_render_ring,
+ .get_head = render_ring_get_head,
+ .get_tail = render_ring_get_tail,
+ .get_active_head = render_ring_get_active_head,
+ .advance_ring = render_ring_advance_ring,
+ .flush = render_ring_flush,
+ .add_request = render_ring_add_request,
+ .get_gem_seqno = render_ring_get_gem_seqno,
+ .user_irq_get = render_ring_get_user_irq,
+ .user_irq_put = render_ring_put_user_irq,
+ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+ .name = "bsd ring",
+ .regs = {
+ .ctl = BSD_RING_CTL,
+ .head = BSD_RING_HEAD,
+ .tail = BSD_RING_TAIL,
+ .start = BSD_RING_START
+ },
+ .ring_flag = I915_EXEC_BSD,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = bsd_setup_status_page,
+ .init = init_bsd_ring,
+ .get_head = bsd_ring_get_head,
+ .get_tail = bsd_ring_get_tail,
+ .get_active_head = bsd_ring_get_active_head,
+ .advance_ring = bsd_ring_advance_ring,
+ .flush = bsd_ring_flush,
+ .add_request = bsd_ring_add_request,
+ .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
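Both rings account for free space the same way: head and tail are byte
offsets into the ring object, and eight bytes are held back so the tail
can never catch the head exactly. The arithmetic that init_ring_common(),
intel_init_ring_buffer() and intel_wait_ring_buffer() all repeat, as a
standalone sketch:

static int ring_space(unsigned int head, unsigned int tail,
		      unsigned long size)
{
	int space = head - (tail + 8);

	if (space < 0)		/* tail has wrapped past head */
		space += size;
	return space;
}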
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 000000000000..d5568d3766de
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct intel_hw_status_page {
+ void *page_addr;
+ unsigned int gfx_addr;
+ struct drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct intel_ring_buffer {
+ const char *name;
+ struct ring_regs {
+ u32 ctl;
+ u32 head;
+ u32 tail;
+ u32 start;
+ } regs;
+ unsigned int ring_flag;
+ unsigned long size;
+ unsigned int alignment;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_gem_object *gem_object;
+
+ unsigned int head;
+ unsigned int tail;
+ unsigned int space;
+ u32 next_seqno;
+ struct intel_hw_status_page status_page;
+
+ u32 irq_gem_seqno; /* last seqno seen at irq time */
+ u32 waiting_gem_seqno;
+ int user_irq_refcount;
+ void (*user_irq_get)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*user_irq_put)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*setup_status_page)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ int (*init)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ unsigned int (*get_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_active_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*advance_ring)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*flush)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ u32 (*add_request)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains);
+ u32 (*get_gem_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ int (*dispatch_gem_execbuffer)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset);
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ wait_queue_head_t irq_queue;
+ drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+ int reg)
+{
+ u32 *regs = ring->status_page.page_addr;
+ return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
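Callers never touch ring registers directly; they go through
intel_ring_begin()/intel_ring_emit()/intel_ring_advance(), which work on
any ring. A minimal sketch of a two-dword emission, matching what
bsd_ring_flush() does above (foo_flush is a hypothetical caller):

static void foo_flush(struct drm_device *dev, struct intel_ring_buffer *ring)
{
	intel_ring_begin(dev, ring, 2);		/* reserve two dwords */
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);		/* write the new tail */
}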
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab06..76993ac16cc1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -26,6 +26,7 @@
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
@@ -36,6 +37,18 @@
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) ((c)->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c) ((c)->output_flag & SDVO_LVDS_MASK)
+
+
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
@@ -52,7 +65,7 @@ struct intel_sdvo_priv {
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
- int output_device;
+ int sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -84,12 +97,6 @@ struct intel_sdvo_priv {
/* This is for current tv format name */
char *tv_format_name;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -110,29 +117,36 @@ struct intel_sdvo_priv {
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
*/
struct intel_sdvo_encode encode;
- /* DDC bus used by this SDVO output */
+ /* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
/* Mac mini hack -- use the same DDC as the analog connector */
struct i2c_adapter *analog_ddc_bus;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ /* This contains all current supported TV format */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
/* add the property for the SDVO-TV */
struct drm_property *left_property;
struct drm_property *right_property;
@@ -160,22 +174,33 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_output->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (sdvo_priv->output_device == SDVOB) {
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
+ return;
+ }
+
+ if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -194,10 +219,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
+static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
u8 *ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2];
u8 buf[2];
int ret;
@@ -220,7 +245,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -230,10 +255,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
return false;
}
-static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
+static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
u8 ch)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
@@ -247,7 +272,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
+ if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
{
return true;
}
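SDVO register access, before and after this rename, is an ordinary
two-message i2c transaction: write the register index, then read the
value back. A sketch of that transaction in isolation (hypothetical
function name; the 7-bit address shift follows the driver's convention of
storing an 8-bit slave address):

#include <linux/i2c.h>

static bool foo_sdvo_read_byte(struct i2c_adapter *adapter, u8 slave_addr,
			       u8 reg, u8 *val)
{
	u8 out_buf[2] = { reg, 0 };
	struct i2c_msg msgs[] = {
		{
			.addr = slave_addr >> 1,
			.flags = 0,		/* write the register index */
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = slave_addr >> 1,
			.flags = I2C_M_RD,	/* read the value back */
			.len = 1,
			.buf = val,
		},
	};

	/* i2c_transfer() returns the number of messages completed */
	return i2c_transfer(adapter, msgs, 2) == 2;
}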
@@ -351,13 +376,14 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
+#define IS_SDVOB(reg) ((reg) == SDVOB || (reg) == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
-static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
void *args, int args_len)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
@@ -377,19 +403,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
DRM_LOG_KMS("\n");
}
-static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_output, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
((u8*)args)[i]);
}
- intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -402,11 +428,11 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_output *intel_output,
+static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
void *response, int response_len,
u8 status)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -421,7 +447,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
DRM_LOG_KMS("\n");
}
-static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
void *response, int response_len)
{
int i;
@@ -431,16 +457,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- intel_sdvo_read_byte(intel_output,
+ intel_sdvo_read_byte(intel_encoder,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]);
}
/* read the return status */
- intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
+ intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
&status);
- intel_sdvo_debug_response(intel_output, response, response_len,
+ intel_sdvo_debug_response(intel_encoder, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
return status;
@@ -468,10 +494,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
u8 target)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
@@ -495,10 +521,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
},
};
- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -507,7 +533,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -521,7 +547,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
return;
}
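
Because the mux falls back to the internal SDVO registers after a single transaction, every DDC user must re-issue the switch immediately before its transfer; intel_sdvo_master_xfer() near the end of this patch does exactly that. A hypothetical helper, to make the pattern concrete:

/* Hypothetical sketch -- mirrors what intel_sdvo_master_xfer() below does. */
static int sdvo_ddc_xfer_sketch(struct intel_encoder *intel_encoder,
				struct i2c_msg *msgs, int num)
{
	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;

	/* Re-arm the mux: it reverts after one transaction. */
	intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
	return i2c_transfer(intel_encoder->ddc_bus, msgs, num);
}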
-static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
{
struct intel_sdvo_set_target_input_args targets = {0};
u8 status;
@@ -532,10 +558,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
if (target_1)
targets.target_1 = 1;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
sizeof(targets));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
@@ -546,13 +572,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -561,29 +587,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
- u16 *outputs)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
-static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
+static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
sizeof(outputs));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
+static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
int mode)
{
u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -603,24 +618,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
break;
}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
sizeof(state));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
NULL, 0);
- status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
+ status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -632,92 +647,58 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
u16 outputs)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
sizeof(outputs));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part1,
- sizeof(dtd->part1));
+ intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
- intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &dtd->part2,
- sizeof(dtd->part2));
+ intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
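
The part1/part2 split exists because a full detailed timing descriptor is 16 bytes while the SDVO argument window only holds 8, so every timing moves as two consecutive commands (cmd and cmd + 1). A simplified sketch of the descriptor, with field names assumed from intel_sdvo_regs.h:

/* Simplified sketch of the two 8-byte DTD halves: */
struct intel_sdvo_dtd_sketch {
	struct {
		u16 clock;			/* pixel clock / 10 kHz */
		u8 h_active, h_blank, h_high;
		u8 v_active, v_blank, v_high;
	} part1;				/* sent with cmd        */
	struct {
		u8 h_sync_off, h_sync_width;
		u8 v_sync_off_width, sync_off_width_high;
		u8 dtd_flags, sdvo_flags;
		u8 v_sync_off_high, reserved;
	} part2;				/* sent with cmd + 1    */
} __attribute__((packed));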
-static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_get_timing(intel_output,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_output,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
- struct intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
- status = intel_sdvo_read_response(intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_output,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_output *output,
+intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
uint8_t status;
memset(&args, 0, sizeof(args));
@@ -731,32 +712,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
&args, sizeof(args));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
u8 status;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
NULL, 0);
- status = intel_sdvo_read_response(output, &dtd->part1,
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
sizeof(dtd->part1));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
NULL, 0);
- status = intel_sdvo_read_response(output, &dtd->part2,
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
sizeof(dtd->part2));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -764,29 +746,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
return true;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
-{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
-static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
- status = intel_sdvo_read_response(intel_output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
@@ -875,13 +840,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_output *output,
+static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
struct intel_sdvo_encode *encode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
- status = intel_sdvo_read_response(output, encode, sizeof(*encode));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
memset(encode, 0, sizeof(*encode));
return false;
@@ -890,29 +855,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
return true;
}
-static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
+static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+ uint8_t mode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_colorimetry(struct intel_output *output,
+static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
uint8_t mode)
{
uint8_t status;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
- status = intel_sdvo_read_response(output, NULL, 0);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
return (status == SDVO_CMD_STATUS_SUCCESS);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
+static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
{
int i, j;
uint8_t set_buf_index[2];
@@ -921,43 +887,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
- intel_sdvo_read_response(output, &av_split, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
- intel_sdvo_read_response(output, &buf_size, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &buf_size, 1);
pos = buf;
for (j = 0; j <= buf_size; j += 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_DATA,
NULL, 0);
- intel_sdvo_read_response(output, pos, 8);
+ intel_sdvo_read_response(intel_encoder, pos, 8);
pos += 8;
}
}
}
#endif
-static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
- uint8_t *data, int8_t size, uint8_t tx_rate)
+static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+ int index,
+ uint8_t *data, int8_t size, uint8_t tx_rate)
{
uint8_t set_buf_index[2];
set_buf_index[0] = index;
set_buf_index[1] = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
for (; size > 0; size -= 8) {
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
data += 8;
}
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
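
HBUF traffic is strictly chunked: after selecting a buffer index, payload moves in fixed 8-byte SET_HBUF_DATA writes (note the loop rounds the size up). The checksum helper named below is elided by the hunk; presumably it computes the standard DIP two's-complement byte sum, roughly:

/* Assumed equivalent of the elided helper: the byte sum of header plus
 * payload plus checksum must be 0 mod 256. */
static uint8_t dip_csum_sketch(const uint8_t *data, uint8_t size)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];
	return 0x100 - sum;
}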
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1032,7 +1000,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
+static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -1043,15 +1011,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
+ intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
SDVO_HBUF_TX_VSYNC);
}
-static void intel_sdvo_set_tv_format(struct intel_output *output)
+static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
{
struct intel_sdvo_tv_format format;
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
uint32_t format_map, i;
uint8_t status;
@@ -1064,10 +1033,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
sizeof(format));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
DRM_DEBUG_KMS("%s: Failed to set TV format\n",
SDVO_NAME(sdvo_priv));
@@ -1077,8 +1046,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *dev_priv = output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
if (dev_priv->is_tv) {
struct intel_sdvo_dtd output_dtd;
@@ -1093,22 +1062,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- intel_sdvo_set_target_output(output,
- dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- success = intel_sdvo_create_preferred_input_timing(output,
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay);
if (success) {
struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(output,
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
&input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1131,16 +1100,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
intel_sdvo_get_dtd_from_mode(&output_dtd,
dev_priv->sdvo_lvds_fixed_mode);
- intel_sdvo_set_target_output(output,
- dev_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &output_dtd);
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
success = intel_sdvo_create_preferred_input_timing(
- output,
+ intel_encoder,
mode->clock / 10,
mode->hdisplay,
mode->vdisplay);
@@ -1148,7 +1117,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (success) {
struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(output,
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
&input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1180,8 +1149,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 sdvox = 0;
int sdvo_pixel_multiply;
struct intel_sdvo_in_out_map in_out;
@@ -1197,15 +1166,15 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- status = intel_sdvo_read_response(output, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
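	/* For reference (layout assumed from intel_sdvo_regs.h): the map is
	 * two u16s, struct intel_sdvo_in_out_map { u16 in0, in1; }.  in0
	 * routes the active outputs onto input 0 (SDVOB), in1 onto input 1
	 * (SDVOC); this driver always drives everything from input 0. */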
if (sdvo_priv->is_hdmi) {
- intel_sdvo_set_avi_infoframe(output, mode);
+ intel_sdvo_set_avi_infoframe(intel_encoder, mode);
sdvox |= SDVO_AUDIO_ENABLE;
}
@@ -1222,16 +1191,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
*/
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
- intel_sdvo_set_target_output(output,
- sdvo_priv->controlled_output);
- intel_sdvo_set_output_timing(output, &input_dtd);
+ intel_sdvo_set_target_output(intel_encoder,
+ sdvo_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
if (sdvo_priv->is_tv)
- intel_sdvo_set_tv_format(output);
+ intel_sdvo_set_tv_format(intel_encoder);
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1239,29 +1208,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* output the preferred timing, and we don't support that currently.
*/
#if 0
- success = intel_sdvo_create_preferred_input_timing(output, clock,
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder, clock,
width, height);
if (success) {
struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_preferred_input_timing(output, &input_dtd);
- intel_sdvo_set_input_timing(output, &input_dtd);
+ intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd);
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
}
#else
- intel_sdvo_set_input_timing(output, &input_dtd);
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
#endif
switch (intel_sdvo_get_pixel_multiplier(mode)) {
case 1:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_1X);
break;
case 2:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_2X);
break;
case 4:
- intel_sdvo_set_clock_rate_mult(output,
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
SDVO_CLOCK_RATE_MULT_4X);
break;
}
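	/* intel_sdvo_get_pixel_multiplier() (unchanged by this patch) picks
	 * the multiplier from the dot clock; assumed thresholds:
	 *   >= 100 MHz -> 1x, >= 50 MHz -> 2x, otherwise 4x.
	 * e.g. a 25.175 MHz VGA mode goes over the link at 4x = 100.7 MHz. */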
@@ -1272,8 +1241,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
SDVO_VSYNC_ACTIVE_HIGH |
SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(sdvo_priv->output_device);
- switch (sdvo_priv->output_device) {
+ sdvox |= I915_READ(sdvo_priv->sdvo_reg);
+ switch (sdvo_priv->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1297,26 +1266,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(output, sdvox);
+ intel_sdvo_write_sdvox(intel_encoder, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_output, 0);
+ intel_sdvo_set_active_outputs(intel_encoder, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(sdvo_priv->output_device);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1324,13 +1293,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(sdvo_priv->output_device);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1,
+ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
&input2);
@@ -1344,109 +1313,18 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_output, mode);
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
-static void intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
- intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_get_input_timing(intel_output,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_get_input_timing(intel_output,
- &sdvo_priv->save_input_dtd_2);
- }
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_get_output_timing(intel_output,
- &sdvo_priv->save_output_dtd[o]);
- }
- }
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
- }
-
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_output, 0);
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_output, this_output);
- intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
- }
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_output, true, false);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_output, false, true);
- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
- }
-
- intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
-
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
- }
-
- intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
-}
-
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1471,37 +1349,39 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
{
u8 status;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
- status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
+/* Unused; kept under #if 0 for reference. */
+#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_output *iout = NULL;
+ struct intel_encoder *iout = NULL;
struct intel_sdvo_priv *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_output(connector);
+ iout = to_intel_encoder(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
sdvo = iout->dev_priv;
- if (sdvo->output_device == SDVOB && sdvoB)
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
return connector;
- if (sdvo->output_device == SDVOC && !sdvoB)
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
return connector;
}
@@ -1513,16 +1393,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_output = to_intel_output(connector);
+ intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
if (response[0] != 0)
return 1;
@@ -1534,30 +1414,31 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_output, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
}
+#endif
static bool
-intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
+intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
{
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
if (sdvo_priv->caps.output_flags &
@@ -1591,12 +1472,17 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_output *intel_output;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_output = to_intel_output(connector);
- if (intel_output->type == INTEL_OUTPUT_ANALOG)
- return connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (encoder == intel_attached_encoder(connector))
+ return connector;
+ }
+ }
}
return NULL;
}
@@ -1618,18 +1504,20 @@ intel_analog_is_connected(struct drm_device *dev)
}
enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
uint8_t saved_ddc, temp_ddc;
saved_ddc = sdvo_priv->ddc_bus;
temp_ddc = sdvo_priv->ddc_bus >> 1;
@@ -1639,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while (temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_output->base,
- intel_output->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
@@ -1657,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
/* When there is no EDID and no monitor is connected to the VGA
* port, try the CRT DDC bus to read the EDID for the DVI connector
*/
- if (edid == NULL &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev))
- edid = drm_get_edid(&intel_output->base,
- sdvo_priv->analog_ddc_bus);
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(connector->dev))
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
- */
- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- sdvo_priv->is_hdmi =
- drm_detect_hdmi_monitor(edid);
- else
- status = connector_status_disconnected;
- }
+ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
- kfree(edid);
- intel_output->base.display_info.raw_edid = NULL;
+ /* The DDC bus is shared; match the EDID type to the connector type */
+ if (is_digital && need_digital)
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ else if (is_digital != need_digital)
+ status = connector_status_disconnected;
- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ connector->display_info.raw_edid = NULL;
+ } else
status = connector_status_disconnected;
+
+ kfree(edid);
return status;
}
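
SDVO_TMDS_MASK above, and the SDVO_TV_MASK/SDVO_LVDS_MASK tests in intel_sdvo_detect() below, just group the per-port output flags. Presumably defined at the top of this file along these lines (the exact TV members are an assumption):

#define SDVO_TMDS_MASK	(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK	(SDVO_OUTPUT_RGB0  | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK	(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK	(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)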
@@ -1687,16 +1571,20 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-
- intel_sdvo_write_cmd(intel_output,
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ enum drm_connector_status ret;
+
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
if (sdvo_priv->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- status = intel_sdvo_read_response(intel_output, &response, 2);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
@@ -1706,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_output) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_output, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ sdvo_priv->attached_output = response;
+
+ if ((sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (response & SDVO_TMDS_MASK)
+ ret = intel_sdvo_hdmi_sink_detect(connector);
+ else
+ ret = connector_status_connected;
+
+ /* May need to update encoder flags, e.g. the TV clock for SDVO TV. */
+ if (ret == connector_status_connected) {
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ sdvo_priv->is_lvds = true;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+ return ret;
}
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_output);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1733,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_output->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
-
+ !intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_output->ddc_bus;
- intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_output);
-
- intel_output->ddc_bus = digital_ddc_bus;
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1814,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1835,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
sizeof(format_map) ? sizeof(format_map) :
sizeof(struct intel_sdvo_sdtv_resolution_request));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -1856,9 +1755,10 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1866,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_output);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1895,12 +1795,12 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_output *output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->is_tv)
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1913,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
if (sdvo_priv->left_property)
drm_property_destroy(dev, sdvo_priv->left_property);
if (sdvo_priv->right_property)
@@ -1930,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
drm_property_destroy(dev, sdvo_priv->hpos_property);
if (sdvo_priv->vpos_property)
drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
if (sdvo_priv->saturation_property)
drm_property_destroy(dev,
sdvo_priv->saturation_property);
@@ -1941,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
if (sdvo_priv->hue_property)
drm_property_destroy(dev, sdvo_priv->hue_property);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_priv->brightness_property)
drm_property_destroy(dev,
sdvo_priv->brightness_property);
@@ -1951,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (intel_output->i2c_bus)
- intel_i2c_destroy(intel_output->i2c_bus);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
-
- if (sdvo_priv->tv_format_property)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ sdvo_connector->tv_format_property);
+ intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
-
- kfree(intel_output);
+ kfree(connector);
}
static int
@@ -1983,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1996,105 +1882,105 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret < 0)
goto out;
- if (property == sdvo_priv->tv_format_property) {
+ if (property == sdvo_connector->tv_format_property) {
if (val >= TV_FORMAT_NUM) {
ret = -EINVAL;
goto out;
}
if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
+ sdvo_connector->tv_format_supported[val])
goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
changed = true;
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
cmd = 0;
temp_value = val;
- if (sdvo_priv->left_property == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
goto out;
cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
goto out;
cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
goto out;
cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
goto out;
cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
+ sdvo_connector->cur_brightness = temp_value;
}
if (cmd) {
- intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
- status = intel_sdvo_read_response(intel_output,
+ intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_encoder,
NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2120,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2131,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2152,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
* outputs, then LVDS outputs.
*/
static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
- uint16_t mask = 0;
- unsigned int num_bits;
+ struct sdvo_device_mapping *mapping;
- /* Make a mask of outputs less than or equal to our own priority in the
- * list.
- */
- switch (dev_priv->controlled_output) {
- case SDVO_OUTPUT_LVDS1:
- mask |= SDVO_OUTPUT_LVDS1;
- case SDVO_OUTPUT_LVDS0:
- mask |= SDVO_OUTPUT_LVDS0;
- case SDVO_OUTPUT_TMDS1:
- mask |= SDVO_OUTPUT_TMDS1;
- case SDVO_OUTPUT_TMDS0:
- mask |= SDVO_OUTPUT_TMDS0;
- case SDVO_OUTPUT_RGB1:
- mask |= SDVO_OUTPUT_RGB1;
- case SDVO_OUTPUT_RGB0:
- mask |= SDVO_OUTPUT_RGB0;
- break;
- }
-
- /* Count bits to find what number we are in the priority list. */
- mask &= dev_priv->caps.output_flags;
- num_bits = hweight16(mask);
- if (num_bits > 3) {
- /* if more than 3 outputs, default to DDC bus 3 for now */
- num_bits = 3;
- }
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
- /* Corresponds to SDVO_CONTROL_BUS_DDCx */
- dev_priv->ddc_bus = 1 << num_bits;
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
}
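
The VBT ddc_pin byte carries the DDC bus number in its high nibble, and the shift-then-`1 <<` turns that into the SDVO_CONTROL_BUS_DDCx-style bit the bus-switch command expects. A worked example with a made-up pin value:

/* Illustrative only: ddc_pin = 0x25
 *   (0x25 & 0xf0) >> 4 = 2,  so ddc_bus = 1 << 2 = 0x04,
 * i.e. the mux bit selecting DDC channel 2. */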
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2203,42 +2082,40 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
return true;
}
-static struct intel_output *
-intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
+static struct intel_encoder *
+intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
- struct intel_output *intel_output = NULL;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
- intel_output = to_intel_output(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
break;
- }
}
- return intel_output;
+ return intel_encoder;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_output =
- intel_sdvo_chan_to_intel_output(
+ intel_encoder =
+ intel_sdvo_chan_to_intel_encoder(
(struct intel_i2c_chan *)(algo_data->data));
- if (intel_output == NULL)
+ if (intel_encoder == NULL)
return -EINVAL;
- sdvo_priv = intel_output->dev_priv;
- algo = intel_output->i2c_bus->algo;
+ sdvo_priv = intel_encoder->dev_priv;
+ algo = intel_encoder->i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2247,12 +2124,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
};
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
+intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (output_device == SDVOB) {
+ if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2277,100 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
/* No SDVO device info was found for the other DVO port,
* so use the mapping assumption we had before BIOS parsing.
*/
- if (output_device == SDVOB)
+ if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
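
IS_SDVOB() replaces the old `== SDVOB` equality test so the same path can also match the Ironlake PCH register offset; presumably something like:

/* Assumed definition -- verify against the top of this file: */
#define IS_SDVOB(reg)	((reg) == SDVOB || (reg) == PCH_SDVOB)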
static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+intel_sdvo_connector_alloc(struct intel_connector **ret)
+{
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
+}
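
intel_sdvo_connector_alloc() makes one kzalloc sized for both structures and points dev_priv at the bytes immediately after the base object, so a single kfree of the connector releases the private data as well. A standalone illustration of the layout (struct names are placeholders):

```c
#include <stdio.h>
#include <stdlib.h>

struct base { void *dev_priv; };
struct priv { int output_flag; };

int main(void)
{
	/* One allocation covers the base object plus trailing private data. */
	struct base *b = calloc(1, sizeof(*b) + sizeof(struct priv));
	struct priv *p;

	if (!b)
		return 1;

	p = (struct priv *)(b + 1);	/* priv starts right after base */
	b->dev_priv = p;
	p->output_flag = 1;

	printf("base=%p priv=%p offset=%zu\n",
	       (void *)b, (void *)p, (size_t)((char *)p - (char *)b));
	free(b);			/* frees both in one call */
	return 0;
}
```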
+
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+ connector->connector_type);
+
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_sysfs_connector_add(connector);
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
+
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ intel_sdvo_tv_create_property(connector, type);
+
+ intel_sdvo_create_enhance_property(connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_connector *connector = &intel_output->base;
- struct drm_encoder *encoder = &intel_output->enc;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- bool ret = true, registered = false;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
sdvo_priv->is_tv = false;
- intel_output->needs_tv_clock = false;
+ intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
- }
+ /* An SDVO device may have an XXX1 output only if it also has the XXX0 output. */
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_output,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_output) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_output,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_output->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if (flags & SDVO_OUTPUT_SVID0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_output->needs_tv_clock = true;
- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_CVBS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_output->needs_tv_clock = true;
- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else {
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
+ return false;
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
sdvo_priv->controlled_output = 0;
@@ -2378,32 +2392,29 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
-
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
- return ret;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ return true;
}
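
The rewritten setup walks the capability word instead of picking one output: an XXX0 bit alone creates the first connector, and the second is created only when `(flags & MASK) == MASK`, i.e. both the XXX0 and XXX1 bits are set. The idiom in isolation (bit values here are illustrative, not the real SDVO defines):

```c
#include <stdio.h>
#include <stdint.h>

#define OUT_TMDS0 (1u << 0)		/* illustrative bit positions */
#define OUT_TMDS1 (1u << 1)
#define TMDS_MASK (OUT_TMDS0 | OUT_TMDS1)

int main(void)
{
	uint16_t flags = OUT_TMDS0 | OUT_TMDS1;	/* hypothetical caps word */

	if (flags & OUT_TMDS0)
		puts("init TMDS0");		/* first DVI connector */

	/* Second connector only when *both* bits are present. */
	if ((flags & TMDS_MASK) == TMDS_MASK)
		puts("init TMDS1");
	return 0;
}
```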
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_output,
- sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, type);
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&format, sizeof(format));
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -2414,43 +2425,45 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
if (format_map == 0)
return;
- sdvo_priv->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
tv_format_names[i];
}
- sdvo_priv->tv_format_property =
+ sdvo_connector->tv_format_property =
drm_property_create(
connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ connector, sdvo_connector->tv_format_property, 0);
}
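
intel_sdvo_tv_create_property() converts the device's supported-format bitmask into an enum property: scan the bits, collect the matching names, then register one enum value per name. The scan in standalone form (the name table and map value are stand-ins):

```c
#include <stdio.h>
#include <stdint.h>

#define TV_FORMAT_NUM 4
static const char *tv_format_names[TV_FORMAT_NUM] = {
	"NTSC_M", "PAL_B", "PAL_M", "SECAM_L",	/* placeholder names */
};

int main(void)
{
	uint32_t format_map = 0x5;		/* bits 0 and 2, hypothetically */
	const char *supported[TV_FORMAT_NUM];
	int n = 0, i;

	for (i = 0; i < TV_FORMAT_NUM; i++)
		if (format_map & (1u << i))
			supported[n++] = tv_format_names[i];

	for (i = 0; i < n; i++)			/* would become enum entries */
		printf("enum %d -> %s\n", i, supported[i]);
	return 0;
}
```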
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
uint16_t response, data_value[2];
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
NULL, 0);
- status = intel_sdvo_read_response(intel_output, &sdvo_data,
+ status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
sizeof(sdvo_data));
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2461,23 +2474,23 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
DRM_DEBUG_KMS("No enhancement is supported\n");
return;
}
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
/* when horizontal overscan is supported, Add the left/right
* property
*/
if (sdvo_data.overscan_h) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO max "
"h_overscan\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2507,18 +2520,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.overscan_v) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO max "
"v_overscan\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2548,17 +2561,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.position_h) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_POSITION_H, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2579,17 +2592,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.position_v) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_POSITION_V, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2609,20 +2622,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
- }
- if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SATURATION, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2644,17 +2655,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.contrast) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_CONTRAST, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2675,17 +2686,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
if (sdvo_data.hue) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_HUE, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2706,19 +2717,19 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_data.brightness) {
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&data_value, 4);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
return;
}
- intel_sdvo_write_cmd(intel_output,
+ intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
- status = intel_sdvo_read_response(intel_output,
+ status = intel_sdvo_read_response(intel_encoder,
&response, 2);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
@@ -2743,109 +2754,98 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
return;
}
-bool intel_sdvo_init(struct drm_device *dev, int output_device)
+bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
-
u8 ch[0x40];
int i;
+ u32 i2c_reg, ddc_reg, analog_ddc_reg;
- intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_output) {
+ intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+ if (!intel_encoder) {
return false;
}
- sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
- sdvo_priv->output_device = output_device;
+ sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
+ sdvo_priv->sdvo_reg = sdvo_reg;
- intel_output->dev_priv = sdvo_priv;
- intel_output->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->dev_priv = sdvo_priv;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ i2c_reg = PCH_GPIOE;
+ ddc_reg = PCH_GPIOE;
+ analog_ddc_reg = PCH_GPIOA;
+ } else {
+ i2c_reg = GPIOE;
+ ddc_reg = GPIOE;
+ analog_ddc_reg = GPIOA;
+ }
/* Set up the SDVO control bus. */
- if (output_device == SDVOB)
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (IS_SDVOB(sdvo_reg))
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
else
- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
- if (!intel_output->i2c_bus)
+ if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
+ intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (output_device == SDVOB) {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ if (IS_SDVOB(sdvo_reg)) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
- if (intel_output->ddc_bus == NULL)
+ if (intel_encoder->ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
- intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
/* By default, assume the SDVO output is not LVDS. */
- intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+ intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
- if (intel_sdvo_output_setup(intel_output,
+ if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
-
- connector = &intel_output->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
-
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- drm_encoder_init(dev, &intel_output->enc,
- &intel_sdvo_enc_funcs, intel_output->enc.encoder_type);
-
- drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
-
- drm_sysfs_connector_add(connector);
-
- intel_sdvo_select_ddc_bus(sdvo_priv);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume input 0 is always used. */
- intel_sdvo_set_target_input(intel_output, true, false);
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_pixel_clock_range(intel_output,
+ intel_sdvo_get_input_pixel_clock_range(intel_encoder,
&sdvo_priv->pixel_clock_min,
&sdvo_priv->pixel_clock_max);
@@ -2872,12 +2872,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
err_i2c:
if (sdvo_priv->analog_ddc_bus != NULL)
intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_output->ddc_bus != NULL)
- intel_i2c_destroy(intel_output->ddc_bus);
- if (intel_output->i2c_bus != NULL)
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_encoder->ddc_bus != NULL)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (intel_encoder->i2c_bus != NULL)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_output);
+ kfree(intel_encoder);
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..6d553c29d106 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-intel_tv_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- int i;
-
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
-
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@@ -1068,9 +931,9 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_output *intel_output)
+intel_tv_mode_find (struct intel_encoder *intel_encoder)
{
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
return intel_tv_mode_lookup(tv_priv->tv_format);
}
@@ -1078,8 +941,9 @@ intel_tv_mode_find (struct intel_output *intel_output)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +959,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -1121,9 +985,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1360,9 +1224,9 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1441,9 +1305,10 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
int dpms_mode;
int type = tv_priv->type;
@@ -1485,12 +1350,14 @@ intel_tv_detect(struct drm_connector *connector)
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(encoder->crtc, intel_output);
+ type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ &mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(crtc, intel_output);
- intel_release_load_detect_pipe(intel_output, dpms_mode);
+ type = intel_tv_detect_type(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder, connector,
+ dpms_mode);
} else
type = -1;
}
@@ -1525,8 +1392,9 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,8 +1418,9 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_output *intel_output = to_intel_output(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_output *intel_output = to_intel_output(connector);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_output);
+ kfree(connector);
}
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1740,7 +1608,8 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_output *intel_output;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1780,28 +1649,34 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_output = kzalloc(sizeof(struct intel_output) +
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_tv_priv), GFP_KERNEL);
- if (!intel_output) {
+ if (!intel_encoder) {
+ return;
+ }
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
return;
}
- connector = &intel_output->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
- tv_priv = (struct intel_tv_priv *)(intel_output + 1);
- intel_output->type = INTEL_OUTPUT_TVOUT;
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
- intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_output->dev_priv = tv_priv;
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+ intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_encoder->dev_priv = tv_priv;
tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
@@ -1812,7 +1687,7 @@ intel_tv_init(struct drm_device *dev)
tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
- drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8c..acd31ed861ef 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,17 +12,18 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_dp.o nouveau_grctx.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
- nv40_grctx.o \
+ nv40_grctx.o nv50_grctx.o \
nv04_instmem.o nv50_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o
+ nv17_gpio.o nv50_gpio.o \
+ nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e744753..d4bcca8a5133 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,5 +1,6 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
@@ -11,6 +12,8 @@
#include "nouveau_drm.h"
#include "nv50_display.h"
+#include <linux/vga_switcheroo.h>
+
#define NOUVEAU_DSM_SUPPORTED 0x00
#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
@@ -28,31 +31,30 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
-{
- static char muid[] = {
- 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
- 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
- };
+static struct nouveau_dsm_priv {
+ bool dsm_detected;
+ acpi_handle dhandle;
+ acpi_handle rom_handle;
+} nouveau_dsm_priv;
+
+static const char nouveau_dsm_muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+};
- struct pci_dev *pdev = dev->pdev;
- struct acpi_handle *handle;
+static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
int err;
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
-
- if (!handle)
- return -ENODEV;
-
input.count = 4;
input.pointer = params;
params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(muid);
- params[0].buffer.pointer = (char *)muid;
+ params[0].buffer.length = sizeof(nouveau_dsm_muid);
+ params[0].buffer.pointer = (char *)nouveau_dsm_muid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 0x00000102;
params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +64,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
- NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+ printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
return err;
}
@@ -86,40 +88,174 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
return 0;
}
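
With the handle-based signature, callers no longer build the ACPI package themselves; they pass the handle, the function selector, and its argument, as the mux and power helpers below do. A representative caller using the defines from earlier in this file (a fragment, not a standalone program):

```c
/* Sketch: power the discrete GPU up via _DSM; mirrors
 * nouveau_dsm_set_discrete_state() below. */
static int power_up_discrete(void)
{
	return nouveau_dsm(nouveau_dsm_priv.dhandle,
			   NOUVEAU_DSM_POWER,		/* function selector */
			   NOUVEAU_DSM_POWER_SPEED,	/* arg: discrete on */
			   NULL);			/* result unused */
}
```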
-int nouveau_hybrid_setup(struct drm_device *dev)
+static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
- int result;
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+}
- if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
- &result))
- return -ENODEV;
+static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
+{
+ int arg;
+ if (state == VGA_SWITCHEROO_ON)
+ arg = NOUVEAU_DSM_POWER_SPEED;
+ else
+ arg = NOUVEAU_DSM_POWER_STAMINA;
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ return 0;
+}
- NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
-
- if (result) { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
- } else { /* Stamina mode - disable the external GPU */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
- NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
- NULL);
- }
+static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
+ else
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
+}
+
+static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
+}
+
+static int nouveau_dsm_init(void)
+{
return 0;
}
-bool nouveau_dsm_probe(struct drm_device *dev)
+static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
+{
+ if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler nouveau_dsm_handler = {
+ .switchto = nouveau_dsm_switchto,
+ .power_state = nouveau_dsm_power_state,
+ .init = nouveau_dsm_init,
+ .get_client_id = nouveau_dsm_get_client_id,
+};
+
+static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, nvidia_handle;
+ acpi_status status;
+ int ret;
+ uint32_t result;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+ if (ACPI_FAILURE(status)) {
+ return false;
+ }
+
+ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ if (ret < 0)
+ return false;
+
+ nouveau_dsm_priv.dhandle = dhandle;
+ return true;
+}
+
+static bool nouveau_dsm_detect(void)
{
- int support = 0;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ int has_dsm = 0;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
+ }
+
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ nouveau_dsm_priv.dsm_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void nouveau_register_dsm_handler(void)
+{
+ bool r;
+
+ r = nouveau_dsm_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&nouveau_dsm_handler);
+}
+
+void nouveau_unregister_dsm_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
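
The register/unregister pair is meant to bracket the driver's lifetime: nouveau_dsm_detect() runs once, and the switcheroo handler is installed only when a dual-GPU DSM mux was actually found. A hedged sketch of plausible call sites (the init/exit functions are hypothetical):

```c
/* Sketch: bracketing module init/exit, assuming a nouveau-style driver. */
static int __init hypothetical_init(void)
{
	nouveau_register_dsm_handler();	/* no-op unless a DSM mux is detected */
	return 0;			/* ... then normal PCI registration ... */
}

static void __exit hypothetical_exit(void)
{
	nouveau_unregister_dsm_handler();
}
```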
- if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ if (!nouveau_dsm_priv.dsm_detected)
+ return false;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
return false;
- if (!support)
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
return false;
+ nouveau_dsm_priv.rom_handle = rom_handle;
return true;
}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
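
load_vbios_acpi() (added to nouveau_bios.c further down in this patch) drives this interface in fixed-size chunks until a call returns <= 0. The loop shape in standalone form, with a stub standing in for the ACPI reader:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ROM_SIZE	(64 * 1024)
#define CHUNK		4096		/* ROM_BIOS_PAGE-style block */

/* Stub for nouveau_acpi_get_bios_chunk(); pretends the ROM is 8 KiB. */
static int stub_get_chunk(uint8_t *bios, int offset, int len)
{
	if (offset >= 8192)
		return 0;
	memset(bios + offset, 0xaa, len);	/* fake ROM data */
	return len;
}

int main(void)
{
	static uint8_t bios[ROM_SIZE];
	int i, ret, total = 0;

	for (i = 0; i < ROM_SIZE / CHUNK; i++) {
		ret = stub_get_chunk(bios, i * CHUNK, CHUNK);
		if (ret <= 0)			/* short read ends the shadow */
			break;
		total += ret;
	}
	printf("shadowed %d bytes\n", total);
	return 0;
}
```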
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 20564f8cb0ec..406228f4a2a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -89,19 +89,21 @@ static struct backlight_ops nv50_bl_ops = {
static int nouveau_nv40_backlight_init(struct drm_device *dev)
{
+ struct backlight_properties props;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct backlight_device *bd;
if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 31;
bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
- &nv40_bl_ops);
+ &nv40_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
dev_priv->backlight = bd;
- bd->props.max_brightness = 31;
bd->props.brightness = nv40_get_intensity(bd);
backlight_update_status(bd);
@@ -110,19 +112,21 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
static int nouveau_nv50_backlight_init(struct drm_device *dev)
{
+ struct backlight_properties props;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct backlight_device *bd;
if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
return 0;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 1025;
bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
- &nv50_bl_ops);
+ &nv50_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
dev_priv->backlight = bd;
- bd->props.max_brightness = 1025;
bd->props.brightness = nv50_get_intensity(bd);
backlight_update_status(bd);
return 0;
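
Both backlight hunks are the same API migration: backlight_device_register() now takes a struct backlight_properties, so max_brightness is filled in before registration rather than patched onto bd->props afterwards. The converted call shape, mirroring the hunks above (a fragment, not standalone):

```c
struct backlight_properties props;

memset(&props, 0, sizeof(props));
props.max_brightness = 31;			/* device-specific maximum */
bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
			       &nv40_bl_ops, &props);
if (IS_ERR(bd))
	return PTR_ERR(bd);
bd->props.brightness = nv40_get_intensity(bd);	/* current level afterwards */
backlight_update_status(bd);
```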
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d49130..e492919faf44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
#define NV_DEBUG_NOTRACE
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -177,41 +178,51 @@ out:
pci_disable_rom(dev->pdev);
}
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+ int i;
+ int ret;
+ int size = 64 * 1024;
+
+ if (!nouveau_acpi_rom_supported(dev->pdev))
+ return;
+
+ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+ ret = nouveau_acpi_get_bios_chunk(data,
+ (i * ROM_BIOS_PAGE),
+ ROM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+ return;
+}
+
struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
const bool rw;
};
-static struct methods nv04_methods[] = {
- { "PROM", load_vbios_prom, false },
- { "PRAMIN", load_vbios_pramin, true },
- { "PCIROM", load_vbios_pci, true },
-};
-
-static struct methods nv50_methods[] = {
+static struct methods shadow_methods[] = {
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
+ { "ACPI", load_vbios_acpi, true },
};
-#define METHODCNT 3
-
static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct methods *methods;
- int i;
+ const int nr_methods = ARRAY_SIZE(shadow_methods);
+ struct methods *methods = shadow_methods;
int testscore = 3;
- int scores[METHODCNT];
+ int scores[nr_methods], i;
if (nouveau_vbios) {
- methods = nv04_methods;
- for (i = 0; i < METHODCNT; i++)
+ for (i = 0; i < nr_methods; i++)
if (!strcasecmp(nouveau_vbios, methods[i].desc))
break;
- if (i < METHODCNT) {
+ if (i < nr_methods) {
NV_INFO(dev, "Attempting to use BIOS image from %s\n",
methods[i].desc);
@@ -223,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
- if (dev_priv->card_type < NV_50)
- methods = nv04_methods;
- else
- methods = nv50_methods;
-
- for (i = 0; i < METHODCNT; i++) {
+ for (i = 0; i < nr_methods; i++) {
NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
methods[i].desc);
data[0] = data[1] = 0; /* avoid reuse of previous image */
@@ -239,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
}
while (--testscore > 0) {
- for (i = 0; i < METHODCNT; i++) {
+ for (i = 0; i < nr_methods; i++) {
if (scores[i] == testscore) {
NV_TRACE(dev, "Using BIOS image from %s\n",
methods[i].desc);
@@ -256,6 +262,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
+ /* Return:
+ * > 0: success, length of opcode
+ * 0: success, but abort further parsing of table (INIT_DONE etc)
+ * < 0: failure, table parsing will be aborted
+ */
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
@@ -311,11 +322,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
/* C51 has misaligned regs on purpose. Marvellous */
if (reg & 0x2 ||
- (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
/* warn on C51 regs that haven't been verified accessible in tracing */
- if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
@@ -420,7 +431,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
LOG_OLD_VALUE(bios_rd32(bios, reg));
BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(bios->dev, reg, data);
}
@@ -647,7 +658,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(dev, reg + 4, reg1);
nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +700,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
/*
* For the results of this function to be correct, CR44 must have been
@@ -700,7 +711,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
- if (dcb_entry > bios->bdcb.dcb.entries) {
+ if (dcb_entry > bios->dcb.entries) {
NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
"(%02X)\n", dcb_entry);
dcb_entry = 0x7f; /* unused / invalid marker */
@@ -709,29 +720,121 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2c_entries);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40) {
+ if (port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+ }
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
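
The offset arithmetic above follows the DCB 3.0+ I2C table header directly; as a standalone sketch (field names descriptive only):

/* Sketch: locating entry 'index' in a DCB 3.0+ I2C table.
 * i2ctable[0] = version, [1] = header length, [2] = entry count,
 * [3] = entry length; entries follow the header back to back.
 */
static inline const uint8_t *
dcb30_i2c_entry_ptr(const uint8_t *i2ctable, int index)
{
	return &i2ctable[i2ctable[1] + i2ctable[3] * index];
}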
+
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
if (i2c_index == 0xff) {
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = bdcb->i2c_default_indices;
+ int default_indices = dcb->i2c_default_indices;
- if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+ if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
shift = 4;
i2c_index = (default_indices >> shift) & 0xf;
}
if (i2c_index == 0x80) /* g80+ */
- i2c_index = bdcb->i2c_default_indices & 0xf;
+ i2c_index = dcb->i2c_default_indices & 0xf;
+ else
+ if (i2c_index == 0x81)
+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+ return NULL;
+ }
+
+ /* Make sure the i2c table entry has been parsed; it may not
+ * have been if this is a bus not referenced by a DCB encoder.
+ */
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
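
The two magic indices decode the same i2c_default_indices byte: 0x80 selects the low nibble and 0x81 (new in this patch) the high nibble. In isolation:

/* Sketch: i2c_default_indices packs two 4-bit default bus numbers. */
static inline int
dcb_default_i2c_bus(uint8_t i2c_default_indices, bool upper)
{
	return upper ? (i2c_default_indices & 0xf0) >> 4
		     : (i2c_default_indices & 0x0f);
}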
-static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+static uint32_t
+get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
{
/*
* For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +847,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
const int pramdac_offset[13] = {
0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
const uint32_t pramdac_table[4] = {
@@ -756,13 +860,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
dcb_entry = dcb_entry_idx_from_crtchead(dev);
if (dcb_entry == 0x7f)
return 0;
- dacoffset = pramdac_offset[
- dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+ dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
if (mlv == 0x81)
dacoffset ^= 8;
return 0x6808b0 + dacoffset;
} else {
- if (mlv > ARRAY_SIZE(pramdac_table)) {
+ if (mlv >= ARRAY_SIZE(pramdac_table)) {
NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
mlv);
return 0;
@@ -817,7 +920,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -919,7 +1022,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1066,6 +1169,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
}
static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DP_CONDITION opcode: 0x3A (':')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): "sub" opcode
+ * offset + 2 (8 bit): unknown
+ *
+ */
+
+ struct bit_displayport_encoder_table *dpe = NULL;
+ struct dcb_entry *dcb = bios->display.output;
+ struct drm_device *dev = bios->dev;
+ uint8_t cond = bios->data[offset + 1];
+ int dummy;
+
+ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+ if (!iexec->execute)
+ return 3;
+
+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+ if (!dpe) {
+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+ return -EINVAL;
+ }
+
+ switch (cond) {
+ case 0:
+ {
+ struct dcb_connector_table_entry *ent =
+ &bios->dcb.connector.entry[dcb->connector];
+
+ if (ent->type != DCB_CONNECTOR_eDP)
+ iexec->execute = false;
+ }
+ break;
+ case 1:
+ case 2:
+ if (!(dpe->unknown & cond))
+ iexec->execute = false;
+ break;
+ case 5:
+ {
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+ if (ret)
+ return ret;
+
+ if (cond & 1)
+ iexec->execute = false;
+ }
+ break;
+ default:
+ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+ break;
+ }
+
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+ return 3;
+}
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3B opcode: 0x3B (';')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+ return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3C opcode: 0x3C ('<')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+ return 2;
+}
+
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1169,7 +1392,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1230,12 +1453,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 3;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1246,35 +1468,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t reg = bios->data[offset + 4 + i * 3];
uint8_t mask = bios->data[offset + 5 + i * 3];
uint8_t data = bios->data[offset + 6 + i * 3];
- uint8_t value;
+ union i2c_smbus_data val;
- msg.addr = i2c_address;
- msg.flags = I2C_M_RD;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, value, mask, data);
+ offset, reg, val.byte, mask, data);
- value = (value & mask) | data;
+ if (!bios->execute)
+ continue;
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ val.byte &= mask;
+ val.byte |= data;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
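
The added `>> 1` accounts for the VBIOS storing I2C addresses in 8-bit form (R/W bit in bit 0), while the Linux i2c/SMBus helpers expect the 7-bit form; the conversion in isolation:

/* Sketch: convert a VBIOS 8-bit I2C address to the Linux 7-bit form. */
static inline uint8_t
vbios_to_linux_i2c_addr(uint8_t vbios_addr)
{
	return vbios_addr >> 1;
}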
@@ -1300,12 +1521,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 2;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1316,23 +1536,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
- uint8_t data = bios->data[offset + 5 + i * 2];
+ uint8_t reg = bios->data[offset + 4 + i * 2];
+ union i2c_smbus_data val;
+
+ val.byte = bios->data[offset + 5 + i * 2];
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, data);
-
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ offset, reg, val.byte);
+
+ if (!bios->execute)
+ continue;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
@@ -1356,7 +1578,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count;
struct nouveau_i2c_chan *chan;
@@ -1373,7 +1595,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1387,7 +1609,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ return -EIO;
}
return len;
@@ -1426,7 +1648,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1470,7 +1692,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1945,7 +2167,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2000,7 +2222,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2033,7 +2255,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2572,48 +2794,37 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* each GPIO according to various values listed in each entry
*/
- const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
- const uint8_t *gpio_entry;
int i;
- if (!iexec->execute)
- return 1;
-
- if (bios->bdcb.version != 0x40) {
- NV_ERROR(bios->dev, "DCB table not version 4.0\n");
- return 0;
- }
-
- if (!bios->bdcb.gpio_table_ptr) {
- NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
- return 0;
+ if (dev_priv->card_type != NV_50) {
+ NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
+ return -ENODEV;
}
- gpio_entry = gpio_table + gpio_table[1];
- for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
- uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
- int line = (entry & 0x0000001f);
+ if (!iexec->execute)
+ return 1;
- BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
+ uint32_t r, s, v;
- if ((entry & 0x0000ff00) == 0x0000ff00)
- continue;
+ BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
- r = nv50_gpio_reg[line >> 3];
- s = (line & 0x07) << 2;
- v = bios_rd32(bios, r) & ~(0x00000003 << s);
- if (entry & 0x01000000)
- v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
- else
- v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
- bios_wr32(bios, r, v);
+ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
+ offset, gpio->tag, gpio->state_default);
+ if (bios->execute)
+ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
- r = nv50_gpio_ctl[line >> 4];
- s = (line & 0x0f);
+ /* The NVIDIA binary driver doesn't appear to actually do
+ * any of this; my VBIOS does, however.
+ */
+ /* Not a clue, needs de-magicking */
+ r = nv50_gpio_ctl[gpio->line >> 4];
+ s = (gpio->line & 0x0f);
v = bios_rd32(bios, r) & ~(0x00010001 << s);
- switch ((entry & 0x06000000) >> 25) {
+ switch ((gpio->entry & 0x06000000) >> 25) {
case 1:
v |= (0x00000001 << s);
break;
@@ -2669,7 +2880,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return 0;
+ return -EINVAL;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2853,14 +3064,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2873,7 +3084,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return 0;
+ return ret;
}
data &= bios->data[offset + 0];
@@ -2882,7 +3093,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2912,14 +3123,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2930,7 +3141,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2947,6 +3158,9 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_COPY" , 0x37, init_copy },
{ "INIT_NOT" , 0x38, init_not },
{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
+ { "INIT_OP_3B" , 0x3B, init_op_3b },
+ { "INIT_OP_3C" , 0x3C, init_op_3c },
{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
{ "INIT_PLL2" , 0x4B, init_pll2 },
@@ -3014,7 +3228,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i, res;
+ int count = 0, i, ret;
uint8_t id;
/*
@@ -3029,26 +3243,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
;
- if (itbl_entry[i].name) {
- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
- offset, itbl_entry[i].id, itbl_entry[i].name);
-
- /* execute eventual command handler */
- res = (*itbl_entry[i].handler)(bios, offset, iexec);
- if (!res)
- break;
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += res;
- } else {
+ if (!itbl_entry[i].name) {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
+
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+ itbl_entry[i].id, itbl_entry[i].name);
+
+ /* execute eventual command handler */
+ ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (ret < 0) {
+ NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+ "table opcode: %s %d\n", offset,
+ itbl_entry[i].name, ret);
+ }
+
+ if (ret <= 0)
+ break;
+
+ /*
+ * Add the offset of the current command including all data
+ * of that command. The offset will then be pointing on the
+ * next op code.
+ */
+ offset += ret;
}
if (offset >= bios->length)
@@ -3123,7 +3344,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
struct dcb_entry *dcbent, int head, bool dl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = {true, false};
NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3361,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
@@ -3194,10 +3415,9 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
* of a list of pxclks and script pointers.
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
uint16_t scriptptr = 0, clktable;
- uint8_t clktableptr = 0;
/*
* For now we assume version 3.0 table - g80 support will need some
@@ -3216,26 +3436,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
break;
case LVDS_RESET:
+ clktable = bios->fp.lvdsmanufacturerpointer + 15;
+ if (dcbent->or == 4)
+ clktable += 8;
+
if (dcbent->lvdsconf.use_straps_for_mode) {
if (bios->fp.dual_link)
- clktableptr += 2;
- if (bios->fp.BITbit1)
- clktableptr++;
+ clktable += 4;
+ if (bios->fp.if_is_24bit)
+ clktable += 2;
} else {
/* using EDID */
- uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
- int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
+ int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
if (bios->fp.dual_link) {
- clktableptr += 2;
- fallbackcmpval *= 2;
+ clktable += 4;
+ cmpval_24bit <<= 1;
}
- if (fallbackcmpval & fallback)
- clktableptr++;
+
+ if (bios->fp.strapless_is_24bit & cmpval_24bit)
+ clktable += 2;
}
- /* adding outputset * 8 may not be correct */
- clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
+ clktable = ROM16(bios->data[clktable]);
if (!clktable) {
NV_ERROR(dev, "Pixel clock comparison table not found\n");
return -ENOENT;
@@ -3261,7 +3484,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
int ret;
@@ -3395,7 +3618,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
#ifndef __powerpc__
NV_ERROR(dev, "Pointer to flat panel table invalid\n");
#endif
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->digital_min_front_porch = 0x4b;
return 0;
}
@@ -3428,7 +3651,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
* fptable[4] is the minimum
* RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
*/
- bios->pub.digital_min_front_porch = fptable[4];
+ bios->digital_min_front_porch = fptable[4];
ofs = -7;
break;
default:
@@ -3467,7 +3690,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
if (lth.lvds_ver > 0x10)
- bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+ bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
/*
* If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3714,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
if (!mode) /* just checking whether we can produce a mode */
@@ -3544,7 +3767,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
* at which modes should be set up in the dual link style.
*
* Following the header, the BMP (ver 0xa) table has several records,
- * indexed by a seperate xlat table, indexed in turn by the fp strap in
+ * indexed by a separate xlat table, indexed in turn by the fp strap in
* EXTDEV_BOOT. Each record had a config byte, followed by 6 script
* numbers for use by INIT_SUB which controlled panel init and power,
* and finally a dword of ms to sleep between power off and on
@@ -3562,11 +3785,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
* until later, when this function should be called with non-zero pxclk
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
struct lvdstableheader lth;
uint16_t lvdsofs;
- int ret, chip_version = bios->pub.chip_version;
+ int ret, chip_version = bios->chip_version;
ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
if (ret)
@@ -3637,37 +3860,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
*if_is_24bit = bios->data[lvdsofs] & 16;
break;
case 0x30:
- /*
- * My money would be on there being a 24 bit interface bit in
- * this table, but I have no example of a laptop bios with a
- * 24 bit panel to confirm that. Hence we shout loudly if any
- * bit other than bit 0 is set (I've not even seen bit 1)
- */
- if (bios->data[lvdsofs] > 1)
- NV_ERROR(dev,
- "You have a very unusual laptop display; please report it\n");
+ case 0x40:
/*
* No sign of the "power off for reset" or "reset for panel
* on" bits, but it's safer to assume we should
*/
bios->fp.power_off_for_reset = true;
bios->fp.reset_after_pclk_change = true;
+
/*
* It's ok lvdsofs is wrong for nv4x edid case; dual_link is
- * over-written, and BITbit1 isn't used
+ * overwritten, and if_is_24bit isn't used
*/
bios->fp.dual_link = bios->data[lvdsofs] & 1;
- bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
- bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
- break;
- case 0x40:
- bios->fp.dual_link = bios->data[lvdsofs] & 1;
bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
break;
}
+ /* Dell Latitude D620 reports a too-high value for the dual-link
+ * transition freq, causing us to program the panel incorrectly.
+ *
+ * It doesn't appear the VBIOS actually uses its transition freq
+ * (90000 kHz); instead it uses the "Number of LVDS channels" field
+ * out of the panel ID structure (http://www.spwg.org/).
+ *
+ * For the moment, a quirk will do :)
+ */
+ if ((dev->pdev->device == 0x01d7) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x01c2)) {
+ bios->fp.duallink_transition_clk = 80000;
+ }
+
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -3679,20 +3905,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
static uint8_t *
bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
- uint16_t record, int record_len, int record_nr)
+ uint16_t record, int record_len, int record_nr,
+ bool match_link)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t entry;
uint16_t table;
int i, v;
+ switch (dcbent->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ case OUTPUT_DP:
+ break;
+ default:
+ match_link = false;
+ break;
+ }
+
for (i = 0; i < record_nr; i++, record += record_len) {
table = ROM16(bios->data[record]);
if (!table)
continue;
entry = ROM32(bios->data[table]);
+ if (match_link) {
+ v = (entry & 0x00c00000) >> 22;
+ if (!(v & dcbent->sorconf.link))
+ continue;
+ }
+
v = (entry & 0x000f0000) >> 16;
if (!(v & dcbent->or))
continue;
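
The header dword is matched on two bitfields; a sketch of the per-record test the loop above performs (bit positions taken from the masks in the code):

/* Sketch: per-record match performed above.
 * bits 16-19: OR mask, tested against dcbent->or
 * bits 22-23: link mask (DCB 4.x SOR link), tested when match_link
 */
static inline bool
output_entry_matches(uint32_t entry, int or, int link, bool match_link)
{
	if (match_link && !(((entry & 0x00c00000) >> 22) & link))
		return false;
	return (((entry & 0x000f0000) >> 16) & or) != 0;
}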
@@ -3716,7 +3959,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
int *length)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table;
if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3968,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
table = &bios->data[bios->display.dp_table_ptr];
- if (table[0] != 0x21) {
+ if (table[0] != 0x20 && table[0] != 0x21) {
NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
table[0]);
return NULL;
@@ -3734,7 +3977,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
*length = table[4];
return bios_output_config_match(dev, dcbent,
bios->display.dp_table_ptr + table[1],
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
}
int
@@ -3765,7 +4008,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
uint16_t script;
@@ -3823,7 +4066,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
if (!otable) {
NV_ERROR(dev, "Couldn't find matching output script table\n");
return 1;
@@ -3918,8 +4161,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
uint32_t sel_clk_binding, sel_clk;
@@ -3978,8 +4221,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version, pllindex = 0;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version, pllindex = 0;
uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
uint32_t crystal_strap_mask, crystal_straps;
@@ -4293,31 +4536,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
break;
}
-#if 0 /* for easy debugging */
- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
- ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ if (pll_lim->vco2.maxfreq) {
+ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+ }
+ if (!pll_lim->max_p) {
+ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+ } else {
+ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+ }
+ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
return 0;
}
@@ -4332,7 +4576,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
*/
bios->major_version = bios->data[offset + 3];
- bios->pub.chip_version = bios->data[offset + 2];
+ bios->chip_version = bios->data[offset + 2];
NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
bios->data[offset + 3], bios->data[offset + 2],
bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4646,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
}
/* First entry is normal dac, 2nd tv-out perhaps? */
- bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+ bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
return 0;
}
@@ -4526,8 +4770,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -ENOSYS;
}
- bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
- bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+ bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+ bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
return 0;
}
@@ -4796,11 +5040,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +5151,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -4961,82 +5205,10 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max index 15)\n", i2ctable[2]);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index > i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
- struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+ struct dcb_gpio_table *gpio = &bios->dcb.gpio;
return &gpio->entry[gpio->entries++];
}
@@ -5045,14 +5217,14 @@ struct dcb_gpio_entry *
nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i;
- for (i = 0; i < bios->bdcb.gpio.entries; i++) {
- if (bios->bdcb.gpio.entry[i].tag != tag)
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ if (bios->dcb.gpio.entry[i].tag != tag)
continue;
- return &bios->bdcb.gpio.entry[i];
+ return &bios->dcb.gpio.entry[i];
}
return NULL;
@@ -5075,32 +5247,32 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
gpio->tag = tag;
gpio->line = line;
gpio->invert = flags != 4;
+ gpio->entry = ent;
}
static void
parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
{
+ uint32_t entry = ROM32(bios->data[offset]);
struct dcb_gpio_entry *gpio;
- uint32_t ent = ROM32(bios->data[offset]);
- uint8_t line = ent & 0x1f,
- tag = ent >> 8 & 0xff;
- if (tag == 0xff)
+ if ((entry & 0x0000ff00) == 0x0000ff00)
return;
gpio = new_gpio_entry(bios);
-
- /* Currently unused, we may need more fields parsed at some
- * point. */
- gpio->tag = tag;
- gpio->line = line;
+ gpio->tag = (entry & 0x0000ff00) >> 8;
+ gpio->line = (entry & 0x0000001f) >> 0;
+ gpio->state_default = (entry & 0x01000000) >> 24;
+ gpio->state[0] = (entry & 0x18000000) >> 27;
+ gpio->state[1] = (entry & 0x60000000) >> 29;
+ gpio->entry = entry;
}
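
A worked example of the decode above, using a hypothetical entry dword:

/* Hypothetical entry = 0x21000107:
 *   tag           = (entry >> 8)  & 0xff = 0x01
 *   line          =  entry        & 0x1f = 0x07
 *   state_default = (entry >> 24) & 0x01 = 1
 *   state[0]      = (entry >> 27) & 0x03 = 0
 *   state[1]      = (entry >> 29) & 0x03 = 1
 */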
static void
parse_dcb_gpio_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+ uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
uint8_t *gpio_table = &bios->data[gpio_table_ptr];
int header_len = gpio_table[1],
entries = gpio_table[2],
@@ -5108,7 +5280,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
int i;
- if (bios->bdcb.version >= 0x40) {
+ if (bios->dcb.version >= 0x40) {
if (gpio_table_ptr && entry_len != 4) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5116,7 +5288,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb40_gpio_entry;
- } else if (bios->bdcb.version >= 0x30) {
+ } else if (bios->dcb.version >= 0x30) {
if (gpio_table_ptr && entry_len != 2) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5124,7 +5296,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb30_gpio_entry;
- } else if (bios->bdcb.version >= 0x22) {
+ } else if (bios->dcb.version >= 0x22) {
/*
* DCBs older than v3.0 don't really have a GPIO
* table, instead they keep some GPIO info at fixed
@@ -5158,30 +5330,82 @@ struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_connector_table_entry *cte;
- if (index >= bios->bdcb.connector.entries)
+ if (index >= bios->dcb.connector.entries)
return NULL;
- cte = &bios->bdcb.connector.entry[index];
+ cte = &bios->dcb.connector.entry[index];
if (cte->type == 0xff)
return NULL;
return cte;
}
+static enum dcb_connector_type
+divine_connector_type(struct nvbios *bios, int index)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].connector == index)
+ encoders |= (1 << dcb->entry[i].type);
+ }
+
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ type = DCB_CONNECTOR_DP;
+ else
+ type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DCB_CONNECTOR_DVI_I;
+ else
+ type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ type = DCB_CONNECTOR_TV_0;
+ }
+
+ return type;
+}
+
+static void
+apply_dcb_connector_quirks(struct nvbios *bios, int idx)
+{
+ struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+ struct drm_device *dev = bios->dev;
+
+ /* Gigabyte NX85T */
+ if ((dev->pdev->device == 0x0421) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x344c)) {
+ if (cte->type == DCB_CONNECTOR_HDMI_1)
+ cte->type = DCB_CONNECTOR_DVI_I;
+ }
+}
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->bdcb.connector;
+ struct dcb_connector_table *ct = &bios->dcb.connector;
struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+ uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
uint8_t *entry;
int i;
- if (!bios->bdcb.connector_table_ptr) {
+ if (!bios->dcb.connector_table_ptr) {
NV_DEBUG_KMS(dev, "No DCB connector table present\n");
return;
}
@@ -5199,12 +5423,14 @@ parse_dcb_connector_table(struct nvbios *bios)
entry = conntab + conntab[1];
cte = &ct->entry[0];
for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+ cte->index = i;
if (conntab[3] == 2)
cte->entry = ROM16(entry[0]);
else
cte->entry = ROM32(entry[0]);
+
cte->type = (cte->entry & 0x000000ff) >> 0;
- cte->index = (cte->entry & 0x00000f00) >> 8;
+ cte->index2 = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
case 0x00001000:
cte->gpio_tag = 0x07;
@@ -5226,12 +5452,43 @@ parse_dcb_connector_table(struct nvbios *bios)
if (cte->type == 0xff)
continue;
+ apply_dcb_connector_quirks(bios, i);
+
NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+
+ /* Check for known types; fall back to guessing the type
+ * from attached encoders if we hit an unknown one.
+ */
+ switch (cte->type) {
+ case DCB_CONNECTOR_VGA:
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ break;
+ default:
+ cte->type = divine_connector_type(bios, cte->index);
+ NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
+ break;
+ }
+
+ if (nouveau_override_conntype) {
+ int type = divine_connector_type(bios, cte->index);
+ if (type != cte->type) {
+ NV_WARN(dev, " -> type 0x%02x\n", type);
+ cte->type = type;
+ }
+ }
}
}
-static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
{
struct dcb_entry *entry = &dcb->entry[dcb->entries];
@@ -5241,7 +5498,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
return entry;
}
-static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5252,7 +5509,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
/* "or" mostly unused in early gen crt modesetting, 0 is fine */
}
-static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5279,7 +5536,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
#endif
}
-static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5290,23 +5547,17 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
}
static bool
-parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
- /*
- * Normal entries consist of a single bit, but dual link has the
- * next most significant bit set too
- */
- entry->duallink_possible =
- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
switch (entry->type) {
case OUTPUT_ANALOG:
@@ -5314,7 +5565,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Although the rest of a CRT conf dword is usually
* zeros, mac biosen have stuff there so we must mask
*/
- entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+ entry->crtconf.maxfreq = (dcb->version < 0x30) ?
(conf & 0xffff) * 10 :
(conf & 0xff) * 10000;
break;
@@ -5323,7 +5574,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
uint32_t mask;
if (conf & 0x1)
entry->lvdsconf.use_straps_for_mode = true;
- if (bdcb->version < 0x22) {
+ if (dcb->version < 0x22) {
mask = ~0xd;
/*
* The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5598,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Until we even try to use these on G8x, it's
* useless reporting unknown bits. They all are.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
break;
NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5608,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
case OUTPUT_TV:
{
- if (bdcb->version >= 0x30)
+ if (dcb->version >= 0x30)
entry->tvconf.has_component_output = conf & (0x8 << 4);
else
entry->tvconf.has_component_output = false;
@@ -5384,8 +5635,20 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
break;
case 0xe:
/* weird g80 mobile type that "nv" treats as a terminator */
- bdcb->dcb.entries--;
+ dcb->entries--;
return false;
+ default:
+ break;
+ }
+
+ if (dcb->version < 0x40) {
+ /* Normal entries consist of a single bit, but dual link has
+ * the next most significant bit set too
+ */
+ entry->duallink_possible =
+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
+ } else {
+ entry->duallink_possible = (entry->sorconf.link == 3);
}
/* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5659,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
static bool
-parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
switch (conn & 0x0000000f) {
@@ -5462,27 +5725,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf)
{
- struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+ struct dcb_entry *entry = new_dcb_entry(dcb);
bool ret;
- if (bdcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
else
- ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
if (!ret)
return ret;
- read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
- entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ entry->i2c_index, &dcb->i2c[entry->i2c_index]);
return true;
}
static
-void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
/*
* DCB v2.0 lists each output combination separately.
@@ -5534,8 +5797,7 @@ static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb;
+ struct dcb_table *dcb = &bios->dcb;
uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5805,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
int recordlength = 8, confofs = 4;
int i;
- dcb = bios->pub.dcb = &bdcb->dcb;
- dcb->entries = 0;
-
/* get the offset from 0x36 */
if (dev_priv->card_type > NV_04) {
dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5826,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
dcbtable = &bios->data[dcbptr];
/* get DCB version */
- bdcb->version = dcbtable[0];
+ dcb->version = dcbtable[0];
NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- bdcb->version >> 4, bdcb->version & 0xf);
+ dcb->version >> 4, dcb->version & 0xf);
- if (bdcb->version >= 0x20) { /* NV17+ */
+ if (dcb->version >= 0x20) { /* NV17+ */
uint32_t sig;
- if (bdcb->version >= 0x30) { /* NV40+ */
+ if (dcb->version >= 0x30) { /* NV40+ */
headerlen = dcbtable[1];
entries = dcbtable[2];
recordlength = dcbtable[3];
i2ctabptr = ROM16(dcbtable[4]);
sig = ROM32(dcbtable[6]);
- bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
- bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+ dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ dcb->connector_table_ptr = ROM16(dcbtable[20]);
} else {
i2ctabptr = ROM16(dcbtable[2]);
sig = ROM32(dcbtable[4]);
@@ -5593,7 +5852,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
"signature (%08X)\n", sig);
return -EINVAL;
}
- } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+ } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
char sig[8] = { 0 };
strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5900,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
if (!i2ctabptr)
NV_WARN(dev, "No pointer to DCB I2C port table\n");
else {
- bdcb->i2c_table = &bios->data[i2ctabptr];
- if (bdcb->version >= 0x30)
- bdcb->i2c_default_indices = bdcb->i2c_table[4];
+ dcb->i2c_table = &bios->data[i2ctabptr];
+ if (dcb->version >= 0x30)
+ dcb->i2c_default_indices = dcb->i2c_table[4];
}
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
-
if (entries > DCB_MAX_NUM_ENTRIES)
entries = DCB_MAX_NUM_ENTRIES;
@@ -5673,7 +5929,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
dcb->entries, connection, config);
- if (!parse_dcb_entry(dev, bdcb, connection, config))
+ if (!parse_dcb_entry(dev, dcb, connection, config))
break;
}
@@ -5681,18 +5937,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
* apart for v2.1+ not being known for requiring merging, this
* guarantees dcbent->index is the index of the entry in the rom image
*/
- if (bdcb->version < 0x21)
+ if (dcb->version < 0x21)
merge_like_dcb_entries(dev, dcb);
- return dcb->entries ? 0 : -ENXIO;
+ if (!dcb->entries)
+ return -ENXIO;
+
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+ return 0;
}
static void
fixup_legacy_connector(struct nvbios *bios)
{
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb = &bdcb->dcb;
- int high = 0, i;
+ struct dcb_table *dcb = &bios->dcb;
+ int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
/*
* DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5960,11 @@ fixup_legacy_connector(struct nvbios *bios)
* indices are all 0. We don't need the connector indices on pre-G80
* chips (yet?) so limit the use to DCB 4.0 and above.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
return;
+ dcb->connector.entries = 0;
+
/*
* No known connector info before v3.0, so make it up. the rule here
* is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5972,38 @@ fixup_legacy_connector(struct nvbios *bios)
* its own unique connector index.
*/
for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == 0xf)
- continue;
-
/*
* Ignore the I2C index for on-chip TV-out, as there
* are cards with bogus values (nv31m in bug 23212),
* and it's otherwise useless.
*/
if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+ dcb->entry[i].location == DCB_LOC_ON_CHIP)
dcb->entry[i].i2c_index = 0xf;
+ i2c = dcb->entry[i].i2c_index;
+
+ if (i2c_conn[i2c]) {
+ dcb->entry[i].connector = i2c_conn[i2c] - 1;
continue;
}
- dcb->entry[i].connector = dcb->entry[i].i2c_index;
- if (dcb->entry[i].connector > high)
- high = dcb->entry[i].connector;
+ dcb->entry[i].connector = dcb->connector.entries++;
+ if (i2c != 0xf)
+ i2c_conn[i2c] = dcb->connector.entries;
}
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index != 0xf)
- continue;
-
- dcb->entry[i].connector = ++high;
+ /* Fake the connector table as well as just connector indices */
+ for (i = 0; i < dcb->connector.entries; i++) {
+ dcb->connector.entry[i].index = i;
+ dcb->connector.entry[i].type = divine_connector_type(bios, i);
+ dcb->connector.entry[i].gpio_tag = 0xff;
}
}
static void
fixup_legacy_i2c(struct nvbios *bios)
{
- struct parsed_dcb *dcb = &bios->bdcb.dcb;
+ struct dcb_table *dcb = &bios->dcb;
int i;
for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +6089,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
@@ -5859,7 +6122,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct dcb_entry *dcbent)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
mutex_lock(&bios->lock);
@@ -5872,7 +6135,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
static bool NVInitVBIOS(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
mutex_init(&bios->lock);
@@ -5888,7 +6151,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
static int nouveau_parse_vbios_struct(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
int offset;
@@ -5915,7 +6178,7 @@ int
nouveau_run_vbios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i, ret = 0;
NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6209,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
}
if (dev_priv->card_type >= NV_50) {
- for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+ for (i = 0; i < bios->dcb.entries; i++) {
nouveau_bios_run_display_table(dev,
- &bios->bdcb.dcb.entry[i],
+ &bios->dcb.entry[i],
0, 0);
}
}
@@ -5962,26 +6225,48 @@ static void
nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_i2c_entry *entry;
int i;
- entry = &bios->bdcb.dcb.i2c[0];
+ entry = &bios->dcb.i2c[0];
for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
nouveau_i2c_fini(dev, entry);
}
+static bool
+nouveau_bios_posted(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool was_locked;
+ unsigned htotal;
+
+ if (dev_priv->chipset >= NV_50) {
+ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+ NVReadVgaCrtc(dev, 0, 0x1a) == 0)
+ return false;
+ return true;
+ }
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+ htotal = NVReadVgaCrtc(dev, 0, 0x06);
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
+ NVLockVgaCrtcs(dev, was_locked);
+ return (htotal != 0);
+}
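/*
 * Illustration only: nouveau_bios_posted() above decides whether the
 * VBIOS has run by reassembling the CRTC horizontal total from CR06
 * plus extended bits in CR07/CR25/CR41 -- an unPOSTed card reads all
 * zeroes.  A compile-checkable sketch with hypothetical register values:
 */
#include <stdio.h>

int main(void)
{
	unsigned char cr06 = 0x5f, cr07 = 0x21, cr25 = 0x00, cr41 = 0x00;
	unsigned htotal;

	htotal  = cr06;				/* bits 7:0 */
	htotal |= (cr07 & 0x01) << 8;		/* bit 8 */
	htotal |= (cr07 & 0x20) << 4;		/* bit 9 (CR07 bit 5) */
	htotal |= (cr25 & 0x01) << 10;		/* bit 10 */
	htotal |= (cr41 & 0x01) << 11;		/* bit 11 */

	printf("htotal = %u -> %s\n", htotal,
	       htotal ? "POSTed" : "not POSTed");
	return 0;
}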
+
int
nouveau_bios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t saved_nv_pextdev_boot_0;
bool was_locked;
int ret;
- dev_priv->vbios = &bios->pub;
-
if (!NVInitVBIOS(dev))
return -ENODEV;
@@ -6007,11 +6292,9 @@ nouveau_bios_init(struct drm_device *dev)
bios->execute = false;
/* ... unless card isn't POSTed already */
- if (dev_priv->card_type >= NV_10 &&
- NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
- NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+ if (!nouveau_bios_posted(dev)) {
NV_INFO(dev, "Adaptor not initialised\n");
- if (dev_priv->card_type < NV_50) {
+ if (dev_priv->card_type < NV_40) {
NV_ERROR(dev, "Unable to POST this chipset\n");
return -ENODEV;
}
@@ -6023,10 +6306,8 @@ nouveau_bios_init(struct drm_device *dev)
bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
ret = nouveau_run_vbios_init(dev);
- if (ret) {
- dev_priv->vbios = NULL;
+ if (ret)
return ret;
- }
/* feature_byte on BMP is poor, but init always sets CR4B */
was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6dc264..adf4ec2d06c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,72 @@
#define DCB_LOC_ON_CHIP 0
+struct dcb_i2c_entry {
+ uint32_t entry;
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
+enum dcb_gpio_tag {
+ DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+ uint32_t entry;
+ uint8_t state_default;
+ uint8_t state[2];
+};
+
+struct dcb_gpio_table {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+enum dcb_connector_type {
+ DCB_CONNECTOR_VGA = 0x00,
+ DCB_CONNECTOR_TV_0 = 0x10,
+ DCB_CONNECTOR_TV_1 = 0x11,
+ DCB_CONNECTOR_TV_3 = 0x13,
+ DCB_CONNECTOR_DVI_I = 0x30,
+ DCB_CONNECTOR_DVI_D = 0x31,
+ DCB_CONNECTOR_LVDS = 0x40,
+ DCB_CONNECTOR_DP = 0x46,
+ DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_HDMI_0 = 0x60,
+ DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_NONE = 0xff
+};
+
+struct dcb_connector_table_entry {
+ uint8_t index;
+ uint32_t entry;
+ enum dcb_connector_type type;
+ uint8_t index2;
+ uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+enum dcb_type {
+ OUTPUT_ANALOG = 0,
+ OUTPUT_TV = 1,
+ OUTPUT_TMDS = 2,
+ OUTPUT_LVDS = 3,
+ OUTPUT_DP = 6,
+ OUTPUT_ANY = -1
+};
+
struct dcb_entry {
int index; /* may not be raw dcb index if merging has happened */
- uint8_t type;
+ enum dcb_type type;
uint8_t i2c_index;
uint8_t heads;
uint8_t connector;
@@ -71,69 +134,22 @@ struct dcb_entry {
bool i2c_upper_default;
};
-struct dcb_i2c_entry {
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
+struct dcb_table {
+ uint8_t version;
-struct parsed_dcb {
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-};
-
-enum dcb_gpio_tag {
- DCB_GPIO_TVDAC0 = 0xc,
- DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
-};
-
-struct parsed_dcb_gpio {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
-};
-
-struct dcb_connector_table_entry {
- uint32_t entry;
- uint8_t type;
- uint8_t index;
- uint8_t gpio_tag;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
-struct bios_parsed_dcb {
- uint8_t version;
-
- struct parsed_dcb dcb;
uint8_t *i2c_table;
uint8_t i2c_default_indices;
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
uint16_t gpio_table_ptr;
- struct parsed_dcb_gpio gpio;
+ struct dcb_gpio_table gpio;
uint16_t connector_table_ptr;
struct dcb_connector_table connector;
};
-enum nouveau_encoder_type {
- OUTPUT_ANALOG = 0,
- OUTPUT_TV = 1,
- OUTPUT_TMDS = 2,
- OUTPUT_LVDS = 3,
- OUTPUT_DP = 6,
- OUTPUT_ANY = -1
-};
-
enum nouveau_or {
OUTPUT_A = (1 << 0),
OUTPUT_B = (1 << 1),
@@ -190,8 +206,8 @@ struct pll_lims {
int refclk;
};
-struct nouveau_bios_info {
- struct parsed_dcb *dcb;
+struct nvbios {
+ struct drm_device *dev;
uint8_t chip_version;
@@ -199,11 +215,6 @@ struct nouveau_bios_info {
uint32_t tvdactestval;
uint8_t digital_min_front_porch;
bool fp_no_ddc;
-};
-
-struct nvbios {
- struct drm_device *dev;
- struct nouveau_bios_info pub;
struct mutex lock;
@@ -234,7 +245,7 @@ struct nvbios {
uint16_t some_script_ptr; /* BIT I + 14 */
uint16_t init96_tbl_ptr; /* BIT I + 16 */
- struct bios_parsed_dcb bdcb;
+ struct dcb_table dcb;
struct {
int crtchead;
@@ -260,7 +271,6 @@ struct nvbios {
bool reset_after_pclk_change;
bool dual_link;
bool link_c_increment;
- bool BITbit1;
bool if_is_24bit;
int duallink_transition_clk;
uint8_t strapless_is_24bit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 028719fddf76..6f3c19522377 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -34,6 +34,7 @@
#include "nouveau_dma.h"
#include <linux/log2.h>
+#include <linux/slab.h>
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
@@ -71,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev,
* many small buffers.
*/
if (dev_priv->card_type == NV_50) {
- uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+ uint32_t block_size = dev_priv->vram_size >> 15;
int i;
switch (tile_flags) {
@@ -153,17 +154,17 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
- nouveau_bo_placement_set(nvbo, flags);
+ nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
- nvbo->channel = NULL;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ nvbo->channel = NULL;
spin_lock(&dev_priv->ttm.bo_list_lock);
list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -172,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return 0;
}
+static void
+set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+{
+ *n = 0;
+
+ if (type & TTM_PL_FLAG_VRAM)
+ pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+ if (type & TTM_PL_FLAG_TT)
+ pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+ if (type & TTM_PL_FLAG_SYSTEM)
+ pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+}
+
void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
- int n = 0;
-
- if (memtype & TTM_PL_FLAG_VRAM)
- nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
- if (memtype & TTM_PL_FLAG_TT)
- nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- if (memtype & TTM_PL_FLAG_SYSTEM)
- nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
- nvbo->placement.placement = nvbo->placements;
- nvbo->placement.busy_placement = nvbo->placements;
- nvbo->placement.num_placement = n;
- nvbo->placement.num_busy_placement = n;
-
- if (nvbo->pin_refcnt) {
- while (n--)
- nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
- }
+ struct ttm_placement *pl = &nvbo->placement;
+ uint32_t flags = TTM_PL_MASK_CACHING |
+ (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+
+ pl->placement = nvbo->placements;
+ set_placement_list(nvbo->placements, &pl->num_placement,
+ type, flags);
+
+ pl->busy_placement = nvbo->busy_placements;
+ set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+ type | busy, flags);
}
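/*
 * Illustration only: set_placement_list() expands a domain mask into an
 * ordered flag array (VRAM, then TT, then SYSTEM), and placement_set()
 * builds a stricter preferred list plus a wider "busy" fallback list
 * from it.  Sketch with stand-in flag values, not TTM's real ones:
 */
#include <stdio.h>

#define PL_VRAM   (1u << 0)
#define PL_TT     (1u << 1)
#define PL_SYSTEM (1u << 2)
#define PL_CACHED (1u << 8)	/* stand-in for TTM_PL_MASK_CACHING */

static void set_placement_list(unsigned *pl, unsigned *n,
			       unsigned type, unsigned flags)
{
	*n = 0;
	if (type & PL_VRAM)
		pl[(*n)++] = PL_VRAM | flags;
	if (type & PL_TT)
		pl[(*n)++] = PL_TT | flags;
	if (type & PL_SYSTEM)
		pl[(*n)++] = PL_SYSTEM | flags;
}

int main(void)
{
	unsigned placements[3], busy[3], n, nb, i;

	/* prefer VRAM; when contended, also accept TT (the "busy" set) */
	set_placement_list(placements, &n, PL_VRAM, PL_CACHED);
	set_placement_list(busy, &nb, PL_VRAM | PL_TT, PL_CACHED);

	for (i = 0; i < n; i++)
		printf("placement[%u] = %#x\n", i, placements[i]);
	for (i = 0; i < nb; i++)
		printf("busy[%u]      = %#x\n", i, busy[i]);
	return 0;
}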
int
@@ -199,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret, i;
+ int ret;
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
NV_ERROR(nouveau_bdev(bo->bdev)->dev,
@@ -215,11 +223,9 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
if (ret)
goto out;
- nouveau_bo_placement_set(nvbo, memtype);
- for (i = 0; i < nvbo->placement.num_placement; i++)
- nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -244,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret, i;
+ int ret;
if (--nvbo->pin_refcnt)
return 0;
@@ -253,10 +259,9 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ret)
return ret;
- for (i = 0; i < nvbo->placement.num_placement; i++)
- nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -386,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
-
- man->io_addr = NULL;
- man->io_offset = drm_get_resource_start(dev, 1);
- man->io_size = drm_get_resource_len(dev, 1);
- if (man->io_size > nouveau_mem_fb_amount(dev))
- man->io_size = nouveau_mem_fb_amount(dev);
-
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@@ -419,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
-
- man->io_offset = dev_priv->gart_info.aper_base;
- man->io_size = dev_priv->gart_info.aper_size;
- man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@@ -439,11 +431,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
TTM_PL_FLAG_SYSTEM);
break;
default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
break;
}
@@ -457,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -468,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -492,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -570,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -588,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -596,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -613,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -626,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -701,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -716,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -730,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@@ -757,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
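/*
 * Illustration only: with NEEDS_IOREMAP gone, io_mem_reserve() above
 * maps a TTM node to a bus address as aperture base plus the node's
 * first page shifted by PAGE_SHIFT.  Arithmetic sketch with
 * hypothetical values:
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t aper_base = 0xd0000000ULL;	/* hypothetical BAR1 base */
	uint64_t first_page = 0x120;		/* node's starting page */
	uint64_t bus = aper_base + (first_page << PAGE_SHIFT);

	printf("bus address = %#llx\n", (unsigned long long)bus);
	return 0;
}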
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -769,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+ .io_mem_free = &nouveau_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b84504d05..88f9bc0941eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fc..1fc57ef58295 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
int ret;
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->vm_end, NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_AGP, &pushbuf);
+ chan->pushbuf_base = pb->bo.offset;
+ } else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
}
ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
@@ -137,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
GFP_KERNEL);
if (!dev_priv->fifos[channel])
return -ENOMEM;
- dev_priv->fifo_alloc_count++;
chan = dev_priv->fifos[channel];
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
INIT_LIST_HEAD(&chan->fence.pending);
@@ -275,9 +279,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
*/
nouveau_fence_fini(chan);
- /* Ensure the channel is no longer active on the GPU */
+ /* This will prevent pfifo from switching channels. */
pfifo->reassign(dev, false);
+ /* We want to give pgraph a chance to idle and get rid of all potential
+	 * errors. We need to do this before taking the lock; otherwise the
+	 * irq handler is unable to process them.
+ * is unable to process them.
+ */
+ if (pgraph->channel(dev) == chan)
+ nouveau_wait_for_idle(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
pgraph->fifo_access(dev, false);
if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
@@ -293,6 +306,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
/* Release the channel's resources */
nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
if (chan->pushbuf_bo) {
@@ -305,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
iounmap(chan->user);
dev_priv->fifos[chan->id] = NULL;
- dev_priv->fifo_alloc_count--;
kfree(chan);
}
@@ -369,6 +383,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
return ret;
init->channel = chan->id;
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
@@ -408,7 +430,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +439,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f63353ea97..149ed224c3cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,18 @@ nouveau_connector_detect(struct drm_connector *connector)
struct nouveau_i2c_chan *i2c;
int type, flags;
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
-#ifdef CONFIG_ACPI
+ unsigned status = connector_status_connected;
+
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (!nouveau_ignorelid && !acpi_lid_open())
- return connector_status_disconnected;
+ status = connector_status_unknown;
#endif
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ return status;
}
/* Cleanup the previous EDID block. */
@@ -279,7 +282,7 @@ nouveau_connector_detect(struct drm_connector *connector)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -300,7 +303,7 @@ nouveau_connector_detect(struct drm_connector *connector)
detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
- if (!nv_encoder)
+ if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
if (nv_encoder) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -321,11 +324,11 @@ detect_analog:
static void
nouveau_connector_force(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
int type;
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -335,7 +338,7 @@ nouveau_connector_force(struct drm_connector *connector)
nv_encoder = find_encoder_by_type(connector, type);
if (!nv_encoder) {
- NV_ERROR(dev, "can't find encoder to force %s on!\n",
+ NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
connector->status = connector_status_disconnected;
return;
@@ -369,7 +372,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
/* LVDS always needs gpu scaling */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
value == DRM_MODE_SCALE_NONE)
return -EINVAL;
@@ -429,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
static struct drm_display_mode *
-nouveau_connector_native_mode(struct nouveau_connector *connector)
+nouveau_connector_native_mode(struct drm_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
- /* Use preferred mode if there is one.. */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ if (helper->mode_valid(connector, mode) != MODE_OK)
+ continue;
+
+ /* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
- }
- /* Otherwise, take the resolution with the largest width, then height,
- * then vertical refresh
- */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
if (mode->hdisplay < high_w)
continue;
@@ -535,7 +541,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
/* If we're not LVDS, destroy the previous native mode; the attached
* monitor could have changed.
*/
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
@@ -550,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
*/
if (!nv_connector->native_mode)
nv_connector->native_mode =
- nouveau_connector_native_mode(nv_connector);
+ nouveau_connector_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -563,7 +569,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = get_slave_funcs(nv_encoder)->
get_modes(to_drm_encoder(nv_encoder), connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -581,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
switch (nv_encoder->dcb->type) {
case OUTPUT_LVDS:
- BUG_ON(!nv_connector->native_mode);
- if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
- mode->vdisplay > nv_connector->native_mode->vdisplay)
+ if (nv_connector->native_mode &&
+ (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
@@ -591,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
break;
case OUTPUT_TMDS:
if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
- (dev_priv->card_type < NV_50 &&
- !nv_encoder->dcb->duallink_possible))
+ !nv_encoder->dcb->duallink_possible)
max_clock = 165000;
else
max_clock = 330000;
@@ -613,6 +618,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
clock *= 3;
break;
+ default:
+ BUG_ON(1);
+ return MODE_BAD;
}
if (clock < min_clock)
@@ -680,7 +688,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
/* Firstly try getting EDID over DDC, if allowed and I2C channel
* is available.
*/
- if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+ if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c) {
@@ -695,7 +703,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
- dev_priv->VBIOS.pub.fp_no_ddc)) {
+ dev_priv->vbios.fp_no_ddc)) {
nv_connector->native_mode = drm_mode_duplicate(dev, &native);
goto out;
}
@@ -704,7 +712,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
* stored for the panel in them.
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
- !dev_priv->VBIOS.pub.fp_no_ddc) {
+ !dev_priv->vbios.fp_no_ddc) {
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
@@ -723,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
if (ret == 0)
goto out;
nv_connector->detected_encoder = nv_encoder;
- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+ nv_connector->native_mode = nouveau_connector_native_mode(connector);
list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
@@ -739,46 +747,66 @@ out:
}
int
-nouveau_connector_create(struct drm_device *dev, int index, int type)
+nouveau_connector_create(struct drm_device *dev,
+ struct dcb_connector_table_entry *dcb)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_encoder *encoder;
- int ret;
+ int ret, type;
NV_DEBUG_KMS(dev, "\n");
- nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
- if (!nv_connector)
- return -ENOMEM;
- nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
- connector = &nv_connector->base;
-
- switch (type) {
- case DRM_MODE_CONNECTOR_VGA:
+ switch (dcb->type) {
+ case DCB_CONNECTOR_NONE:
+ return 0;
+ case DCB_CONNECTOR_VGA:
NV_INFO(dev, "Detected a VGA connector\n");
+ type = DRM_MODE_CONNECTOR_VGA;
break;
- case DRM_MODE_CONNECTOR_DVID:
- NV_INFO(dev, "Detected a DVI-D connector\n");
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ NV_INFO(dev, "Detected a TV connector\n");
+ type = DRM_MODE_CONNECTOR_TV;
break;
- case DRM_MODE_CONNECTOR_DVII:
+ case DCB_CONNECTOR_DVI_I:
NV_INFO(dev, "Detected a DVI-I connector\n");
+ type = DRM_MODE_CONNECTOR_DVII;
break;
- case DRM_MODE_CONNECTOR_LVDS:
- NV_INFO(dev, "Detected a LVDS connector\n");
+ case DCB_CONNECTOR_DVI_D:
+ NV_INFO(dev, "Detected a DVI-D connector\n");
+ type = DRM_MODE_CONNECTOR_DVID;
break;
- case DRM_MODE_CONNECTOR_TV:
- NV_INFO(dev, "Detected a TV connector\n");
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ NV_INFO(dev, "Detected a HDMI connector\n");
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ break;
+ case DCB_CONNECTOR_LVDS:
+ NV_INFO(dev, "Detected a LVDS connector\n");
+ type = DRM_MODE_CONNECTOR_LVDS;
break;
- case DRM_MODE_CONNECTOR_DisplayPort:
+ case DCB_CONNECTOR_DP:
NV_INFO(dev, "Detected a DisplayPort connector\n");
+ type = DRM_MODE_CONNECTOR_DisplayPort;
break;
- default:
- NV_ERROR(dev, "Unknown connector, this is not good.\n");
+ case DCB_CONNECTOR_eDP:
+ NV_INFO(dev, "Detected an eDP connector\n");
+ type = DRM_MODE_CONNECTOR_eDP;
break;
+ default:
+ NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
+ return -EINVAL;
}
+ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ if (!nv_connector)
+ return -ENOMEM;
+ nv_connector->dcb = dcb;
+ connector = &nv_connector->base;
+
/* defaults, will get overridden in detect() */
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
@@ -786,55 +814,77 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* attach encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->connector != dcb->index)
+ continue;
+
+ if (get_slave_funcs(nv_encoder))
+ get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, " no encoders, ignoring\n");
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return 0;
+ }
+
/* Init DVI-I specific properties */
- if (type == DRM_MODE_CONNECTOR_DVII) {
+ if (dcb->type == DCB_CONNECTOR_DVI_I) {
drm_mode_create_dvi_i_properties(dev);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- if (type != DRM_MODE_CONNECTOR_LVDS)
+ if (dcb->type != DCB_CONNECTOR_LVDS)
nv_connector->use_dithering = false;
- if (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_LVDS ||
- type == DRM_MODE_CONNECTOR_DisplayPort) {
- nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
-
- drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
- nv_connector->scaling_mode);
- drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
- : DRM_MODE_DITHERING_OFF);
-
- } else {
- nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
-
- if (type == DRM_MODE_CONNECTOR_VGA &&
- dev_priv->card_type >= NV_50) {
+ switch (dcb->type) {
+ case DCB_CONNECTOR_VGA:
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
- }
-
- /* attach encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->connector != index)
- continue;
-
- if (get_slave_funcs(nv_encoder))
- get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+ /* fall-through */
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+ break;
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /* fall-through */
+ default:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ drm_connector_attach_property(connector,
+ dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ?
+ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ break;
}
drm_sysfs_connector_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ if (dcb->type == DCB_CONNECTOR_LVDS) {
ret = nouveau_connector_create_lvds(dev, connector);
if (ret) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b8090e5ff..4ef38abc2d9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
-int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
+int nouveau_connector_create(struct drm_device *,
+ struct dcb_connector_table_entry *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 49fa7b2d257e..cb1ce2a09162 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -40,6 +40,8 @@ struct nouveau_crtc {
int sharpness;
int last_dpms;
+ int cursor_saved_x, cursor_saved_y;
+
struct {
int cpp;
bool blanked;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db3698f16..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@@ -47,12 +49,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
+ seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
+ seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
+ }
seq_printf(m, "gpu fifo state:\n");
seq_printf(m, " get: 0x%08x\n",
nvchan_rd32(chan, chan->user_get));
seq_printf(m, " put: 0x%08x\n",
nvchan_rd32(chan, chan->user_put));
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib get: 0x%08x\n",
+ nvchan_rd32(chan, 0x88));
+ seq_printf(m, " ib put: 0x%08x\n",
+ nvchan_rd32(chan, 0x8c));
+ }
seq_printf(m, "last fence : %d\n", chan->fence.sequence);
seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -126,16 +139,29 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
+ struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
+
+ seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
+ return 0;
+}
+
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+ int i;
- seq_printf(m, "VRAM total: %dKiB\n",
- (int)(nouveau_mem_fb_amount(dev) >> 10));
+ for (i = 0; i < dev_priv->vbios.length; i++)
+ seq_printf(m, "%c", dev_priv->vbios.data[i]);
return 0;
}
static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc94391d71e..74e6b4ed12c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,16 +34,9 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct drm_device *dev = drm_fb->dev;
- if (drm_fb->fbdev)
- nouveau_fbcon_remove(dev, drm_fb);
-
- if (fb->nvbo) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(fb->nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (fb->nvbo)
+ drm_gem_object_unreference_unlocked(fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -64,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
- struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
- struct nouveau_framebuffer *fb;
int ret;
- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
- kfree(fb);
- return NULL;
+ return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
- fb->nvbo = nvbo;
- return &fb->base;
+ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+ nouveau_fb->nvbo = nvbo;
+ return 0;
}
static struct drm_framebuffer *
@@ -92,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
- if (!fb) {
+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!nouveau_fb)
+ return NULL;
+
+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+ if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
- return fb;
+ return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .fb_changed = nouveau_fbcon_probe,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745af..65c441a1999f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+
+ if (dev_priv->card_type == NV_50) {
+ const int ib_size = pushbuf->bo.mem.size / 2;
+
+ chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
+ chan->dma.ib_max = (ib_size / 8) - 1;
+ chan->dma.ib_put = 0;
+ chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+
+ chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
+ } else {
+ chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
+ }
+
chan->dma.put = 0;
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
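/*
 * Illustration only: on NV50 the push buffer is split in half -- the
 * front half holds command words, the back half holds the indirect
 * buffer at one 8-byte entry per slot.  Arithmetic sketch for a
 * hypothetical 64KiB push buffer:
 */
#include <stdio.h>

int main(void)
{
	unsigned size = 64 * 1024;
	unsigned ib_size = size / 2;

	printf("ib_base (word offset): %u\n", (size - ib_size) >> 2);
	printf("ib_max  (slots):       %u\n", ib_size / 8 - 1);
	printf("dma.max (words):       %u\n", (size - ib_size) >> 2);
	return 0;
}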
@@ -162,12 +177,106 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
return (val - chan->pushbuf_base) >> 2;
}
+void
+nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
+ int delta, int length)
+{
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ uint64_t offset = bo->bo.offset + delta;
+ int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+
+ BUG_ON(chan->dma.ib_free < 1);
+ nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
+ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+
+ chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+
+ DRM_MEMORYBARRIER();
+ /* Flush writes. */
+ nouveau_bo_rd32(pb, 0);
+
+ nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+ chan->dma.ib_free--;
+}
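/*
 * Illustration only: each IB entry written above is two 32-bit words --
 * the low word holds the buffer's low address bits, the high word the
 * upper address bits with the command length packed into bits 8 and up.
 * Sketch with a made-up offset and length:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 0x12345678abcULL;	/* hypothetical GPU address */
	uint32_t length = 0x40;			/* bytes of commands */
	uint32_t lo = (uint32_t)offset;
	uint32_t hi = (uint32_t)(offset >> 32) | (length << 8);

	printf("IB entry: lo=%08x hi=%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}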
+
+static int
+nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+
+ while (chan->dma.ib_free < count) {
+ uint32_t get = nvchan_rd32(chan, 0x88);
+ if (get != prev_get) {
+ prev_get = get;
+ cnt = 0;
+ }
+
+ if ((++cnt & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (cnt > 100000)
+ return -EBUSY;
+ }
+
+ chan->dma.ib_free = get - chan->dma.ib_put;
+ if (chan->dma.ib_free <= 0)
+ chan->dma.ib_free += chan->dma.ib_max + 1;
+ }
+
+ return 0;
+}
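/*
 * Illustration only: the wait loop above computes free IB slots from
 * the GET/PUT ring pointers, wrapping modulo (ib_max + 1) when PUT is
 * ahead of GET.  Sketch:
 */
#include <stdio.h>

static int ib_free(int get, int put, int ib_max)
{
	int free = get - put;

	if (free <= 0)
		free += ib_max + 1;	/* ring wrapped around */
	return free;
}

int main(void)
{
	printf("%d\n", ib_free(10, 4, 127));	/* 6 */
	printf("%d\n", ib_free(4, 10, 127));	/* 122 (wrapped) */
	return 0;
}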
+
+static int
+nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+ int ret;
+
+ ret = nv50_dma_push_wait(chan, slots + 1);
+ if (unlikely(ret))
+ return ret;
+
+ while (chan->dma.free < count) {
+ int get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+
+ return get;
+ }
+
+ if (get <= chan->dma.cur) {
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+ if (chan->dma.free >= count)
+ break;
+
+ FIRE_RING(chan);
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+ return get;
+ }
+ } while (get == 0);
+ chan->dma.cur = 0;
+ chan->dma.put = 0;
+ }
+
+ chan->dma.free = get - chan->dma.cur - 1;
+ }
+
+ return 0;
+}
+
int
-nouveau_dma_wait(struct nouveau_channel *chan, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
uint32_t prev_get = 0, cnt = 0;
int get;
+ if (chan->dma.ib_max)
+ return nv50_dma_wait(chan, slots, size);
+
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd655f93e..8b05c15866d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
#define NOUVEAU_DMA_DEBUG 0
#endif
+void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
+ int delta, int length);
+
/*
* There's a hw race condition where you can't jump to your PUT offset;
* to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
- if (chan->dma.free < size) {
- int ret;
+ int ret;
- ret = nouveau_dma_wait(chan, size);
- if (ret)
- return ret;
- }
+ ret = nouveau_dma_wait(chan, 1, size);
+ if (ret)
+ return ret;
chan->dma.free -= size;
return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- WRITE_PUT(chan->dma.cur);
+ if (chan->dma.ib_max) {
+ nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+ (chan->dma.cur - chan->dma.put) << 2);
+ } else {
+ WRITE_PUT(chan->dma.cur);
+ }
+
chan->dma.put = chan->dma.cur;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index f954ad93e81f..deeb21c6865c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
- for (;;) {
+ for (i = 0; i < 16; i++) {
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
@@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
break;
}
+ if (i == 16) {
+ NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
if (cmd & 1) {
if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
ret = -EREMOTEIO;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b84502..273770432298 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,14 +75,22 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable all acceleration");
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
+int nouveau_override_conntype = 0;
+module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
+int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -135,7 +143,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
+int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -145,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -154,9 +161,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ NV_INFO(dev, "Disabling fbcon acceleration...\n");
+ nouveau_fbcon_save_disable_accel(dev);
+ NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -167,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
nouveau_bo_unpin(nouveau_fb->nvbo);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
+
NV_INFO(dev, "Evicting buffers...\n");
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
@@ -220,9 +235,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 1);
+ nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
- dev_priv->fbdev_info->flags = fbdev_flags;
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -233,21 +248,19 @@ out_abort:
return ret;
}
-static int
+int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@@ -308,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ NV_ERROR(dev, "Could not pin/map cursor.\n");
+ }
+
if (dev_priv->card_type < NV_50) {
nv04_display_restore(dev);
NVLockVgaCrtcs(dev, false);
} else
nv50_display_init(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_offset(nv_crtc,
+ nv_crtc->cursor.nvbo->bo.offset -
+ dev_priv->vm_vram_base);
+
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
+
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -322,13 +357,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 0);
+ nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
- dev_priv->fbdev_info->flags = fbdev_flags;
+
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
@@ -402,8 +438,10 @@ static int __init nouveau_init(void)
nouveau_modeset = 1;
}
- if (nouveau_modeset == 1)
+ if (nouveau_modeset == 1) {
driver.driver_features |= DRIVER_MODESET;
+ nouveau_register_dsm_handler();
+ }
return drm_init(&driver);
}
@@ -411,6 +449,7 @@ static int __init nouveau_init(void)
static void __exit nouveau_exit(void)
{
drm_exit(&driver);
+ nouveau_unregister_dsm_handler();
}
module_init(nouveau_init);
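/*
 * Aside (illustrative sketch, not part of the patch): the suspend and
 * resume hunks above replace open-coded fb_info flag juggling with the
 * new fbcon helpers.  A condensed view of the bracketing they perform,
 * with buffer eviction and hardware state save/restore elided:
 */
static void example_suspend_bracket(struct drm_device *dev, int suspending)
{
	/* keep fbcon from issuing accelerated rendering while the GPU is down */
	nouveau_fbcon_save_disable_accel(dev);

	/* ... unpin scanout/cursor buffers, evict VRAM, save state ... */

	/* fb_set_suspend() must run under the console semaphore */
	acquire_console_sem();
	nouveau_fbcon_set_suspend(dev, suspending);
	release_console_sem();

	/* restore the acceleration flags saved above */
	nouveau_fbcon_restore_accel(dev);
}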
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71c..c69719106489 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 15
+#define DRIVER_PATCHLEVEL 16
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
@@ -76,6 +76,7 @@ struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
u32 placements[3];
+ u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
struct list_head head;
@@ -83,6 +84,7 @@ struct nouveau_bo {
struct drm_file *reserved_by;
struct list_head entry;
int pbbo_index;
+ bool validate_mapped;
struct nouveau_channel *channel;
@@ -239,6 +241,11 @@ struct nouveau_channel {
int cur;
int put;
/* access via pushbuf_bo */
+
+ int ib_base;
+ int ib_max;
+ int ib_free;
+ int ib_put;
} dma;
uint32_t sw_subchannel[8];
@@ -513,6 +520,7 @@ struct drm_nouveau_private {
struct workqueue_struct *wq;
struct work_struct irq_work;
+ struct work_struct hpd_work;
struct list_head vbl_waiting;
@@ -533,6 +541,9 @@ struct drm_nouveau_private {
struct nouveau_engine engine;
struct nouveau_channel *channel;
+ /* For PFIFO and PGRAPH. */
+ spinlock_t context_switch_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_gpuobj *ramht;
uint32_t ramin_rsvd_vram;
@@ -544,12 +555,6 @@ struct drm_nouveau_private {
uint32_t ramro_offset;
uint32_t ramro_size;
- /* base physical adresses */
- uint64_t fb_phys;
- uint64_t fb_available_size;
- uint64_t fb_mappable_pages;
- uint64_t fb_aper_free;
-
struct {
enum {
NOUVEAU_GART_NONE = 0,
@@ -563,10 +568,6 @@ struct drm_nouveau_private {
struct nouveau_gpuobj *sg_ctxdma;
struct page *sg_dummy_page;
dma_addr_t sg_dummy_bus;
-
- /* nottm hack */
- struct drm_ttm_backend *sg_be;
- unsigned long sg_handle;
} gart_info;
/* nv10-nv40 tiling regions */
@@ -575,6 +576,16 @@ struct drm_nouveau_private {
spinlock_t lock;
} tile;
+ /* VRAM/fb configuration */
+ uint64_t vram_size;
+ uint64_t vram_sys_base;
+
+ uint64_t fb_phys;
+ uint64_t fb_available_size;
+ uint64_t fb_mappable_pages;
+ uint64_t fb_aper_free;
+ int fb_mtrr;
+
/* G8x/G9x virtual address space */
uint64_t vm_gart_base;
uint64_t vm_gart_size;
@@ -583,10 +594,6 @@ struct drm_nouveau_private {
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- uint64_t vram_sys_base;
-
- /* the mtrr covering the FB */
- int fb_mtrr;
struct mem_block *ramin_heap;
@@ -596,8 +603,7 @@ struct drm_nouveau_private {
struct list_head gpuobj_list;
- struct nvbios VBIOS;
- struct nouveau_bios_info *vbios;
+ struct nvbios vbios;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -606,21 +612,19 @@ struct drm_nouveau_private {
uint32_t dac_users[4];
struct nouveau_suspend_resume {
- uint32_t fifo_mode;
- uint32_t graph_ctx_control;
- uint32_t graph_state;
uint32_t *ramin_copy;
- uint64_t ramin_size;
} susres;
struct backlight_device *backlight;
- bool acpi_dsm;
struct nouveau_channel *evo;
struct {
struct dentry *channel_root;
} debugfs;
+
+ struct nouveau_fbdev *nfbdev;
+ struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@@ -674,6 +678,7 @@ extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
extern int nouveau_fbpercrtc;
+extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
@@ -681,6 +686,10 @@ extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_override_conntype;
+
+extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
+extern int nouveau_pci_resume(struct pci_dev *pdev);
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -696,12 +705,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
-extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_resume(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -711,7 +714,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
struct drm_file *, int tail);
extern void nouveau_mem_takedown(struct mem_block **heap);
extern void nouveau_mem_free_block(struct mem_block *);
-extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern int nouveau_mem_detect(struct drm_device *dev);
extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
@@ -845,21 +848,20 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
/* nouveau_dma.c */
extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
-extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
-#ifdef CONFIG_ACPI
-extern int nouveau_hybrid_setup(struct drm_device *dev);
-extern bool nouveau_dsm_probe(struct drm_device *dev);
+#define ROM_BIOS_PAGE 4096
+#if defined(CONFIG_ACPI)
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
-static inline int nouveau_hybrid_setup(struct drm_device *dev)
-{
- return 0;
-}
-static inline bool nouveau_dsm_probe(struct drm_device *dev)
-{
- return false;
-}
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
#endif
/* nouveau_backlight.c */
@@ -928,6 +930,10 @@ extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
+/* nv50_fb.c */
+extern int nv50_fb_init(struct drm_device *);
+extern void nv50_fb_takedown(struct drm_device *);
+
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
@@ -1027,6 +1033,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
+extern int nv50_grctx_init(struct nouveau_grctx *);
/* nouveau_grctx.c */
extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1119,7 +1126,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
extern void nouveau_bo_unmap(struct nouveau_bo *);
-extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
+extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
+ uint32_t busy);
extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
@@ -1152,16 +1160,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
- struct drm_file *);
extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
@@ -1173,6 +1171,16 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv50_gpio.c */
+int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+
+/* nv50_calc.c */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+ int clk, int *N, int *fN, int *M, int *P);
+
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
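/*
 * Illustrative consumer (a sketch under assumptions, not from the patch)
 * of the chunked ACPI ROM interface declared above: the VBIOS image is
 * fetched ROM_BIOS_PAGE bytes at a time.  shadow_vbios_via_acpi and its
 * parameters are hypothetical; nouveau_acpi_get_bios_chunk() is assumed
 * to return the number of bytes copied, or a negative errno.
 */
static int shadow_vbios_via_acpi(struct pci_dev *pdev, uint8_t *buf, int len)
{
	int offset = 0;

	if (!nouveau_acpi_rom_supported(pdev))
		return -ENODEV;

	while (offset < len) {
		int ret = nouveau_acpi_get_bios_chunk(buf + offset, offset,
						      ROM_BIOS_PAGE);
		if (ret <= 0)
			return ret ? ret : -EIO;
		offset += ret;
	}
	return 0;
}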
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index bc4a24029ed1..e1df8209cd0f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -47,6 +47,9 @@ struct nouveau_encoder {
union {
struct {
+ int mc_unknown;
+ uint32_t unk0;
+ uint32_t unk1;
int dpcd_version;
int link_nr;
int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa1949..d432134b71e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
- struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
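/*
 * Sketch of the embed-and-init usage this API change enables (my_fbdev
 * and my_fbdev_setup are hypothetical names): the framebuffer object now
 * lives inside the caller's structure, so the helper only initialises it
 * and no longer allocates.
 */
struct my_fbdev {
	struct nouveau_framebuffer fb;	/* embedded, not a pointer */
};

static int my_fbdev_setup(struct drm_device *dev, struct my_fbdev *fbdev,
			  struct drm_mode_fb_cmd *cmd, struct nouveau_bo *nvbo)
{
	/* initialise the embedded object in place */
	return nouveau_framebuffer_init(dev, &fbdev->fb, cmd, nvbo);
}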
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2efef3..257ea130ae13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -30,12 +30,12 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
-#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct pci_dev *pdev = dev->pdev;
- int ramin;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return false;
-
- if (screen_info.lfb_base < pci_resource_start(pdev, 1))
- goto not_fb;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
- goto not_fb;
-
- return true;
-not_fb:
- ramin = 2;
- if (pci_resource_len(pdev, ramin) == 0) {
- ramin = 3;
- if (pci_resource_len(pdev, ramin) == 0)
- return false;
- }
-
- if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
- return false;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
- return false;
-
- return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height, uint32_t surface_depth,
- uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
- struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev->pdev;
+ struct device *device = &pdev->dev;
int size, ret;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
- if (!fb) {
+ info = framebuffer_alloc(0, device);
+ if (!info) {
ret = -ENOMEM;
- NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- nouveau_fb = nouveau_framebuffer(fb);
- *pfb = fb;
-
- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
- if (!info) {
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
ret = -ENOMEM;
goto out_unref;
}
- par = info->par;
- par->helper.funcs = &nouveau_fbcon_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
- if (ret)
- goto out_unref;
- dev_priv->fbdev_info = info;
+ info->par = nfbdev;
+
+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+ nouveau_fb = &nfbdev->nouveau_fb;
+ fb = &nouveau_fb->base;
+
+ /* setup helper */
+ nfbdev->helper.fb = fb;
+ nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+ info->fix.mmio_start = pci_resource_start(pdev, 1);
+ info->fix.mmio_len = pci_resource_len(pdev, 1);
/* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
- /* Some NVIDIA VBIOS' are stupid and decide to put the
- * framebuffer in the middle of the PRAMIN BAR for
- * whatever reason. We need to know the exact lfb_base
- * to get vesafb kicked off, and the only reliable way
- * we have left is to find out lfb_base the same way
- * vesafb did.
- */
- info->aperture_base = screen_info.lfb_base;
- info->aperture_size = screen_info.lfb_size;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
- info->aperture_size *= 65536;
- } else
-#endif
- {
- info->aperture_base = info->fix.mmio_start;
- info->aperture_size = info->fix.mmio_len;
+ info->apertures = dev_priv->apertures;
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
}
info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->nouveau_fb = nouveau_fb;
- par->dev = dev;
-
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill(dev, nfbdev);
/* To allow resizing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -370,6 +300,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
nvbo->bo.offset, nvbo);
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unref:
@@ -378,46 +309,130 @@ out:
return ret;
}
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = nouveau_fbcon_create(nfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
- if (info) {
- struct nouveau_fbcon_par *par = info->par;
-
+ if (nfbdev->helper.fbdev) {
+ info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
- nouveau_bo_unmap(nouveau_fb->nvbo);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(nouveau_fb->nvbo->gem);
- nouveau_fb->nvbo = NULL;
- mutex_unlock(&dev->struct_mutex);
- if (par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ if (nouveau_fb->nvbo) {
+ nouveau_bo_unmap(nouveau_fb->nvbo);
+ drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+ nouveau_fb->nvbo = NULL;
+ }
+ drm_fb_helper_fini(&nfbdev->helper);
+ drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get,
+ .fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *nfbdev;
+ int ret;
+
+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!nfbdev)
+ return -ENOMEM;
+
+ nfbdev->dev = dev;
+ dev_priv->nfbdev = nfbdev;
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &nfbdev->helper,
+ nv_two_heads(dev) ? 2 : 1, 4);
+ if (ret) {
+ kfree(nfbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
+ return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->nfbdev)
+ return;
+
+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+ kfree(dev_priv->nfbdev);
+ dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
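/*
 * Assumed driver-side pairing of the new entry points (sketch only;
 * the example_* names are hypothetical):
 */
static int example_load_fbdev(struct drm_device *dev)
{
	/* allocates dev_priv->nfbdev, registers connectors, picks a config */
	return nouveau_fbcon_init(dev);
}

static void example_unload_fbdev(struct drm_device *dev)
{
	/* unregisters the framebuffer and frees dev_priv->nfbdev */
	nouveau_fbcon_fini(dev);
}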
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c11..e7e12684c37e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
struct drm_fb_helper helper;
+ struct nouveau_framebuffer nouveau_fb;
+ struct list_head fbdev_list;
struct drm_device *dev;
- struct nouveau_framebuffer *nouveau_fb;
+ unsigned int saved_flags;
};
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3b..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
+
+ drm_gem_object_release(gem);
+ kfree(gem);
}
int
@@ -167,12 +170,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(nvbo->gem);
if (ret)
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
@@ -182,40 +183,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint64_t flags;
+ uint32_t domains = valid_domains &
+ (write_domains ? write_domains : read_domains);
+ uint32_t pref_flags = 0, valid_flags = 0;
- if (!valid_domains || (!read_domains && !write_domains))
+ if (!domains)
return -EINVAL;
- if (write_domains) {
- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
- flags = TTM_PL_FLAG_VRAM;
- else
- if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
- (write_domains & NOUVEAU_GEM_DOMAIN_GART))
- flags = TTM_PL_FLAG_TT;
- else
- return -EINVAL;
- } else {
- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- bo->mem.mem_type == TTM_PL_VRAM)
- flags = TTM_PL_FLAG_VRAM;
- else
- if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
- (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
- bo->mem.mem_type == TTM_PL_TT)
- flags = TTM_PL_FLAG_TT;
- else
- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
- flags = TTM_PL_FLAG_VRAM;
- else
- flags = TTM_PL_FLAG_TT;
- }
+ if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+ valid_flags |= TTM_PL_FLAG_VRAM;
+
+ if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+ valid_flags |= TTM_PL_FLAG_TT;
+
+ if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ bo->mem.mem_type == TTM_PL_VRAM)
+ pref_flags |= TTM_PL_FLAG_VRAM;
+
+ else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ bo->mem.mem_type == TTM_PL_TT)
+ pref_flags |= TTM_PL_FLAG_TT;
+
+ else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
+ pref_flags |= TTM_PL_FLAG_VRAM;
+
+ else
+ pref_flags |= TTM_PL_FLAG_TT;
+
+ nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
- nouveau_bo_placement_set(nvbo, flags);
return 0;
}
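/*
 * The rewritten nouveau_gem_set_domain() above boils down to the policy
 * sketched here (simplified to a single pre-intersected domain mask;
 * ttm_flags_for_domains is a hypothetical name): the valid set is every
 * placement allowed, while the preferred flag keeps the buffer where it
 * already resides when possible, else falls back VRAM-then-GART.
 */
static void ttm_flags_for_domains(struct ttm_buffer_object *bo,
				  uint32_t domains,
				  uint32_t *pref, uint32_t *valid)
{
	*pref = *valid = 0;

	if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		*valid |= TTM_PL_FLAG_VRAM;
	if (domains & NOUVEAU_GEM_DOMAIN_GART)
		*valid |= TTM_PL_FLAG_TT;

	/* prefer the pool the buffer already occupies, avoiding a move */
	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		*pref = TTM_PL_FLAG_VRAM;
	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		*pref = TTM_PL_FLAG_TT;
	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		*pref = TTM_PL_FLAG_VRAM;
	else
		*pref = TTM_PL_FLAG_TT;
}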
@@ -243,6 +239,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
nouveau_fence_unref((void *)&prev_fence);
}
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+ nvbo->validate_mapped = false;
+ }
+
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +303,14 @@ retry:
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
drm_gem_object_unreference(gem);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail reserve\n");
return ret;
+ }
goto retry;
}
+ b->user_priv = (uint64_t)(unsigned long)nvbo;
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +340,10 @@ retry:
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail wait_cpu\n");
return ret;
+ }
goto retry;
}
}
@@ -351,6 +357,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
{
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
+ struct drm_device *dev = chan->dev;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -362,39 +369,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail wait other chan\n");
return ret;
+ }
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail set_domain\n");
return ret;
+ }
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
+ }
- if (nvbo->bo.offset == b->presumed_offset &&
+ if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed_offset = nvbo->bo.offset;
- b->presumed_ok = 0;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
relocs++;
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
@@ -408,6 +422,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
+ struct drm_device *dev = chan->dev;
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +433,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "validate_init\n");
return ret;
+ }
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -430,6 +448,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -437,6 +456,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -465,59 +485,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
- struct drm_nouveau_gem_pushbuf_bo *bo,
- unsigned nr_relocs, uint64_t ptr_relocs,
- unsigned nr_dwords, unsigned first_dword,
- uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+ struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_bo *bo)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
- struct drm_device *dev = chan->dev;
int ret = 0;
unsigned i;
- reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
return PTR_ERR(reloc);
- for (i = 0; i < nr_relocs; i++) {
+ for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
+ struct nouveau_bo *nvbo;
uint32_t data;
- if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
- r->reloc_index >= first_dword + nr_dwords) {
- NV_ERROR(dev, "Bad relocation %d\n", i);
- NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
- NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index >= req->nr_buffers)) {
+ NV_ERROR(dev, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
- if (b->presumed_ok)
+ if (b->presumed.valid)
continue;
+		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
+ NV_ERROR(dev, "reloc container bo index invalid\n");
+ ret = -EINVAL;
+ break;
+ }
+ nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+ if (unlikely(r->reloc_bo_offset + 4 >
+ nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+ NV_ERROR(dev, "reloc outside of bo\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!nvbo->kmap.virtual) {
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "failed kmap for reloc\n");
+ break;
+ }
+ nvbo->validate_mapped = true;
+ }
+
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
- data = b->presumed_offset + r->data;
+ data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
- data = (b->presumed_offset + r->data) >> 32;
+ data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
- if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+ if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
- if (is_iomem)
- iowrite32_native(data, (void __force __iomem *)
- &pushbuf[r->reloc_index]);
- else
- pushbuf[r->reloc_index] = data;
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (ret) {
+ NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+ break;
+ }
+
+ nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
kfree(reloc);
@@ -528,127 +571,50 @@ int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_pushbuf *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct drm_nouveau_gem_pushbuf_push *push;
+ struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
- struct nouveau_fence* fence = 0;
- uint32_t *pushbuf = NULL;
- int ret = 0, do_reloc = 0, i;
+	struct nouveau_fence *fence = NULL;
+ int i, j, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
- if (req->nr_dwords >= chan->dma.max ||
- req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
- chan->dma.max - 1);
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
- return -EINVAL;
- }
-
- pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
- if (IS_ERR(pushbuf))
- return PTR_ERR(pushbuf);
-
- bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo)) {
- kfree(pushbuf);
- return PTR_ERR(bo);
- }
-
- mutex_lock(&dev->struct_mutex);
-
- /* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
- req->nr_buffers, &op, &do_reloc);
- if (ret)
- goto out;
-
- /* Apply any relocations that are required */
- if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
- bo, req->nr_relocs,
- req->relocs,
- req->nr_dwords, 0,
- pushbuf, false);
- if (ret)
- goto out;
- }
-
- /* Emit push buffer to the hw
- */
- ret = RING_SPACE(chan, req->nr_dwords);
- if (ret)
- goto out;
-
- OUT_RINGp(chan, pushbuf, req->nr_dwords);
+ req->vram_available = dev_priv->fb_aper_free;
+ req->gart_available = dev_priv->gart_info.aper_free;
+ if (unlikely(req->nr_push == 0))
+ goto out_next;
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret) {
- NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
- WIND_RING(chan);
- goto out;
+ if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+ NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+ req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ return -EINVAL;
}
- if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- if (ret) {
- for (i = 0; i < req->nr_dwords; i++)
- NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
- NV_ERROR(dev, "^^ above push buffer is fail :(\n");
- }
+ if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+ NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+ req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ return -EINVAL;
}
-out:
- validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
- kfree(pushbuf);
- kfree(bo);
- return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
- struct nouveau_channel *chan;
- struct drm_gem_object *gem;
- struct nouveau_bo *pbbo;
- struct validate_op op;
- struct nouveau_fence* fence = 0;
- int i, ret = 0, do_reloc = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
- if (unlikely(req->handle == 0))
- goto out_next;
-
- if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
+ if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+ NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+ req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return -EINVAL;
}
+ push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+ if (IS_ERR(push))
+ return PTR_ERR(push);
+
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo))
+ if (IS_ERR(bo)) {
+ kfree(push);
return PTR_ERR(bo);
+ }
mutex_lock(&dev->struct_mutex);
@@ -660,122 +626,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
goto out;
}
- /* Validate DMA push buffer */
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem) {
- NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
- ret = -EINVAL;
- goto out;
- }
- pbbo = nouveau_gem_object(gem);
-
- if ((req->offset & 3) || req->nr_dwords < 2 ||
- (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
- (unsigned long)req->nr_dwords >
- ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
- NV_ERROR(dev, "pb call misaligned or out of bounds: "
- "%d + %d * 4 > %ld\n",
- req->offset, req->nr_dwords, pbbo->bo.mem.size);
- ret = -EINVAL;
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
- chan->fence.sequence);
- if (ret) {
- NV_ERROR(dev, "resv pb: %d\n", ret);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
- ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
- if (ret) {
- NV_ERROR(dev, "validate pb: %d\n", ret);
- ttm_bo_unreserve(&pbbo->bo);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- list_add_tail(&pbbo->entry, &op.both_list);
-
- /* If presumed return address doesn't match, we need to map the
- * push buffer and fix it..
- */
- if (!PUSHBUF_CAL) {
- uint32_t retaddy;
-
- if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
- ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
- if (ret) {
- NV_ERROR(dev, "jmp_space: %d\n", ret);
- goto out;
- }
- }
-
- retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
- retaddy |= 0x20000000;
- if (retaddy != req->suffix0) {
- req->suffix0 = retaddy;
- do_reloc = 1;
- }
- }
-
/* Apply any relocations that are required */
if (do_reloc) {
- void *pbvirt;
- bool is_iomem;
- ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
- &pbbo->kmap);
+ ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
if (ret) {
- NV_ERROR(dev, "kmap pb: %d\n", ret);
+ NV_ERROR(dev, "reloc apply: %d\n", ret);
goto out;
}
+ }
- pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
- req->nr_relocs,
- req->relocs,
- req->nr_dwords,
- req->offset / 4,
- pbvirt, is_iomem);
-
- if (!PUSHBUF_CAL) {
- nouveau_bo_wr32(pbbo,
- req->offset / 4 + req->nr_dwords - 2,
- req->suffix0);
- }
-
- ttm_bo_kunmap(&pbbo->kmap);
+ if (chan->dma.ib_max) {
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
if (ret) {
- NV_ERROR(dev, "reloc apply: %d\n", ret);
+ NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
}
- }
- if (PUSHBUF_CAL) {
- ret = RING_SPACE(chan, 2);
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+
+ nv50_dma_push(chan, nvbo, push[i].offset,
+ push[i].length);
+ }
+ } else
+ if (dev_priv->card_type >= NV_20) {
+ ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 2);
- OUT_RING(chan, 0);
+
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 2);
+ OUT_RING(chan, 0);
+ }
} else {
- ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+ ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_ERROR(dev, "jmp_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 0x20000000);
- OUT_RING(chan, 0);
- /* Space the jumps apart with NOPs. */
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+ uint32_t cmd;
+
+ cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ cmd |= 0x20000000;
+ if (unlikely(cmd != req->suffix0)) {
+ if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.num_pages,
+							  &nvbo->kmap);
+ if (ret) {
+ WIND_RING(chan);
+ goto out;
+ }
+ nvbo->validate_mapped = true;
+ }
+
+ nouveau_bo_wr32(nvbo, (push[i].offset +
+ push[i].length - 8) / 4, cmd);
+ }
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 0x20000000);
OUT_RING(chan, 0);
+ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+ OUT_RING(chan, 0);
+ }
}
ret = nouveau_fence_new(chan, &fence, true);
@@ -790,9 +718,14 @@ out:
nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
+ kfree(push);
out_next:
- if (PUSHBUF_CAL) {
+ if (chan->dma.ib_max) {
+ req->suffix0 = 0x00000000;
+ req->suffix1 = 0x00000000;
+ } else
+ if (dev_priv->card_type >= NV_20) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -804,19 +737,6 @@ out_next:
return ret;
}
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
-
- req->vram_available = dev_priv->fb_aper_free;
- req->gart_available = dev_priv->gart_info.aper_free;
-
- return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -831,74 +751,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
}
int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
- return -EINVAL;
- }
-
- if (!DRM_SUSER(DRM_CURPROC))
- return -EPERM;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
- if (ret)
- goto out;
-
- req->offset = nvbo->bo.offset;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- req->domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- int ret;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
-
- ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -935,9 +787,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
}
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -965,9 +815,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
ret = 0;
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -986,9 +834,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -EINVAL;
ret = nouveau_gem_info(gem, req);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index c7ebec696747..f731c5f60536 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -23,6 +23,7 @@
*/
#include <linux/firmware.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "nouveau_drv.h"
@@ -67,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxprog) {
NV_ERROR(dev, "OOM copying ctxprog\n");
release_firmware(fw);
return -ENOMEM;
}
- memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -96,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxvals) {
NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
}
- memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
if (le32_to_cpu(cv->signature) != 0x5643564e ||
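/*
 * Both hunks above are the standard kmalloc()+memcpy() -> kmemdup()
 * conversion.  Minimal standalone illustration (copy_blob is a
 * hypothetical name):
 */
#include <linux/slab.h>

static void *copy_blob(const void *src, size_t len)
{
	/* allocate and copy in one call; returns NULL on failure */
	return kmemdup(src, len, GFP_KERNEL);
}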
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792a5c96..7855b35effc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d28122..316a3c7e6eb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
- if (index > DCB_MAX_NUM_I2C_ENTRIES)
+ if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->bdcb.dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
- return NULL;
+ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ uint32_t reg = 0xe500, val;
+
+ if (i2c->port_type == 6) {
+ reg += i2c->read * 0x50;
+ val = 0x2002;
+ } else {
+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ val = 0xe001;
+ }
+
+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
- return bios->bdcb.dcb.i2c[index].chan;
+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+ return NULL;
+ return i2c->chan;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b1..53360f156063 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
if (dev_priv->card_type == NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
}
@@ -311,6 +312,31 @@ nouveau_print_bitfield_names_(uint32_t value,
#define nouveau_print_bitfield_names(val, namelist) \
nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
+struct nouveau_enum_names {
+ uint32_t value;
+ const char *name;
+};
+
+static void
+nouveau_print_enum_names_(uint32_t value,
+ const struct nouveau_enum_names *namelist,
+ const int namelist_len)
+{
+ /*
+ * Caller must have already printed the KERN_* log level for us.
+ * Also the caller is responsible for adding the newline.
+ */
+ int i;
+ for (i = 0; i < namelist_len; ++i) {
+ if (value == namelist[i].value) {
+ printk("%s", namelist[i].name);
+ return;
+ }
+ }
+ printk("unknown value 0x%08x", value);
+}
+#define nouveau_print_enum_names(val, namelist) \
+ nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +453,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
- NV_INFO(dev, "%s - nSource:", id);
- nouveau_print_bitfield_names(nsource, nsource_names);
- printk(", nStatus:");
- if (dev_priv->card_type < NV_10)
- nouveau_print_bitfield_names(nstatus, nstatus_names);
- else
- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
- printk("\n");
+ if (dev_priv->card_type < NV_50) {
+ NV_INFO(dev, "%s - nSource:", id);
+ nouveau_print_bitfield_names(nsource, nsource_names);
+ printk(", nStatus:");
+ if (dev_priv->card_type < NV_10)
+ nouveau_print_bitfield_names(nstatus, nstatus_names);
+ else
+ nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+ printk("\n");
+ }
NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
"Data 0x%08x:0x%08x\n",
@@ -578,27 +606,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
}
static void
+nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t trap[6];
+ int i, ch;
+ uint32_t idx = nv_rd32(dev, 0x100c90);
+ if (idx & 0x80000000) {
+ idx &= 0xffffff;
+ if (display) {
+ for (i = 0; i < 6; i++) {
+ nv_wr32(dev, 0x100c90, idx | i << 24);
+ trap[i] = nv_rd32(dev, 0x100c94);
+ }
+ for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (trap[1] == chan->ramin->instance >> 12)
+ break;
+ }
+ NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
+ name, (trap[5]&0x100?"read":"write"),
+ trap[5]&0xff, trap[4]&0xffff,
+ trap[3]&0xffff, trap[0], trap[2], ch);
+ }
+ nv_wr32(dev, 0x100c90, idx | 0x80000000);
+ } else if (display) {
+ NV_INFO(dev, "%s - no VM fault?\n", name);
+ }
+}
+
+static struct nouveau_enum_names nv50_mp_exec_error_names[] = {
+ { 3, "STACK_UNDERFLOW" },
+ { 4, "QUADON_ACTIVE" },
+ { 8, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x40, "BREAKPOINT" },
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+			ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_print_enum_names(status,
+ nv50_mp_exec_error_names);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+ pc&0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ nv50_pfb_vm_trap(dev, display, name);
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x00010000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x00010000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ nv50_pfb_vm_trap(dev, display, name);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
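The address arithmetic in the two functions above encodes a chipset split: pre-NVA0 parts space per-TP register blocks 4 KiB apart (MP blocks at 0x408200), while NVA0+ parts use a 2 KiB stride (base 0x408100); each MP's sub-block is 0x80 bytes either way. The same math as a self-checking sketch:

#include <assert.h>
#include <stdint.h>

/* Compute the register block address for MP 'mp' of TP 'tpid', mirroring
 * the two branches in nv50_pgraph_mp_trap() above. */
static uint32_t mp_reg_base(int chipset, int tpid, int mp)
{
	if (chipset < 0xa0)
		return 0x408200 + (tpid << 12) + (mp << 7);	/* 4 KiB/TP */
	return 0x408100 + (tpid << 11) + (mp << 7);		/* 2 KiB/TP */
}

int main(void)
{
	assert(mp_reg_base(0x50, 1, 2) == 0x409300);	/* 0x408200 + 0x1000 + 0x100 */
	assert(mp_reg_base(0xa5, 1, 2) == 0x408a00);	/* 0x408100 + 0x800 + 0x100 */
	return 0;
}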
+
+static void
+nv50_pgraph_trap_handler(struct drm_device *dev)
+{
+ struct nouveau_pgraph_trap trap;
+ uint32_t status = nv_rd32(dev, 0x400108);
+ uint32_t ustatus;
+ int display = nouveau_ratelimit();
+
+ if (!status && display) {
+ nouveau_graph_trap_info(dev, &trap);
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
+ }
+
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+ nv_wr32(dev, 0x400500, 0);
+ if (nv_rd32(dev, 0x400808) & 0x80000000) {
+ if (display) {
+ if (nouveau_graph_trapped_channel(dev, &trap.channel))
+ trap.channel = -1;
+ trap.class = nv_rd32(dev, 0x400814);
+ trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
+ trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
+ trap.data = nv_rd32(dev, 0x40080c);
+ trap.data2 = nv_rd32(dev, 0x400810);
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
+ }
+ nv_wr32(dev, 0x400808, 0);
+ } else if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
+ }
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus & 0x00000002) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+ nv_wr32(dev, 0x400500, 0);
+ if (nv_rd32(dev, 0x40084c) & 0x80000000) {
+ if (display) {
+ if (nouveau_graph_trapped_channel(dev, &trap.channel))
+ trap.channel = -1;
+ trap.class = nv_rd32(dev, 0x400814);
+ trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
+ trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
+ trap.data = nv_rd32(dev, 0x40085c);
+ trap.data2 = 0;
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
+ }
+ nv_wr32(dev, 0x40084c, 0);
+ } else if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
+ }
+ ustatus &= ~0x00000002;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ }
+
+ /* TRAPs other than dispatch use the "normal" trap regs. */
+ if (status && display) {
+ nouveau_graph_trap_info(dev, &trap);
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP", &trap);
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus & 0x00000002) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+ ustatus &= ~0x00000002;
+ }
+ if (ustatus & 0x00000004) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+ ustatus &= ~0x00000004;
+ }
+ NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804),
+ nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c),
+ nv_rd32(dev, 0x406810));
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00),
+ nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c),
+ nv_rd32(dev, 0x400c10));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804),
+ nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c),
+ nv_rd32(dev, 0x401810));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800),
+ nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808),
+ nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810),
+ nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+ /* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH_TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH_TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH_TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
+ status);
+ nv_wr32(dev, 0x400108, status);
+ }
+}
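Each unit handled above follows the same discipline: read the unit's ustatus, deal with the bits it knows, write 0xc0000000 back to reset the unit's trap state, ack the unit's bit in 0x400108, and clear it from the local status copy so the final "Unknown trap" check stays quiet. Distilled into a sketch (rd/wr are stand-ins for nv_rd32/nv_wr32, not the driver's helpers):

#include <stdint.h>
#include <stdio.h>

static uint32_t rd_stub(uint32_t reg) { (void)reg; return 0; }
static void wr_stub(uint32_t reg, uint32_t val)
{
	printf("wr 0x%06x = 0x%08x\n", reg, val);
}

/* The generic shape of one unit's trap handling. */
static void handle_unit(uint32_t (*rd)(uint32_t),
			void (*wr)(uint32_t, uint32_t),
			uint32_t *status, uint32_t unit_bit,
			uint32_t ustatus_reg)
{
	uint32_t ustatus = rd(ustatus_reg) & 0x7fffffff;

	if (ustatus)
		printf("unhandled ustatus 0x%08x\n", ustatus);
	wr(ustatus_reg, 0xc0000000);	/* reset the unit's trap state */
	wr(0x400108, unit_bit);		/* ack the trap in PGRAPH */
	*status &= ~unit_bit;		/* don't re-report as unknown */
}

int main(void)
{
	uint32_t status = 0x004;	/* say VFETCH is pending */

	handle_unit(rd_stub, wr_stub, &status, 0x004, 0x400c04);
	return status;			/* 0 once everything is acked */
}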
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+static struct nouveau_enum_names nv50_data_error_names[] =
+{
+ { 4, "INVALID_VALUE" },
+ { 5, "INVALID_ENUM" },
+ { 8, "INVALID_OBJECT" },
+ { 0xc, "INVALID_BITFIELD" },
+ { 0x28, "MP_NO_REG_SPACE" },
+ { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
+};
+
+static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
uint32_t status;
while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
+ /* NOTIFY: You've set a NOTIFY on a command and it's done. */
if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_NOTIFY", &trap);
status &= ~0x00000001;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
}
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
+ * when you write 0x200 to method 0x31c of class 0x50c0. */
+ if (status & 0x00000002) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_COMPUTE_QUERY", &trap);
+ status &= ~0x00000002;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
+ }
+
+ /* Unknown, never seen: 0x4 */
+ /* ILLEGAL_MTHD: You used a wrong method for this class. */
+ if (status & 0x00000010) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ if (unhandled && nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_ILLEGAL_MTHD", &trap);
status &= ~0x00000010;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
}
+ /* ILLEGAL_CLASS: You used a wrong class. */
+ if (status & 0x00000020) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_ILLEGAL_CLASS", &trap);
+ status &= ~0x00000020;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
+ }
+
+ /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
+ if (status & 0x00000040) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_DOUBLE_NOTIFY", &trap);
+ status &= ~0x00000040;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
+ }
+
+ /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
if (status & 0x00001000) {
nv_wr32(dev, 0x400500, 0x00000000);
nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1116,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
}
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ /* BUFFER_NOTIFY: Your m2mf transfer finished. */
+ if (status & 0x00010000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_BUFFER_NOTIFY", &trap);
+ status &= ~0x00010000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
+ }
+ /* DATA_ERROR: Invalid value for this method, or invalid
+ * state in current PGRAPH context for this operation */
+ if (status & 0x00100000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit()) {
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_DATA_ERROR", &trap);
+ NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
+ nouveau_print_enum_names(nv_rd32(dev, 0x400110),
+ nv50_data_error_names);
+ printk("\n");
+ }
status &= ~0x00100000;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
}
+ /* TRAP: Something bad happened in the middle of command
+ * execution. Has a billion types, subtypes, and even
+ * subsubtypes. */
if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x408900,
- nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08,
- nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x409900,
- nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08,
- nv_rd32(dev, 0x409e08) | 0xc0000000);
-
+ nv50_pgraph_trap_handler(dev);
status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
}
+ /* Unknown, never seen: 0x00400000 */
+
+ /* SINGLE_STEP: Happens on every method if you turned on
+ * single stepping in 40008c */
+ if (status & 0x01000000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_SINGLE_STEP", &trap);
+ status &= ~0x01000000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
+ }
+
+ /* 0x02000000 happens when you pause a ctxprog...
+ * but the only way this can happen, as far as I know,
+ * is by poking the relevant MMIO register, and we
+ * don't do that. */
+
if (status) {
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
status);
@@ -672,7 +1185,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
@@ -690,16 +1204,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, fbdev_flags = 0;
+ uint32_t status;
+ unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
if (!status)
return IRQ_NONE;
- if (dev_priv->fbdev_info) {
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
- }
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
@@ -730,8 +1242,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
- if (dev_priv->fbdev_info)
- dev_priv->fbdev_info->flags = fbdev_flags;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2dc09dbd817d..c1fd42b0dad1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
return -EBUSY;
}
+ nv_wr32(dev, 0x100c80, 0x00040001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00060001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
return 0;
}
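The nv_wait(0x100c80, 1, 0) calls above poll until the flush-busy bit drops. nv_wait() itself is not in this hunk; the idiom is a bounded poll roughly like the sketch below (the real helper is timer-based, the iteration bound here is purely illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Poll a register until (value & mask) == val, giving up eventually. */
static bool poll_reg(uint32_t (*rd)(uint32_t), uint32_t reg,
		     uint32_t mask, uint32_t val)
{
	int i;

	for (i = 0; i < 100000; i++) {
		if ((rd(reg) & mask) == val)
			return true;
	}
	return false;
}

static uint32_t countdown = 3;
static uint32_t rd_stub(uint32_t reg)
{
	(void)reg;
	return countdown ? countdown-- : 0;	/* busy bit clears after a few reads */
}

int main(void)
{
	return poll_reg(rd_stub, 0x100c80, 0x00000001, 0x00000000) ? 0 : 1;
}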
@@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00040001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00060001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
}
}
@@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev)
}
}
-/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
-nouveau_mem_fb_amount_igp(struct drm_device *dev)
+nouveau_mem_detect_nv04(struct drm_device *dev)
+{
+ uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
+
+ if (boot0 & 0x00000100)
+ return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+
+ switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
+ case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ return 32 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ return 16 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ return 8 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ return 4 * 1024 * 1024;
+ }
+
+ return 0;
+}
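As a worked example of the decode above: boot0 = 0x00002100 has bit 8 set and bits 15:12 equal to 2, giving (2 * 2 + 2) = 6 MiB. In compilable form:

#include <assert.h>
#include <stdint.h>

/* Mirror of the bit-8 branch of nouveau_mem_detect_nv04() above. */
static uint32_t nv04_ram_from_boot0(uint32_t boot0)
{
	if (boot0 & 0x00000100)
		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
	return 0;	/* otherwise the NV03_BOOT_0_RAM_AMOUNT field applies */
}

int main(void)
{
	assert(nv04_ram_from_boot0(0x00002100) == 6 * 1024 * 1024);
	return 0;
}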
+
+static uint32_t
+nouveau_mem_detect_nforce(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pci_dev *bridge;
@@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
return 0;
}
- if (dev_priv->flags&NV_NFORCE) {
+ if (dev_priv->flags & NV_NFORCE) {
pci_read_config_dword(bridge, 0x7C, &mem);
return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
} else
- if (dev_priv->flags&NV_NFORCE2) {
+ if (dev_priv->flags & NV_NFORCE2) {
pci_read_config_dword(bridge, 0x84, &mem);
return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
}
@@ -477,50 +526,33 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
}
/* returns the amount of FB ram in bytes */
-uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+int
+nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t boot0;
-
- switch (dev_priv->card_type) {
- case NV_04:
- boot0 = nv_rd32(dev, NV03_BOOT_0);
- if (boot0 & 0x00000100)
- return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
-
- switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
- case NV04_BOOT_0_RAM_AMOUNT_32MB:
- return 32 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_16MB:
- return 16 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_8MB:
- return 8 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_4MB:
- return 4 * 1024 * 1024;
- }
- break;
- case NV_10:
- case NV_20:
- case NV_30:
- case NV_40:
- case NV_50:
- default:
- if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
- return nouveau_mem_fb_amount_igp(dev);
- } else {
- uint64_t mem;
- mem = (nv_rd32(dev, NV04_FIFO_DATA) &
- NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
- NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
- return mem * 1024 * 1024;
- }
- break;
+
+ if (dev_priv->card_type == NV_04) {
+ dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
+ } else
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
+ } else {
+ dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
+ dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
+ dev_priv->vram_sys_base <<= 12;
+ }
}
- NV_ERROR(dev,
- "Unable to detect video ram size. Please report your setup to "
- DRIVER_EMAIL "\n");
- return 0;
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ if (dev_priv->vram_sys_base) {
+ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ dev_priv->vram_sys_base);
+ }
+
+ if (dev_priv->vram_size)
+ return 0;
+ return -ENOMEM;
}
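Note the NV10+ branch keeps the masked NV04_FIFO_DATA value directly as the size in bytes. That works only if the megabyte field sits in bits 31:20, so that MB << 20 equals MB * 1024 * 1024; the removed mask-shift-multiply code implies exactly that. A self-checking sketch under that assumption (the mask and shift values here are inferred, not quoted from nouveau_reg.h):

#include <assert.h>
#include <stdint.h>

#define RAM_AMOUNT_MB_MASK  0xfff00000u	/* assumed: MB count in bits 31:20 */
#define RAM_AMOUNT_MB_SHIFT 20

int main(void)
{
	uint32_t reg = 0x10000000;	/* 256 in bits 31:20 */
	uint32_t mb = (reg & RAM_AMOUNT_MB_MASK) >> RAM_AMOUNT_MB_SHIFT;

	assert(mb == 256);
	/* the masked value is already the byte count */
	assert((reg & RAM_AMOUNT_MB_MASK) == mb * 1024 * 1024);
	return 0;
}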
#if __OS_HAS_AGP
@@ -631,15 +663,12 @@ nouveau_mem_init(struct drm_device *dev)
spin_lock_init(&dev_priv->ttm.bo_list_lock);
spin_lock_init(&dev_priv->tile.lock);
- dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
-
+ dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
- NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
-
/* remove reserved space at end of vram from available amount */
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc087f9b1..9537f3e30115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
chan->notifier_bo = ntfy;
out_err:
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(ntfy->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (ret)
+ drm_gem_object_unreference_unlocked(ntfy->gem);
return ret;
}
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
- drm_gem_object_unreference(chan->notifier_bo->gem);
mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
nouveau_mem_takedown(&chan->notifier_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310e41be..6ca80a3fe70d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ed1590577b6c..1d6ee8b55154 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,6 +1,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
+#include <linux/slab.h>
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
@@ -171,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
}
dev_priv->engine.instmem.finish_access(nvbe->dev);
+ if (dev_priv->card_type == NV_50) {
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+ }
+
nvbe->bound = false;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05e..b02a231d6937 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -24,17 +24,19 @@
*/
#include <linux/swab.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
-static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -276,8 +278,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nouveau_stub_init;
- engine->fb.takedown = nouveau_stub_takedown;
+ engine->fb.init = nv50_fb_init;
+ engine->fb.takedown = nv50_fb_takedown;
engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
@@ -340,7 +342,7 @@ nouveau_card_init_channel(struct drm_device *dev)
gpuobj = NULL;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, nouveau_mem_fb_amount(dev),
+ 0, dev_priv->vram_size,
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
&gpuobj);
if (ret)
@@ -371,6 +373,33 @@ out_err:
return ret;
}
+static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ nouveau_pci_resume(pdev);
+ drm_kms_helper_poll_enable(dev);
+ } else {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ drm_kms_helper_poll_disable(dev);
+ nouveau_pci_suspend(pdev, pmm);
+ }
+}
+
+static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -384,6 +413,8 @@ nouveau_card_init(struct drm_device *dev)
return 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +422,7 @@ nouveau_card_init(struct drm_device *dev)
goto out;
engine = &dev_priv->engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+ spin_lock_init(&dev_priv->context_switch_lock);
/* Parse BIOS tables / Run init tables if card not POSTed */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -399,6 +431,10 @@ nouveau_card_init(struct drm_device *dev)
goto out;
}
+ ret = nouveau_mem_detect(dev);
+ if (ret)
+ goto out_bios;
+
ret = nouveau_gpuobj_early_init(dev);
if (ret)
goto out_bios;
@@ -474,7 +510,7 @@ nouveau_card_init(struct drm_device *dev)
else
ret = nv04_display_create(dev);
if (ret)
- goto out_irq;
+ goto out_channel;
}
ret = nouveau_backlight_init(dev);
@@ -483,11 +519,18 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_helper_initial_config(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
+ }
return 0;
+out_channel:
+ if (dev_priv->channel) {
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ }
out_irq:
drm_irq_uninstall(dev);
out_fifo:
@@ -505,6 +548,7 @@ out_mc:
out_gpuobj:
nouveau_gpuobj_takedown(dev);
out_mem:
+ nouveau_sgdma_takedown(dev);
nouveau_mem_close(dev);
out_instmem:
engine->instmem.takedown(dev);
@@ -525,6 +569,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@@ -599,6 +644,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct apertures_struct *aper = alloc_apertures(3);
+ if (!aper)
+ return NULL;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+ return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool primary = false;
+ dev_priv->apertures = nouveau_get_apertures(dev);
+ if (!dev_priv->apertures)
+ return -ENOMEM;
+
+#ifdef CONFIG_X86
+ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+ return 0;
+}
+
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -617,11 +704,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
-
- if (dev_priv->acpi_dsm)
- nouveau_hybrid_setup(dev);
-
dev_priv->wq = create_workqueue("nouveau");
if (!dev_priv->wq)
return -EINVAL;
@@ -691,29 +773,30 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
- /* map larger RAMIN aperture on NV40 cards */
- dev_priv->ramin = NULL;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Map the PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
int ramin_bar = 2;
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
ramin_bar = 3;
dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
- dev_priv->ramin = ioremap(
- pci_resource_start(dev->pdev, ramin_bar),
+ dev_priv->ramin =
+ ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
- NV_ERROR(dev, "Failed to init RAMIN mapping, "
- "limited instance memory available\n");
+ NV_ERROR(dev, "Failed to PRAMIN BAR");
+ return -ENOMEM;
}
- }
-
- /* On older cards (or if the above failed), create a map covering
- * the BAR0 PRAMIN aperture */
- if (!dev_priv->ramin) {
+ } else {
dev_priv->ramin_size = 1 * 1024 * 1024;
dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
- dev_priv->ramin_size);
+ dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
return -ENOMEM;
@@ -761,6 +844,8 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else
@@ -776,13 +861,6 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int
-nouveau_ioctl_card_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return nouveau_card_init(dev);
-}
-
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -833,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_PTIMER_TIME:
+ getparam->value = dev_priv->engine.timer.read(dev);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143ed97c1..eba687f1099e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_framebuffer *fb = crtc->fb;
/* Calculate our timings */
- int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
- int horizStart = (mode->crtc_hsync_start >> 3) - 1;
- int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
+ int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
+ int horizStart = (mode->crtc_hsync_start >> 3) + 1;
+ int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
int horizTotal = (mode->crtc_htotal >> 3) - 5;
int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 89a91b9d8b25..aaf3de3bc816 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
static void
nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
+ nv_crtc->cursor_saved_x = x;
+ nv_crtc->cursor_saved_y = y;
NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
NV_PRAMDAC_CU_START_POS,
XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15d70da..1cb19e3acb55 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
if (dcb->type == OUTPUT_TV) {
testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
} else {
testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
- if (dev_priv->vbios->dactestval)
- testval = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval)
+ testval = dev_priv->vbios.dactestval;
}
saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875bdb6a..41634d4752fe 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
if (!nv_gf4_disp_arch(dev) ||
(output_mode->hsync_start - output_mode->hdisplay) >=
- dev_priv->vbios->digital_min_front_porch)
+ dev_priv->vbios.digital_min_front_porch)
regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
else
- regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215fa5b9..c7898b4f6dfb 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
nv04_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- uint16_t connector[16] = { 0 };
int i, ret;
NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
if (ret)
continue;
-
- connector[dcbent->connector] |= (1 << dcbent->type);
}
- for (i = 0; i < dcb->entries; i++) {
- struct dcb_entry *dcbent = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[dcbent->connector];
- if (!(encoders & (1 << dcbent->type)))
- continue;
- connector[dcbent->connector] = 0;
-
- switch (dcbent->type) {
- case OUTPUT_ANALOG:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_VGA;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_TMDS:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_DVID;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_LVDS:
- type = DRM_MODE_CONNECTOR_LVDS;
-#if 0
- /* don't create i2c adapter when lvds ddc not allowed */
- if (dcbent->lvdsconf.use_straps_for_mode ||
- dev_priv->vbios->fp_no_ddc)
- i2c_index = 0xf;
-#endif
- break;
- case OUTPUT_TV:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- default:
- type = DRM_MODE_CONNECTOR_Unknown;
- continue;
- }
-
- nouveau_connector_create(dev, dcbent->connector, type);
- }
+ for (i = 0; i < dcb->connector.entries; i++)
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
/* Save previous state */
NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caabd5c3..1eeac4fae73d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
- dsize = (width * image->height) >> 5;
+ width = ALIGN(image->width, 8);
+ dsize = ALIGN(width * image->height, 32) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
while (dsize) {
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
0x009f : 0x005f, NvImageBlit);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b0..66fe55983b6e 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
/* Setup initial state */
dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986ea65a..618355e9cdd5 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
return 0;
}
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bits 20-22: dither mode
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ * - bit 26: surface_src/surface_zeta valid
+ * - bit 27: pattern valid
+ * - bit 28: rop valid
+ * - bit 29: beta1 valid
+ * - bit 30: beta4 valid
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on selected operation: for
+ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
+ * and the last word isn't actually used for anything, we abuse it for
+ * this purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
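For reference, the NV04 word-0 layout documented above decodes as in the sketch below (the field names are invented for illustration; the bit positions are the ones the comment describes):

#include <stdint.h>
#include <stdio.h>

struct nv04_grobj_w0 {
	unsigned klass       : 8;	/* bits 0-7 */
	unsigned color_key   : 1;	/* bit 12 */
	unsigned clip_rect   : 1;	/* bit 13 */
	unsigned swizzled    : 1;	/* bit 14 */
	unsigned operation   : 3;	/* bits 15-17 */
	unsigned patch_valid : 1;	/* bit 24 */
	unsigned surf3d      : 1;	/* bit 25 */
};

static struct nv04_grobj_w0 decode_w0(uint32_t w0)
{
	struct nv04_grobj_w0 d = {
		.klass       = w0 & 0xff,
		.color_key   = (w0 >> 12) & 1,
		.clip_rect   = (w0 >> 13) & 1,
		.swizzled    = (w0 >> 14) & 1,
		.operation   = (w0 >> 15) & 7,
		.patch_valid = (w0 >> 24) & 1,
		.surf3d      = (w0 >> 25) & 1,
	};
	return d;
}

int main(void)
{
	/* class 0x5f (imageblit), operation 1 (ROP_AND), patch valid */
	struct nv04_grobj_w0 d = decode_w0(0x0100805f);

	printf("class 0x%02x op %u valid %u\n",
	       (unsigned)d.klass, (unsigned)d.operation,
	       (unsigned)d.patch_valid);
	return 0;
}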
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
uint32_t tmp;
tmp = nv_ri32(dev, instance);
- tmp &= ~0x00038000;
- tmp |= ((data & 7) << 15);
+ tmp &= ~mask;
+ tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ uint32_t tmp, ctx1;
+ int class, op, valid = 1;
+
+ ctx1 = nv_ri32(dev, instance);
+ class = ctx1 & 0xff;
+ op = (ctx1 >> 15) & 7;
+ tmp = nv_ri32(dev, instance + 0xc);
+ tmp &= ~mask;
+ tmp |= value;
+ nv_wi32(dev, instance + 0xc, tmp);
+
+ /* check for valid surf2d/surf_dst/surf_color */
+ if (!(tmp & 0x02000000))
+ valid = 0;
+ /* check for valid surf_src/surf_zeta */
+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+ valid = 0;
+
+ switch (op) {
+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+ case 0:
+ case 3:
+ break;
+ /* ROP_AND: requires pattern and rop */
+ case 1:
+ if (!(tmp & 0x18000000))
+ valid = 0;
+ break;
+ /* BLEND_AND: requires beta1 */
+ case 2:
+ if (!(tmp & 0x20000000))
+ valid = 0;
+ break;
+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+ case 4:
+ case 5:
+ if (!(tmp & 0x40000000))
+ valid = 0;
+ break;
+ }
+
+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (data > 5)
+ return 1;
+ /* Old versions of the objects only accept first three operations. */
+ if (data > 2 && grclass < 0x40)
+ return 1;
+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ /* changing operation changes set of objects needed for validation */
+ nv04_graph_set_ctx_val(chan, 0, 0);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x40053c, min);
+ nv_wr32(chan->dev, 0x400544, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x400540, min);
+ nv_wr32(chan->dev, 0x400548, max);
return 0;
}
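A worked example of the clip decode used by the two methods above: data packs min in bits 15:0 and a width in bits 31:16 that is sign-extended when its top bit is set, so max can land below min (modulo the 0x3ffff wrap):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t data = 0xfffe0010;	/* min = 0x10, w = -2 */
	uint32_t min = data & 0xffff;
	uint32_t w = data >> 16;
	uint32_t max;

	if (w & 0x8000)
		w |= 0xffff0000;	/* sign-extend the 16-bit width */
	max = (min + w) & 0x3ffff;
	assert(min == 0x10);
	assert(max == 0xe);		/* min - 2, as expected */
	return 0;
}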
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ case 0x52:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x18:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x44:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ return 0;
+ case 0x43:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ return 0;
+ case 0x12:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ return 0;
+ case 0x72:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x58:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x59:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x5a:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x5b:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x2000, 0);
+ return 0;
+ case 0x19:
+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x1000, 0);
+ return 0;
+ /* Yes, for some reason even the old versions of objects
+ * accept 0x57 and not 0x17. Consistency be damned.
+ */
+ case 0x57:
+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ return 0;
+ }
+ return 1;
+}
+
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+ { 0x0184, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+ { 0x0188, nv04_graph_mthd_bind_chroma },
+ { 0x018c, nv04_graph_mthd_bind_clip },
+ { 0x0190, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ {},
+};
+
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, NULL },
- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0038, false, NULL }, /* dvd subpicture */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+ { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+ { 0x0064, false, NULL }, /* nv05 iifc */
+ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+ { 0x0065, false, NULL }, /* nv05 ifc */
+ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+ { 0x0066, false, NULL }, /* nv05 sifc */
+ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
+ { 0x0018, false, NULL }, /* nv01 pattern */
+ { 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, NULL }, /* surf3d */
+ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x0017, false, NULL }, /* nv01 chroma */
+ { 0x0057, false, NULL }, /* nv04 chroma */
+ { 0x0058, false, NULL }, /* surf_dst */
+ { 0x0059, false, NULL }, /* surf_src */
+ { 0x005a, false, NULL }, /* surf_color */
+ { 0x005b, false, NULL }, /* surf_zeta */
+ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
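
The grclass table above and the per-class method tables before it are all zero-terminated arrays: broadly, the driver resolves a (class, method) pair to a software handler by scanning them from the PGRAPH interrupt path, and a NULL result means the method is left to the hardware. A minimal, self-contained sketch of that lookup; the struct and function names here are hypothetical stand-ins mirroring the tables' shape, not the driver's real types:

struct mthd { int id; int (*exec)(void); };
struct grclass { int id; int software; struct mthd *methods; };

/* Walk a zero-terminated class table, then that class's zero-terminated
 * method table; NULL means "no software handler for this method". */
struct mthd *lookup_mthd(struct grclass *classes, int cls, int mthd)
{
	struct grclass *c;
	struct mthd *m;

	for (c = classes; c->id; c++) {
		if (c->id != cls)
			continue;
		for (m = c->methods; m && m->id; m++)
			if (m->id == mthd)
				return m;
		break;
	}
	return NULL;
}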
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099e9c42..c4e3404337d4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+ adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e49b6ee..74c880374fb9 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
!enc->crtc &&
nv04_dfp_get_bound_head(dev, dcb) == head) {
nv04_dfp_bind_head(dev, dcb, head ^ 1,
- dev_priv->VBIOS.fp.dual_link);
+ dev_priv->vbios.fp.dual_link);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41..500ccfd3a0b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV40_RAMFC(chan->id);
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
@@ -273,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
default:
nv_wr32(dev, 0x2230, 0);
nv_wr32(dev, NV40_PFIFO_RAMFC,
- ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
+ ((dev_priv->vram_size - 512 * 1024 +
dev_priv->ramfc_offset) >> 16) | (3 << 16));
break;
}
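
The nv40_fifo hunk above brackets the RAMFC writes and the PFIFO_MODE enable with context_switch_lock held and local IRQs masked, so the context-switch interrupt handler can never observe a half-programmed channel. The pattern itself, as a minimal self-contained sketch (the lock and function names are stand-ins):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(switch_lock);	/* stands in for context_switch_lock */

static void program_channel(void)
{
	unsigned long flags;

	/* Mask local IRQs and take the lock, so neither this CPU's
	 * interrupt handlers nor other CPUs race the update. */
	spin_lock_irqsave(&switch_lock, flags);
	/* ... write RAMFC entries, flip the PFIFO_MODE bit ... */
	spin_unlock_irqrestore(&switch_lock, flags);
}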
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 53e8afe1dcd1..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
if (!dev_priv->engine.graph.ctxprog) {
struct nouveau_grctx ctx = {};
- uint32_t cp[256];
+ uint32_t *cp;
+
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+ kfree(cp);
}
/* No context present currently */
@@ -335,6 +341,27 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x400b38, 0x2ffff800);
nv_wr32(dev, 0x400b3c, 0x00006000);
+ /* Tiling-related setup. */

+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x4a:
+ nv_wr32(dev, 0x400bc4, 0x1003d888);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b500);
+ break;
+ case 0x46:
+ nv_wr32(dev, 0x400bc4, 0x0000e024);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b520);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ nv_wr32(dev, 0x400bc4, 0x1003d888);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b540);
+ break;
+ default:
+ break;
+ }
+
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c31f543..9b5c97469588 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
/* TODO:
* - get vs count from 0x1540
- * - document unimplemented bits compared to nvidia
- * - nsource handling
- * - R0 & 0x0200 handling
- * - single-vs handling
- * - 400314 bit 0
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 000000000000..2cdc2bfe7179
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P)
+{
+ struct nouveau_pll_vals pll_vals;
+ int ret;
+
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+ if (ret <= 0)
+ return ret;
+
+ *N1 = pll_vals.N1;
+ *M1 = pll_vals.M1;
+ *N2 = pll_vals.N2;
+ *M2 = pll_vals.M2;
+ *P = pll_vals.log2P;
+ return ret;
+}
+
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N, int *fN, int *M, int *P)
+{
+ fixed20_12 fb_div, a, b;
+
+ *P = pll->vco1.maxfreq / clk;
+ if (*P > pll->max_p)
+ *P = pll->max_p;
+ if (*P < pll->min_p)
+ *P = pll->min_p;
+
+ /* *M = ceil(refclk / pll->vco1.max_inputfreq); */
+ a.full = dfixed_const(pll->refclk);
+ b.full = dfixed_const(pll->vco1.max_inputfreq);
+ a.full = dfixed_div(a, b);
+ a.full = dfixed_ceil(a);
+ *M = dfixed_trunc(a);
+
+ /* fb_div = (vco * *M) / refclk; */
+ fb_div.full = dfixed_const(clk * *P);
+ fb_div.full = dfixed_mul(fb_div, a);
+ a.full = dfixed_const(pll->refclk);
+ fb_div.full = dfixed_div(fb_div, a);
+
+ /* *N = floor(fb_div); */
+ a.full = dfixed_floor(fb_div);
+ *N = dfixed_trunc(fb_div);
+
+ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+ b.full = dfixed_const(8192);
+ a.full = dfixed_mul(a, b);
+ fb_div.full = dfixed_mul(fb_div, b);
+ fb_div.full = fb_div.full - a.full;
+ *fN = dfixed_trunc(fb_div) - 4096;
+ *fN &= 0xffff;
+
+ return clk;
+}
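
nv50_calc_pll2() above does the fractional-N arithmetic entirely in 20.12 fixed point via the drm_fixed helpers. A userspace sanity check with plain integer math shows the same N/fN derivation; every input value below (refclk, the input-frequency limit, the target clock and P) is an assumption for illustration, not a value from any VBIOS:

#include <stdio.h>

int main(void)
{
	const long refclk = 27000, max_in = 13500;	/* kHz, assumed */
	const long clk = 108000, P = 4;			/* assumed */
	long M = (refclk + max_in - 1) / max_in;	/* ceil() -> 2 */
	long vco = clk * P;				/* 432000 kHz */
	long N = vco * M / refclk;			/* floor() -> 32 */
	long rem = vco * M % refclk;			/* fractional remainder */
	long fN = (rem * 8192 / refclk - 4096) & 0xffff; /* -> 0xf000 */

	printf("M=%ld N=%ld fN=0x%04lx P=%ld\n", M, N, fN, P);
	return 0;
}

As in the driver, the fractional part is expressed in units of 1/8192, biased by -4096 and masked to 16 bits.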
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e3400c..b4e4a3b05eae 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- struct nouveau_pll_vals pll;
- struct pll_lims limits;
+ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct pll_lims pll;
uint32_t reg1, reg2;
- int ret;
+ int ret, N1, M1, N2, M2, P;
- ret = get_pll_limits(dev, pll_reg, &limits);
+ ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
- if (ret <= 0)
- return ret;
+ if (pll.vco2.maxfreq) {
+ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+ pclk, ret, N1, M1, N2, M2, P);
- if (limits.vco2.maxfreq) {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
- nv_wr32(dev, pll_reg, 0x10000611);
- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
- nv_wr32(dev, pll_reg + 8,
- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+ nv_wr32(dev, reg, 0x10000611);
+ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
- nv_wr32(dev, pll_reg, 0x50000610);
- nv_wr32(dev, pll_reg + 4, reg1 |
- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+ nv_wr32(dev, reg, 0x50000610);
+ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, reg + 8, N2);
}
return 0;
@@ -358,9 +366,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 753e723adb3a..03ad7ab14f09 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
struct drm_device *dev = nv_crtc->base.dev;
+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
((y & 0xFFFF) << 16) | (x & 0xFFFF));
/* Needed to make the cursor move. */
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042a8e10..1fd9537beff6 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
/* Use bios provided value if possible. */
- if (dev_priv->vbios->dactestval) {
- load_pattern = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval) {
+ load_pattern = dev_priv->vbios.dactestval;
NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
load_pattern);
} else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf59fbcd..580a5d10be93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@@ -143,7 +144,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
}
ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, nouveau_mem_fb_amount(dev));
+ 0, dev_priv->vram_size);
if (ret) {
nv50_evo_channel_del(pchan);
return ret;
@@ -231,7 +232,7 @@ nv50_display_init(struct drm_device *dev)
/* This used to be in crtc unblank, but seems out of place there. */
nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
/* RAM is clamped to 256 MiB. */
- ram_amount = nouveau_mem_fb_amount(dev);
+ ram_amount = dev_priv->vram_size;
NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
if (ram_amount > 256*1024*1024)
ram_amount = 256*1024*1024;
@@ -370,9 +371,7 @@ nv50_display_init(struct drm_device *dev)
struct nouveau_connector *conn = nouveau_connector(connector);
struct dcb_gpio_entry *gpio;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
- connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if (conn->dcb->gpio_tag == 0xff)
continue;
gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +464,7 @@ static int nv50_display_disable(struct drm_device *dev)
int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
- uint32_t connector[16] = {};
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
@@ -522,49 +520,20 @@ int nv50_display_create(struct drm_device *dev)
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
continue;
}
-
- connector[entry->connector] |= (1 << entry->type);
}
- /* It appears that DCB 3.0+ VBIOS has a connector table, however,
- * I'm not 100% certain how to decode it correctly yet so just
- * look at what encoders are present on each connector index and
- * attempt to derive the connector type from that.
- */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_entry *entry = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[entry->connector];
- if (!(encoders & (1 << entry->type)))
+ for (i = 0 ; i < dcb->connector.entries; i++) {
+ if (i != 0 && dcb->connector.entry[i].index2 ==
+ dcb->connector.entry[i - 1].index2)
continue;
- connector[entry->connector] = 0;
-
- if (encoders & (1 << OUTPUT_DP)) {
- type = DRM_MODE_CONNECTOR_DisplayPort;
- } else if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DRM_MODE_CONNECTOR_DVII;
- else
- type = DRM_MODE_CONNECTOR_DVID;
- } else if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DRM_MODE_CONNECTOR_VGA;
- } else if (encoders & (1 << OUTPUT_LVDS)) {
- type = DRM_MODE_CONNECTOR_LVDS;
- } else {
- type = DRM_MODE_CONNECTOR_Unknown;
- }
-
- if (type == DRM_MODE_CONNECTOR_Unknown)
- continue;
-
- nouveau_connector_create(dev, entry->connector, type);
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
}
ret = nv50_display_init(dev);
- if (ret)
+ if (ret) {
+ nv50_display_destroy(dev);
return ret;
+ }
return 0;
}
@@ -667,8 +636,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
return -1;
}
- for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
- struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
if (dcbent->type != type)
continue;
@@ -692,7 +661,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t mc, script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +679,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
switch (dcbent->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
- if (bios->pub.fp_no_ddc) {
+ if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
script |= 0x0100;
if (bios->fp.if_is_24bit)
@@ -815,6 +784,37 @@ ack:
}
static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ uint32_t tmp, unk0 = 0, unk1 = 0;
+
+ if (dcb->type != OUTPUT_DP)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb == dcb) {
+ unk0 = nv_encoder->dp.unk0;
+ unk1 = nv_encoder->dp.unk1;
+ break;
+ }
+ }
+
+ if (unk0 || unk1) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= 0xfffffe03;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ tmp &= 0xfef080c0;
+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+ }
+}
+
+static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct dcb_entry *dcbent;
@@ -837,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ nv50_display_unk20_dp_hack(dev, dcbent);
+
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -919,10 +921,12 @@ nv50_display_error_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}
-static void
-nv50_display_irq_hotplug(struct drm_device *dev)
+void
+nv50_display_irq_hotplug_bh(struct work_struct *work)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv =
+ container_of(work, struct drm_nouveau_private, hpd_work);
+ struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
uint32_t unplug_mask, plug_mask, change_mask;
@@ -975,6 +979,8 @@ nv50_display_irq_hotplug(struct drm_device *dev)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+ drm_helper_hpd_irq_event(dev);
}
void
@@ -983,8 +989,10 @@ nv50_display_irq_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t delayed = 0;
- while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
- nv50_display_irq_hotplug(dev);
+ if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
+ if (!work_pending(&dev_priv->hpd_work))
+ queue_work(dev_priv->wq, &dev_priv->hpd_work);
+ }
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
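
Moving hotplug processing out of the hard-IRQ path matters because the work it triggers (drm_helper_hpd_irq_event() and the DDC probing behind it) can sleep; the top half now only tests and queues. A self-contained sketch of the container_of()-based bottom-half pattern this hunk uses, with example_* names as stand-ins for the driver's own:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct hpd_work;
};

static void example_hpd_bh(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, hpd_work);
	/* process context: may sleep, take mutexes, talk DDC */
	(void)priv;
}

static void example_setup(struct example_priv *priv)
{
	INIT_WORK(&priv->hpd_work, example_hpd_bh);	/* once, at load */
}

static void example_irq(struct example_priv *priv)
{
	if (!work_pending(&priv->hpd_work))	/* coalesce IRQ bursts */
		schedule_work(&priv->hpd_work);
}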
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 3ae8d0725f63..581d405ac014 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -37,6 +37,7 @@
void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
+void nv50_display_irq_hotplug_bh(struct work_struct *work);
int nv50_display_init(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 000000000000..32611bd30e6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,38 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv50_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267).

+ */
+ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ nv_wr32(dev, 0x100c90, 0x0707ff);
+ break;
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(dev, 0x100c90, 0x0d0fff);
+ break;
+ default:
+ nv_wr32(dev, 0x100c90, 0x1d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv50_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf7ccb2..6bf025c6fc6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
+ width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
BEGIN_RING(chan, NvSub2D, 0x0814, 2);
@@ -152,13 +152,16 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;
+ uint64_t fb;
int ret, format;
+ fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
+
switch (info->var.bits_per_pixel) {
case 8:
format = 0xf3;
@@ -233,7 +236,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
BEGIN_RING(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
- OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x0840, 4);
@@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, 0);
- OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
- dev_priv->vm_vram_base);
+ OUT_RING(chan, upper_32_bits(fb));
+ OUT_RING(chan, lower_32_bits(fb));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, 0);
- OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
- dev_priv->vm_vram_base);
+ OUT_RING(chan, upper_32_bits(fb));
+ OUT_RING(chan, lower_32_bits(fb));
return 0;
}
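
With the destination surface address now held in a 64-bit variable, the two ring words are produced with upper_32_bits()/lower_32_bits() instead of a hardcoded zero high word, which is what lets framebuffers beyond the first 4 GiB of GPU virtual address space work. A trivial illustration (the address is made up):

#include <linux/kernel.h>

static void split_vm_addr(u64 fb, u32 *hi, u32 *lo)
{
	*hi = upper_32_bits(fb);	/* e.g. 0x120001000 -> 0x00000001 */
	*lo = lower_32_bits(fb);	/*                 -> 0x20001000 */
}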
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4..e20c0e2474f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
}
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
- nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
- nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
- nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+ nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
+ nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
+ chan->dma.ib_base * 4);
+ nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
if (!IS_G80) {
nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
ret = nv50_fifo_channel_enable(dev, chan->id, false);
if (ret) {
NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
return ret;
}
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 3d4d84e078ac..bb47ad737267 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Red Hat Inc.
+ * Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,49 +19,58 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Dave Airlie
+ * Authors: Ben Skeggs
*/
-#ifndef RADEON_FIXED_H
-#define RADEON_FIXED_H
-typedef union rfixed {
- u32 full;
-} fixed20_12;
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
-
-#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
-
-static inline u32 rfixed_floor(fixed20_12 A)
+static int
+nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
- u32 non_frac = rfixed_trunc(A);
+ const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- return rfixed_const(non_frac);
+ if (gpio->line >= 32)
+ return -EINVAL;
+
+ *reg = nv50_gpio_reg[gpio->line >> 3];
+ *shift = (gpio->line & 7) << 2;
+ return 0;
}
-static inline u32 rfixed_ceil(fixed20_12 A)
+int
+nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
- u32 non_frac = rfixed_trunc(A);
+ struct dcb_gpio_entry *gpio;
+ uint32_t r, s, v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
- if (A.full > rfixed_const(non_frac))
- return rfixed_const(non_frac + 1);
- else
- return rfixed_const(non_frac);
+ if (nv50_gpio_location(gpio, &r, &s))
+ return -EINVAL;
+
+ v = nv_rd32(dev, r) >> (s + 2);
+ return ((v & 1) == (gpio->state[1] & 1));
}
-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+int
+nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
{
- u64 tmp = ((u64)A.full << 13);
+ struct dcb_gpio_entry *gpio;
+ uint32_t r, s, v;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ if (nv50_gpio_location(gpio, &r, &s))
+ return -EINVAL;
- do_div(tmp, B.full);
- tmp += 1;
- tmp /= 2;
- return lower_32_bits(tmp);
+ v = nv_rd32(dev, r) & ~(0x3 << s);
+ v |= (gpio->state[state] ^ 2) << s;
+ nv_wr32(dev, r, v);
+ return 0;
}
-#endif
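
nv50_gpio_location() above packs eight GPIO lines into each of the four control registers, four bits per line; the sampled state bit then sits two bits above that shift, hence the `s + 2` in nv50_gpio_get(). A worked check of the index math, with the line number assumed for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	int line = 10;				/* hypothetical GPIO line */

	assert(reg[line >> 3] == 0xe108);	/* 8 lines per register */
	assert(((line & 7) << 2) == 8);		/* bits 8..11 of 0xe108 */
	return 0;
}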
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d504801b514..b203d06f601f 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
-MODULE_FIRMWARE("nouveau/nv50.ctxprog");
-MODULE_FIRMWARE("nouveau/nv50.ctxvals");
-MODULE_FIRMWARE("nouveau/nv84.ctxprog");
-MODULE_FIRMWARE("nouveau/nv84.ctxvals");
-MODULE_FIRMWARE("nouveau/nv86.ctxprog");
-MODULE_FIRMWARE("nouveau/nv86.ctxvals");
-MODULE_FIRMWARE("nouveau/nv92.ctxprog");
-MODULE_FIRMWARE("nouveau/nv92.ctxvals");
-MODULE_FIRMWARE("nouveau/nv94.ctxprog");
-MODULE_FIRMWARE("nouveau/nv94.ctxvals");
-MODULE_FIRMWARE("nouveau/nv96.ctxprog");
-MODULE_FIRMWARE("nouveau/nv96.ctxvals");
-MODULE_FIRMWARE("nouveau/nv98.ctxprog");
-MODULE_FIRMWARE("nouveau/nv98.ctxvals");
-MODULE_FIRMWARE("nouveau/nva0.ctxprog");
-MODULE_FIRMWARE("nouveau/nva0.ctxvals");
-MODULE_FIRMWARE("nouveau/nva5.ctxprog");
-MODULE_FIRMWARE("nouveau/nva5.ctxvals");
-MODULE_FIRMWARE("nouveau/nva8.ctxprog");
-MODULE_FIRMWARE("nouveau/nva8.ctxvals");
-MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
-MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
-MODULE_FIRMWARE("nouveau/nvac.ctxprog");
-MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+#include "nouveau_grctx.h"
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
@@ -79,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i;
+
NV_DEBUG(dev, "\n");
nv_wr32(dev, 0x400804, 0xc0000000);
@@ -88,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
+ for (i = 0; i < 16; i++) {
+ if (units & 1 << i) {
+ if (dev_priv->chipset < 0xa0) {
+ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+ }
+ }
+ }
+
nv_wr32(dev, 0x400108, 0xffffffff);
nv_wr32(dev, 0x400824, 0x00004000);
@@ -111,9 +106,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
NV_DEBUG(dev, "\n");
- nouveau_grctx_prog_load(dev);
- if (!dev_priv->engine.graph.ctxprog)
- dev_priv->engine.graph.accel_blocked = true;
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 0x70000;
+ }
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
+ int i;
+ if (!cp) {
+ NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
+ }
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 512;
+ if (!nv50_grctx_init(&ctx)) {
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ } else {
+ dev_priv->engine.graph.accel_blocked = true;
+ }
+ kfree(cp);
+ }
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +213,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
struct nouveau_gpuobj *ctx;
- uint32_t grctx_size = 0x70000;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC |
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
@@ -209,7 +229,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
- grctx_size - 1);
+ pgraph->grctx_size - 1);
nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,12 +237,16 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
dev_priv->engine.instmem.prepare_access(dev, true);
- nouveau_grctx_vals_load(dev, ctx);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv50_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, ctx);
+ }
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
- if ((dev_priv->chipset & 0xf0) == 0xa0)
- nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
- else
- nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
dev_priv->engine.instmem.finish_access(dev);
return 0;
@@ -386,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
{ 0x5039, false, NULL }, /* m2mf */
{ 0x502d, false, NULL }, /* 2d */
{ 0x50c0, false, NULL }, /* compute */
+ { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
{ 0x5097, false, NULL }, /* tesla (nv50) */
- { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
- { 0x8397, false, NULL }, /* tesla (nva0) */
- { 0x8597, false, NULL }, /* tesla (nva8) */
+ { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
+ { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
+ { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 000000000000..42a8fb20c1e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2383 @@
+/*
+ * Copyright 2009 Marcin Kościelnicki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_UNK01 ((0 * 32) + 1)
+#define CP_FLAG_UNK01_CLEAR 0
+#define CP_FLAG_UNK01_SET 1
+#define CP_FLAG_UNK03 ((0 * 32) + 3)
+#define CP_FLAG_UNK03_CLEAR 0
+#define CP_FLAG_UNK03_SET 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
+#define CP_FLAG_UNK0B_CLEAR 0
+#define CP_FLAG_UNK0B_SET 1
+#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
+#define CP_FLAG_UNK1D_CLEAR 0
+#define CP_FLAG_UNK1D_SET 1
+#define CP_FLAG_UNK20 ((1 * 32) + 0)
+#define CP_FLAG_UNK20_CLEAR 0
+#define CP_FLAG_UNK20_SET 1
+#define CP_FLAG_STATUS ((2 * 32) + 0)
+#define CP_FLAG_STATUS_BUSY 0
+#define CP_FLAG_STATUS_IDLE 1
+#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_NEWCTX ((2 * 32) + 10)
+#define CP_FLAG_NEWCTX_BUSY 0
+#define CP_FLAG_NEWCTX_DONE 1
+#define CP_FLAG_XFER ((2 * 32) + 11)
+#define CP_FLAG_XFER_IDLE 0
+#define CP_FLAG_XFER_BUSY 1
+#define CP_FLAG_ALWAYS ((2 * 32) + 13)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_INTR ((2 * 32) + 15)
+#define CP_FLAG_INTR_NOT_PENDING 0
+#define CP_FLAG_INTR_PENDING 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000f0000
+#define CP_CTX_COUNT_SHIFT 16
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0001ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEWCTX 0x00600004
+#define CP_NEXT_TO_SWAP 0x00600005
+#define CP_SET_CONTEXT_POINTER 0x00600006
+#define CP_SET_XFER_POINTER 0x00600007
+#define CP_ENABLE 0x00600009
+#define CP_END 0x0060000c
+#define CP_NEXT_TO_CURRENT 0x0060000d
+#define CP_DISABLE1 0x0090ffff
+#define CP_DISABLE2 0x0091ffff
+#define CP_XFER_1 0x008000ff
+#define CP_XFER_2 0x008800ff
+#define CP_SEEK_1 0x00c000ff
+#define CP_SEEK_2 0x00c800ff
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/*
+ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
+ * the GPU itself that does context-switching, but it needs a special
+ * microcode to do it. And it's the driver's task to supply this microcode,
+ * further known as ctxprog, as well as the initial context values, known
+ * as ctxvals.
+ *
+ * Without ctxprog, you cannot switch contexts. Not even in software, since
+ * the majority of context [xfer strands] isn't accessible directly. You're
+ * stuck with a single channel, and you also suffer all the problems resulting
+ * from missing ctxvals, since you cannot load them.
+ *
+ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
+ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
+ * since you don't have... some sort of needed setup.
+ *
+ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
+ * it's too much hassle to handle no-ctxprog as a special case.
+ */
+
+/*
+ * How ctxprogs work.
+ *
+ * The ctxprog is written in its own kind of microcode, with a very small and
+ * crappy set of available commands. You upload it to a small [512 insns]
+ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
+ * switch channels, or when the driver explicitly requests it. Stuff visible
+ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
+ * the per-channel context save area in VRAM [known as ctxvals or grctx],
+ * 4 flags registers, a scratch register, two grctx pointers, plus many
+ * random poorly-understood details.
+ *
+ * When ctxprog runs, it's supposed to check what operations are asked of it,
+ * save old context if requested, optionally reset PGRAPH and switch to the
+ * new channel, and load the new context. Context consists of three major
+ * parts: subset of MMIO registers and two "xfer areas".
+ */
+
+/* TODO:
+ * - document unimplemented bits compared to nvidia
+ * - NVAx: make a TP subroutine, use it.
+ * - use 0x4008fc instead of 0x1540?
+ */
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+
+/* Main function: construct the ctxprog skeleton, call the other functions. */
+
+int
+nv50_grctx_init(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ break;
+ default:
+ NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
+ "your NV%x card.\n", dev_priv->chipset);
+ NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
+ "the devs.\n");
+ return -ENOSYS;
+ }
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_out (ctx, CP_DISABLE1);
+ cp_out (ctx, CP_DISABLE2);
+ cp_out (ctx, CP_ENABLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_set (ctx, UNK01, SET);
+ cp_name(ctx, cp_setup_load);
+ cp_out (ctx, CP_NEWCTX);
+ cp_wait(ctx, NEWCTX, BUSY);
+ cp_set (ctx, UNK1D, CLEAR);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, UNK1D, SET);
+ cp_wait(ctx, STATUS, BUSY);
+ cp_wait(ctx, INTR, PENDING);
+ cp_bra (ctx, STATUS, BUSY, cp_setup_save);
+ cp_set (ctx, UNK01, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_set (ctx, UNK03, SET);
+ cp_pos (ctx, 0x00004/4);
+ cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
+ cp_pos (ctx, 0x00100/4);
+ nv50_graph_construct_mmio(ctx);
+ nv50_graph_construct_xfer1(ctx);
+ nv50_graph_construct_xfer2(ctx);
+
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+
+ cp_set (ctx, UNK20, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
+ cp_lsr (ctx, ctx->ctxvals_base);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, 4);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_set (ctx, UNK01, CLEAR);
+ cp_set (ctx, UNK03, CLEAR);
+ cp_set (ctx, UNK1D, CLEAR);
+
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+ ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
+
+ return 0;
+}
+
+/*
+ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
+ * registers to save/restore and the default values for them.
+ */
+
+static void
+nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, j;
+ int offset, base;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ /* 0800: DISPATCH */
+ cp_ctx(ctx, 0x400808, 7);
+ gr_def(ctx, 0x400814, 0x00000030);
+ cp_ctx(ctx, 0x400834, 0x32);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, 0x400834, 0xff400040);
+ gr_def(ctx, 0x400838, 0xfff00080);
+ gr_def(ctx, 0x40083c, 0xfff70090);
+ gr_def(ctx, 0x400840, 0xffe806a8);
+ }
+ gr_def(ctx, 0x400844, 0x00000002);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x400894, 0x00001000);
+ gr_def(ctx, 0x4008e8, 0x00000003);
+ gr_def(ctx, 0x4008ec, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x400908, 0xb);
+ else if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x400908, 0xc);
+ else
+ cp_ctx(ctx, 0x400908, 0xe);
+
+ if (dev_priv->chipset >= 0xa0)
+ cp_ctx(ctx, 0x400b00, 0x1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ cp_ctx(ctx, 0x400b10, 0x1);
+ gr_def(ctx, 0x400b10, 0x0001629d);
+ cp_ctx(ctx, 0x400b20, 0x1);
+ gr_def(ctx, 0x400b20, 0x0001629d);
+ }
+
+ /* 0C00: VFETCH */
+ cp_ctx(ctx, 0x400c08, 0x2);
+ gr_def(ctx, 0x400c08, 0x0000fe0c);
+
+ /* 1000 */
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x401008, 0x4);
+ gr_def(ctx, 0x401014, 0x00001000);
+ } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00001000);
+ } else {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00004000);
+ }
+
+ /* 1400 */
+ cp_ctx(ctx, 0x401400, 0x8);
+ cp_ctx(ctx, 0x401424, 0x3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x40142c, 0x0001fd87);
+ else
+ gr_def(ctx, 0x40142c, 0x00000187);
+ cp_ctx(ctx, 0x401540, 0x5);
+ gr_def(ctx, 0x401550, 0x00001018);
+
+ /* 1800: STREAMOUT */
+ cp_ctx(ctx, 0x401814, 0x1);
+ gr_def(ctx, 0x401814, 0x000000ff);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x40181c, 0xe);
+ gr_def(ctx, 0x401850, 0x00000004);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x40181c, 0xf);
+ gr_def(ctx, 0x401854, 0x00000004);
+ } else {
+ cp_ctx(ctx, 0x40181c, 0x13);
+ gr_def(ctx, 0x401864, 0x00000004);
+ }
+
+ /* 1C00 */
+ cp_ctx(ctx, 0x401c00, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x401c00, 0x0001005f);
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x94:
+ gr_def(ctx, 0x401c00, 0x044d00df);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x401c00, 0x042500df);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ gr_def(ctx, 0x401c00, 0x142500df);
+ break;
+ }
+
+ /* 2400 */
+ cp_ctx(ctx, 0x402400, 0x1);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402408, 0x1);
+ else
+ cp_ctx(ctx, 0x402408, 0x2);
+ gr_def(ctx, 0x402408, 0x00000600);
+
+ /* 2800 */
+ cp_ctx(ctx, 0x402800, 0x1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x402800, 0x00000006);
+
+ /* 2C00 */
+ cp_ctx(ctx, 0x402c08, 0x6);
+ if (dev_priv->chipset != 0x50)
+ gr_def(ctx, 0x402c14, 0x01000000);
+ gr_def(ctx, 0x402c18, 0x000000ff);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402ca0, 0x1);
+ else
+ cp_ctx(ctx, 0x402ca0, 0x2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, 0x402ca0, 0x00000800);
+ else
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ cp_ctx(ctx, 0x402cac, 0x4);
+
+ /* 3000 */
+ cp_ctx(ctx, 0x403004, 0x1);
+ gr_def(ctx, 0x403004, 0x00000001);
+
+ /* 3404 */
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x403404, 0x1);
+ gr_def(ctx, 0x403404, 0x00000001);
+ }
+
+ /* 5000 */
+ cp_ctx(ctx, 0x405000, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x405000, 0x00300080);
+ break;
+ case 0x84:
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x405000, 0x000e0080);
+ break;
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, 0x405000, 0x00000080);
+ break;
+ }
+ cp_ctx(ctx, 0x405014, 0x1);
+ gr_def(ctx, 0x405014, 0x00000004);
+ cp_ctx(ctx, 0x40501c, 0x1);
+ cp_ctx(ctx, 0x405024, 0x1);
+ cp_ctx(ctx, 0x40502c, 0x1);
+
+ /* 5400 or maybe 4800 */
+ if (dev_priv->chipset == 0x50) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xea);
+ } else if (dev_priv->chipset < 0x94) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcb);
+ } else if (dev_priv->chipset < 0xa0) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcc);
+ } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ offset = 0x404800;
+ cp_ctx(ctx, 0x404800, 0xda);
+ } else {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xd4);
+ }
+ gr_def(ctx, offset + 0x0c, 0x00000002);
+ gr_def(ctx, offset + 0x10, 0x00000001);
+ if (dev_priv->chipset >= 0x94)
+ offset += 4;
+ gr_def(ctx, offset + 0x1c, 0x00000001);
+ gr_def(ctx, offset + 0x20, 0x00000100);
+ gr_def(ctx, offset + 0x38, 0x00000002);
+ gr_def(ctx, offset + 0x3c, 0x00000001);
+ gr_def(ctx, offset + 0x40, 0x00000001);
+ gr_def(ctx, offset + 0x50, 0x00000001);
+ gr_def(ctx, offset + 0x54, 0x003fffff);
+ gr_def(ctx, offset + 0x58, 0x00001fff);
+ gr_def(ctx, offset + 0x60, 0x00000001);
+ gr_def(ctx, offset + 0x64, 0x00000001);
+ gr_def(ctx, offset + 0x6c, 0x00000001);
+ gr_def(ctx, offset + 0x70, 0x00000001);
+ gr_def(ctx, offset + 0x74, 0x00000001);
+ gr_def(ctx, offset + 0x78, 0x00000004);
+ gr_def(ctx, offset + 0x7c, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x80, 0x00000001);
+ gr_def(ctx, offset + 0x84, 0x00000001);
+ gr_def(ctx, offset + 0x88, 0x00000007);
+ gr_def(ctx, offset + 0x8c, 0x00000001);
+ gr_def(ctx, offset + 0x90, 0x00000007);
+ gr_def(ctx, offset + 0x94, 0x00000001);
+ gr_def(ctx, offset + 0x98, 0x00000001);
+ gr_def(ctx, offset + 0x9c, 0x00000001);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000001);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc0, 0x0000000a);
+ gr_def(ctx, offset + 0xd0, 0x00000040);
+ gr_def(ctx, offset + 0xd8, 0x00000002);
+ gr_def(ctx, offset + 0xdc, 0x00000100);
+ gr_def(ctx, offset + 0xe0, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ gr_def(ctx, offset + 0x100, 0x00000001);
+ gr_def(ctx, offset + 0x124, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000001);
+ gr_def(ctx, offset + 0x140, 0x00000100);
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x154, 0x00000100);
+ gr_def(ctx, offset + 0x158, 0x00000001);
+ gr_def(ctx, offset + 0x15c, 0x00000100);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x170, 0x00000100);
+ gr_def(ctx, offset + 0x174, 0x00000001);
+ gr_def(ctx, offset + 0x17c, 0x00000001);
+ gr_def(ctx, offset + 0x188, 0x00000002);
+ gr_def(ctx, offset + 0x190, 0x00000001);
+ gr_def(ctx, offset + 0x198, 0x00000001);
+ gr_def(ctx, offset + 0x1ac, 0x00000003);
+ offset += 0xd0;
+ } else {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000100);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc8, 0x00000100);
+ gr_def(ctx, offset + 0xcc, 0x00000001);
+ gr_def(ctx, offset + 0xd0, 0x00000100);
+ gr_def(ctx, offset + 0xd8, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ }
+ gr_def(ctx, offset + 0xf8, 0x00000004);
+ gr_def(ctx, offset + 0xfc, 0x00000070);
+ gr_def(ctx, offset + 0x100, 0x00000080);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x114, 0x0000000c);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x11c, 0x00000008);
+ gr_def(ctx, offset + 0x120, 0x00000014);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0x124, 0x00000026);
+ offset -= 0x18;
+ } else {
+ gr_def(ctx, offset + 0x128, 0x00000029);
+ gr_def(ctx, offset + 0x12c, 0x00000027);
+ gr_def(ctx, offset + 0x130, 0x00000026);
+ gr_def(ctx, offset + 0x134, 0x00000008);
+ gr_def(ctx, offset + 0x138, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000027);
+ }
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x14c, 0x00000002);
+ gr_def(ctx, offset + 0x150, 0x00000003);
+ gr_def(ctx, offset + 0x154, 0x00000004);
+ gr_def(ctx, offset + 0x158, 0x00000005);
+ gr_def(ctx, offset + 0x15c, 0x00000006);
+ gr_def(ctx, offset + 0x160, 0x00000007);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x1a8, 0x000000cf);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1d8, 0x00000080);
+ gr_def(ctx, offset + 0x1dc, 0x00000004);
+ gr_def(ctx, offset + 0x1e0, 0x00000004);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ else
+ gr_def(ctx, offset + 0x1e4, 0x00000003);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x1ec, 0x00000003);
+ offset += 8;
+ }
+ gr_def(ctx, offset + 0x1e8, 0x00000001);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1f4, 0x00000012);
+ gr_def(ctx, offset + 0x1f8, 0x00000010);
+ gr_def(ctx, offset + 0x1fc, 0x0000000c);
+ gr_def(ctx, offset + 0x200, 0x00000001);
+ gr_def(ctx, offset + 0x210, 0x00000004);
+ gr_def(ctx, offset + 0x214, 0x00000002);
+ gr_def(ctx, offset + 0x218, 0x00000004);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 4;
+ gr_def(ctx, offset + 0x224, 0x003fffff);
+ gr_def(ctx, offset + 0x228, 0x00001fff);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0x20;
+ else if (dev_priv->chipset >= 0xa0) {
+ gr_def(ctx, offset + 0x250, 0x00000001);
+ gr_def(ctx, offset + 0x254, 0x00000001);
+ gr_def(ctx, offset + 0x258, 0x00000002);
+ offset += 0x10;
+ }
+ gr_def(ctx, offset + 0x250, 0x00000004);
+ gr_def(ctx, offset + 0x254, 0x00000014);
+ gr_def(ctx, offset + 0x258, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x264, 0x00000002);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 8;
+ gr_def(ctx, offset + 0x270, 0x00000001);
+ gr_def(ctx, offset + 0x278, 0x00000002);
+ gr_def(ctx, offset + 0x27c, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0xc;
+ else {
+ gr_def(ctx, offset + 0x280, 0x00000e00);
+ gr_def(ctx, offset + 0x284, 0x00001000);
+ gr_def(ctx, offset + 0x288, 0x00001e00);
+ }
+ gr_def(ctx, offset + 0x290, 0x00000001);
+ gr_def(ctx, offset + 0x294, 0x00000001);
+ gr_def(ctx, offset + 0x298, 0x00000001);
+ gr_def(ctx, offset + 0x29c, 0x00000001);
+ gr_def(ctx, offset + 0x2a0, 0x00000001);
+ gr_def(ctx, offset + 0x2b0, 0x00000200);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x2b4, 0x00000200);
+ offset += 4;
+ }
+ if (dev_priv->chipset < 0xa0) {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x00000070);
+ gr_def(ctx, offset + 0x2c0, 0x00000080);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x00000070);
+ gr_def(ctx, offset + 0x2d4, 0x00000080);
+ } else {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x000000f0);
+ gr_def(ctx, offset + 0x2c0, 0x000000ff);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x000000f0);
+ gr_def(ctx, offset + 0x2d4, 0x000000ff);
+ gr_def(ctx, offset + 0x2dc, 0x00000009);
+ offset += 4;
+ }
+ gr_def(ctx, offset + 0x2e4, 0x00000001);
+ gr_def(ctx, offset + 0x2e8, 0x000000cf);
+ gr_def(ctx, offset + 0x2f0, 0x00000001);
+ gr_def(ctx, offset + 0x300, 0x000000cf);
+ gr_def(ctx, offset + 0x308, 0x00000002);
+ gr_def(ctx, offset + 0x310, 0x00000001);
+ gr_def(ctx, offset + 0x318, 0x00000001);
+ gr_def(ctx, offset + 0x320, 0x000000cf);
+ gr_def(ctx, offset + 0x324, 0x000000cf);
+ gr_def(ctx, offset + 0x328, 0x00000001);
+
+ /* 6000? */
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x4063e0, 0x1);
+
+ /* 6800: M2MF */
+ if (dev_priv->chipset < 0x90) {
+ cp_ctx(ctx, 0x406814, 0x2b);
+ gr_def(ctx, 0x406818, 0x00000f80);
+ gr_def(ctx, 0x406860, 0x007f0080);
+ gr_def(ctx, 0x40689c, 0x007f0080);
+ } else {
+ cp_ctx(ctx, 0x406814, 0x4);
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x406818, 0x00000f80);
+ else
+ gr_def(ctx, 0x406818, 0x00001f80);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x40681c, 0x00000030);
+ cp_ctx(ctx, 0x406830, 0x3);
+ }
+
+ /* 7000: per-ROP group state */
+ for (i = 0; i < 8; i++) {
+ if (units & (1<<(i+16))) {
+ cp_ctx(ctx, 0x407000 + (i<<8), 3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
+ else if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
+ else
+ gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
+ gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
+
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 1);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 2);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
+ } else {
+ cp_ctx(ctx, 0x407010 + (i<<8), 3);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
+ else
+ gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
+ }
+
+ cp_ctx(ctx, 0x407080 + (i<<8), 4);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
+ else
+ gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
+ else
+ gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
+ gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x407094 + (i<<8), 1);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ cp_ctx(ctx, 0x407094 + (i<<8), 3);
+ else {
+ cp_ctx(ctx, 0x407094 + (i<<8), 4);
+ gr_def(ctx, 0x4070a0 + (i<<8), 1);
+ }
+ }
+ }
+
+ cp_ctx(ctx, 0x407c00, 0x3);
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407c00, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407c00, 0x00390040);
+ else
+ gr_def(ctx, 0x407c00, 0x003d0040);
+ gr_def(ctx, 0x407c08, 0x00000022);
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x407c10, 0x3);
+ cp_ctx(ctx, 0x407c20, 0x1);
+ cp_ctx(ctx, 0x407c2c, 0x1);
+ }
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407d00, 0x9);
+ } else {
+ cp_ctx(ctx, 0x407d00, 0x15);
+ }
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x407d08, 0x00380040);
+ else {
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407d08, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407d08, 0x00390040);
+ else
+ gr_def(ctx, 0x407d08, 0x003d0040);
+ gr_def(ctx, 0x407d0c, 0x00000022);
+ }
+
+ /* 8000+: per-TP state */
+ for (i = 0; i < 10; i++) {
+ if (units & (1<<i)) {
+ if (dev_priv->chipset < 0xa0)
+ base = 0x408000 + (i<<12);
+ else
+ base = 0x408000 + (i<<11);
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xc00;
+ else
+ offset = base + 0x80;
+ cp_ctx(ctx, offset + 0x00, 1);
+ gr_def(ctx, offset + 0x00, 0x0000ff0a);
+ cp_ctx(ctx, offset + 0x08, 1);
+
+ /* per-MP state */
+ for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ if (!(units & (1 << (j+24))))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x200 + (j<<7);
+ else
+ offset = base + 0x100 + (j<<7);
+ cp_ctx(ctx, offset, 0x20);
+ gr_def(ctx, offset + 0x00, 0x01800000);
+ gr_def(ctx, offset + 0x04, 0x00160000);
+ gr_def(ctx, offset + 0x08, 0x01800000);
+ gr_def(ctx, offset + 0x18, 0x0003ffff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, offset + 0x1c, 0x00080000);
+ break;
+ case 0x84:
+ gr_def(ctx, offset + 0x1c, 0x00880000);
+ break;
+ case 0x86:
+ gr_def(ctx, offset + 0x1c, 0x008c0000);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, offset + 0x1c, 0x118c0000);
+ break;
+ case 0x94:
+ gr_def(ctx, offset + 0x1c, 0x10880000);
+ break;
+ case 0xa0:
+ case 0xa5:
+ gr_def(ctx, offset + 0x1c, 0x310c0000);
+ break;
+ case 0xa3:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, offset + 0x1c, 0x300c0000);
+ break;
+ }
+ gr_def(ctx, offset + 0x40, 0x00010401);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x48, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x48, 0x00000078);
+ gr_def(ctx, offset + 0x50, 0x000000bf);
+ gr_def(ctx, offset + 0x58, 0x00001210);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x5c, 0x00000080);
+ else
+ gr_def(ctx, offset + 0x5c, 0x08000080);
+ if (dev_priv->chipset >= 0xa0)
+ gr_def(ctx, offset + 0x68, 0x0000003e);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x300, 0x4);
+ else
+ cp_ctx(ctx, base + 0x300, 0x5);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x304, 0x00007070);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x304, 0x00027070);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x304, 0x01127070);
+ else
+ gr_def(ctx, base + 0x304, 0x05127070);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x318, 1);
+ else
+ cp_ctx(ctx, base + 0x320, 1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x318, 0x0003ffff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x318, 0x03ffffff);
+ else
+ gr_def(ctx, base + 0x320, 0x07ffffff);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x324, 5);
+ else
+ cp_ctx(ctx, base + 0x328, 4);
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, base + 0x340, 9);
+ offset = base + 0x340;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, base + 0x33c, 0xb);
+ offset = base + 0x344;
+ } else {
+ cp_ctx(ctx, base + 0x33c, 0xd);
+ offset = base + 0x344;
+ }
+ gr_def(ctx, offset + 0x0, 0x00120407);
+ gr_def(ctx, offset + 0x4, 0x05091507);
+ if (dev_priv->chipset == 0x84)
+ gr_def(ctx, offset + 0x8, 0x05100202);
+ else
+ gr_def(ctx, offset + 0x8, 0x05010202);
+ gr_def(ctx, offset + 0xc, 0x00030201);
+ if (dev_priv->chipset == 0xa3)
+ cp_ctx(ctx, base + 0x36c, 1);
+
+ cp_ctx(ctx, base + 0x400, 2);
+ gr_def(ctx, base + 0x404, 0x00000040);
+ cp_ctx(ctx, base + 0x40c, 2);
+ gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
+ gr_def(ctx, base + 0x410, 0x00141210);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x800;
+ else
+ offset = base + 0x500;
+ cp_ctx(ctx, offset, 6);
+ gr_def(ctx, offset + 0x0, 0x000001f0);
+ gr_def(ctx, offset + 0x4, 0x00000001);
+ gr_def(ctx, offset + 0x8, 0x00000003);
+ if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, offset + 0xc, 0x00008000);
+ gr_def(ctx, offset + 0x14, 0x00039e00);
+ cp_ctx(ctx, offset + 0x1c, 2);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x1c, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x1c, 0x00000100);
+ gr_def(ctx, offset + 0x20, 0x00003800);
+
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, base + 0x54c, 2);
+ if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x54c, 0x003fe006);
+ else
+ gr_def(ctx, base + 0x54c, 0x003fe007);
+ gr_def(ctx, base + 0x550, 0x003fe000);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xa00;
+ else
+ offset = base + 0x680;
+ cp_ctx(ctx, offset, 1);
+ gr_def(ctx, offset, 0x00404040);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xe00;
+ else
+ offset = base + 0x700;
+ cp_ctx(ctx, offset, 2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset, 0x0077f005);
+ else if (dev_priv->chipset == 0xa5)
+ gr_def(ctx, offset, 0x6cf7f007);
+ else if (dev_priv->chipset == 0xa8)
+ gr_def(ctx, offset, 0x6cfff007);
+ else if (dev_priv->chipset == 0xac)
+ gr_def(ctx, offset, 0x0cfff007);
+ else
+ gr_def(ctx, offset, 0x0cf7f007);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x4, 0x00007fff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset + 0x4, 0x003f7fff);
+ else
+ gr_def(ctx, offset + 0x4, 0x02bf7fff);
+ cp_ctx(ctx, offset + 0x2c, 1);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, offset + 0x50, 9);
+ gr_def(ctx, offset + 0x54, 0x000003ff);
+ gr_def(ctx, offset + 0x58, 0x00000003);
+ gr_def(ctx, offset + 0x5c, 0x00000003);
+ gr_def(ctx, offset + 0x60, 0x000001ff);
+ gr_def(ctx, offset + 0x64, 0x0000001f);
+ gr_def(ctx, offset + 0x68, 0x0000000f);
+ gr_def(ctx, offset + 0x6c, 0x0000000f);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x70, 1);
+ } else {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x60, 5);
+ }
+ }
+ }
+}
+
+/*
+ * xfer areas. These are a pain.
+ *
+ * There are 2 xfer areas: the first one is big and contains all sorts of
+ * stuff, the second is small and contains some per-TP context.
+ *
+ * Each area is split into 8 "strands". The areas, when saved to grctx,
+ * are made of 8-word blocks. Each block contains a single word from
+ * each strand. The strands are independent of each other, their
+ * addresses are unrelated to each other, and data in them is closely
+ * packed together. The strand layout varies a bit between cards: here
+ * and there, a single word is thrown out in the middle and the whole
+ * strand is offset by a bit from the corresponding one on another
+ * chipset. For this reason, the addresses of stuff in strands are
+ * almost useless. Knowing the sequence of stuff and the size of gaps
+ * between them is much more useful, and that's how we build the
+ * strands in our generator.
+ *
+ * NVA0 takes this mess to a whole new level by cutting the old strands
+ * into a few dozen pieces [known as genes], rearranging them randomly,
+ * and putting them back together to make new strands. Hopefully these
+ * genes correspond more or less directly to the same PGRAPH subunits
+ * as in the 400040 register.
+ *
+ * The most common value in the default context is 0, and when the
+ * genes are separated by 0's, gene boundaries are quite speculative...
+ * some of them can be clearly deduced, others can be guessed, and yet
+ * others won't be resolved without figuring out the real meaning of a
+ * given ctxval. For the same reason, the ending point of each strand
+ * is unknown, except for strand 0, which is the longest strand and
+ * whose end corresponds to the end of the whole xfer.
+ *
+ * An unsolved mystery is the seek instruction: it takes an argument
+ * in bits 8-18, and that argument is clearly the place in strands to
+ * seek to... but the offsets don't seem to correspond to offsets as
+ * seen in grctx. Perhaps there's another, real, not-randomly-changing
+ * addressing scheme in the strands, and the xfer insn just happens to
+ * skip over the unused bits? NV10-NV30 PIPE comes to mind...
+ *
+ * As far as I know, there's no way to access the xfer areas directly
+ * without the help of ctxprog.
+ */
+
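+/*
+ * A sketch of the interleave described above (illustrative only, not
+ * part of the generated ctxprog; strand_data and blocks are hypothetical
+ * names, not variables used in this file):
+ *
+ *	for (block = 0; block < blocks; block++)
+ *		for (strand = 0; strand < 8; strand++)
+ *			grctx[block * 8 + strand] = strand_data[strand][block];
+ *
+ * This is also why xf_emit() below advances ctxvals_pos by num << 3:
+ * consecutive words of a single strand land 8 words apart in the saved
+ * image.
+ */
+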
+static inline void
+xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
+{
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+ ctx->ctxvals_pos += num << 3;
+}
+
+/* Gene declarations... */
+
+static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ int offset;
+ int size = 0;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
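+ /*
+ * 0x1540 is the unit enable mask: going by how it is tested below,
+ * bits 0-9 cover TPs, bits 16-23 ROP groups, and bits 24+ MPs.
+ */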
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ ctx->ctxvals_base = offset;
+
+ if (dev_priv->chipset < 0xa0) {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ xf_emit(ctx, 0x99, 0);
+ break;
+ case 0x84:
+ case 0x86:
+ xf_emit(ctx, 0x384, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ xf_emit(ctx, 0x380, 0);
+ break;
+ }
+ nv50_graph_construct_gene_m2mf (ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x4c4, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x984, 0);
+ break;
+ }
+ nv50_graph_construct_gene_unk5(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xa, 0);
+ else
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk3(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 0x1;
+ nv50_graph_construct_gene_unk6(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ nv50_graph_construct_gene_unk8(ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xfb, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xd3, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xab, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x6b, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 0x2;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xa80, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xa7e, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xa7c, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0xa7a, 0);
+ break;
+ }
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ xf_emit(ctx, 0xe, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ nv50_graph_construct_gene_unk2(ctx);
+ nv50_graph_construct_gene_unk1(ctx);
+ nv50_graph_construct_gene_unk10(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: per-ROP group state */
+ ctx->ctxvals_pos = offset + 3;
+ for (i = 0; i < 6; i++)
+ if (units & (1 << (i + 16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strands 4-7: per-TP state */
+ for (i = 0; i < 4; i++) {
+ ctx->ctxvals_pos = offset + 4 + i;
+ if (units & (1 << (2 * i)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << (2 * i + 1)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x385, 0);
+ else
+ xf_emit(ctx, 0x384, 0);
+ nv50_graph_construct_gene_m2mf(ctx);
+ xf_emit(ctx, 0x950, 0);
+ nv50_graph_construct_gene_unk10(ctx);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ nv50_graph_construct_gene_unk8(ctx);
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x189, 0);
+ else if (dev_priv->chipset == 0xa3)
+ xf_emit(ctx, 0xd5, 0);
+ else if (dev_priv->chipset == 0xa5)
+ xf_emit(ctx, 0x99, 0);
+ else if (dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x65, 0);
+ else
+ xf_emit(ctx, 0x6d, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 1;
+ nv50_graph_construct_gene_unk1(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 2;
+ if (dev_priv->chipset == 0xa0) {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ xf_emit(ctx, 0x36, 0);
+ nv50_graph_construct_gene_unk5(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3 */
+ ctx->ctxvals_pos = offset + 3;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ nv50_graph_construct_gene_unk6(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 4 */
+ ctx->ctxvals_pos = offset + 4;
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0xa80, 0);
+ else if (dev_priv->chipset == 0xa3)
+ xf_emit(ctx, 0xa7c, 0);
+ else
+ xf_emit(ctx, 0xa7a, 0);
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 5 */
+ ctx->ctxvals_pos = offset + 5;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ for (i = 0; i < 8; i++)
+ if (units & (1<<(i+16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 6 */
+ ctx->ctxvals_pos = offset + 6;
+ nv50_graph_construct_gene_unk3(ctx);
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 7 */
+ ctx->ctxvals_pos = offset + 7;
+ if (dev_priv->chipset == 0xa0) {
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp(ctx);
+ } else {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+
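+ /*
+ * "size" now holds the longest strand in 8-word blocks (hence the /8
+ * above). Advance ctxvals_pos past the whole area, then emit the
+ * ctxprog ops that set the xfer pointer and launch transfer 1.
+ */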
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+}
+
+/*
+ * non-trivial demagicked parts of ctx init go here
+ */
+
+static void
+nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+{
+ /* m2mf state */
+ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT */
+ xf_emit (ctx, 1, 0); /* PITCH_IN */
+ xf_emit (ctx, 1, 0); /* PITCH_OUT */
+ xf_emit (ctx, 1, 0); /* LINE_LENGTH */
+ xf_emit (ctx, 1, 0); /* LINE_COUNT */
+ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
+ xf_emit (ctx, 1, 1); /* LINEAR_IN */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
+ xf_emit (ctx, 1, 1); /* LINEAR_OUT */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+}
+
+static void
+nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0x542, 0);
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x942, 0);
+ break;
+ case 0xa0:
+ case 0xa3:
+ xf_emit(ctx, 0x2042, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x842, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 3, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+{
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 0x10, 0x04000000);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+}
+
+static void
+nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ }
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0x7f);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 6, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 2, 0x88);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x3f800000);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 0x28, 0);
+ else
+ xf_emit(ctx, 0x25, 0);
+ xf_emit(ctx, 1, 0x52);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x10, 0);
+ else
+ xf_emit(ctx, 0x11, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 0x20, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+{
+ /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x1c4d, 0);
+ else
+ xf_emit(ctx, 0x1c4b, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x3c1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 8, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x20);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0xe, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 0xd, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ if (dev_priv->chipset == 0xa8)
+ xf_emit(ctx, 1, 0x1e00);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x125, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x126, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x124, 0);
+ else
+ xf_emit(ctx, 0x1f7, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xa1, 0);
+ else
+ xf_emit(ctx, 0x5a, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x834, 0);
+ else if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x1873, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x8ba, 0);
+ else
+ xf_emit(ctx, 0x833, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xf, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x100);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0xcf);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x4444480);
+ xf_emit(ctx, 0x37, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+{
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+}
+
+static void
+nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
+ xf_emit(ctx, 1, 0x3f800000);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x12, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 1, 0x52);
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 0x13, 0);
+ } else {
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else
+ xf_emit(ctx, 0x10, 0);
+ }
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x26, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 0xffff);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1f, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ }
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x26, 0);
+ else
+ xf_emit(ctx, 0x3c, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 4, 4);
+ xf_emit(ctx, 0x2c, 0);
+}
+
+static void
+nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic2 = 0x001ffe67;
+ } else {
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 7, 0);
+ if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 ||
+     dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset != 0xa0) {
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 1, 0x15);
+ }
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x13, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x10, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 0x19, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 0xb, 0);
+ } else {
+ xf_emit(ctx, 0xc, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ } else {
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x1b, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x5b, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic3;
+ if (dev_priv->chipset == 0x50)
+ magic3 = 0x1000;
+ else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 ||
+          dev_priv->chipset >= 0xa8)
+ magic3 = 0x1e00;
+ else
+ magic3 = 0;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x24, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x14, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x03020100);
+ else
+ xf_emit(ctx, 1, 0x00608080);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x03020100);
+ xf_emit(ctx, 1, 3);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ xf_emit(ctx, 0x1024, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0xa24, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x214, 0);
+ else
+ xf_emit(ctx, 0x414, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic1, magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic1 = 0x3ff;
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic1 = 0x7ff;
+ magic2 = 0x001ffe67;
+ } else {
+ magic1 = 0x7ff;
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ } else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 0xcf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x28, 0);
+ xf_emit(ctx, 8, 8);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 8, 0x400);
+ xf_emit(ctx, 8, 0x300);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x20);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ } else
+ xf_emit(ctx, 0x17, 0);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 0);
+ else if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x2a712488);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x4085c000);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0x10100);
+ xf_emit(ctx, 1, 0x02800000);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x30201000);
+ xf_emit(ctx, 1, 0x70605040);
+ xf_emit(ctx, 1, 0xb8a89888);
+ xf_emit(ctx, 1, 0xf8e8d8c8);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 6, 0);
+ } else {
+ xf_emit(ctx, 0xb, 0);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ if (dev_priv->chipset < 0xa0) {
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0x12, 0);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ } else {
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x5(ctx);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, mpcnt;
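+ /* number of MPs per TP varies by chipset */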
+ if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ mpcnt = 1;
+ else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
+ mpcnt = 2;
+ else
+ mpcnt = 3;
+ for (i = 0; i < mpcnt; i++) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0x80007004);
+ xf_emit(ctx, 1, 0x04000400);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0xc0);
+ xf_emit(ctx, 1, 0x1000);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 ||
+     dev_priv->chipset >= 0xa8) {
+ xf_emit(ctx, 1, 0xe00);
+ xf_emit(ctx, 1, 0x1e00);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0x1000);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ }
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0x1fe21);
+ }
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x3a0, 0);
+ else if (dev_priv->chipset < 0x94)
+ xf_emit(ctx, 0x3a2, 0);
+ else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x39f, 0);
+ else
+ xf_emit(ctx, 0x3a3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x2d, 0);
+}
+
+static void
+nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ uint32_t offset;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ int size = 0;
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+
+ if (dev_priv->chipset < 0xa0) {
+ for (i = 0; i < 8; i++) {
+ ctx->ctxvals_pos = offset + i;
+ if (i == 0)
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << i))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0: TPs 0, 1 */
+ ctx->ctxvals_pos = offset;
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 2, 3 */
+ ctx->ctxvals_pos = offset + 1;
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 4, 5, 6 */
+ ctx->ctxvals_pos = offset + 2;
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 7, 8, 9 */
+ ctx->ctxvals_pos = offset + 3;
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_2);
+ cp_out (ctx, CP_XFER_2);
+ cp_wait(ctx, XFER, BUSY);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e36ef05..5f21df31f3aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+ uint32_t save_nv001700;
+ uint64_t v;
struct nv50_instmem_priv *priv;
int ret, i;
- uint32_t v, save_nv001700;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
- else
- dev_priv->vram_sys_base = 0;
-
/* Reserve the last MiB of VRAM, we should probably try to avoid
* setting up the below tables over the top of the VBIOS image at
* some point.
*/
dev_priv->ramin_rsvd_vram = 1 << 20;
- c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+ c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
c_size = 128 << 10;
c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
@@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev)
dev_priv->vm_gart_size = NV50_VM_BLOCK;
dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
+ dev_priv->vm_vram_size = dev_priv->vram_size;
if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
@@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev)
i = 0;
while (v < dev_priv->vram_sys_base + c_offset + c_size) {
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
v += 0x1000;
i += 8;
}
@@ -390,7 +386,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
if (gpuobj->im_backing)
return -EINVAL;
- *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
if (*sz == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c2fff543b06f..812778db76ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode_ctl = 0x0200;
break;
case OUTPUT_DP:
- mode_ctl |= 0x00050000;
+ mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
if (nv_encoder->dcb->sorconf.link & 1)
mode_ctl |= 0x00000800;
else
@@ -319,5 +319,28 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+ uint32_t tmp;
+
+ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
+
+ switch ((tmp & 0x00000f00) >> 8) {
+ case 8:
+ case 9:
+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ nv_encoder->dp.unk0 = tmp & 0x000001fc;
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
+ break;
+ default:
+ break;
+ }
+
+ if (!nv_encoder->dp.mc_unknown)
+ nv_encoder->dp.mc_unknown = 5;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 4c39a407aa4a..e671d0e74d4c 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -31,6 +31,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1ea..84b1f2729d43 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,12 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,10 +48,14 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
-radeon-y += radeon_device.o radeon_kms.o \
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
@@ -54,8 +64,10 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o evergreen_cs.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f66f196..1d569830ed99 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -52,15 +53,17 @@
typedef struct {
struct atom_context *ctx;
-
uint32_t *ps, *ws;
int ps_shift;
uint16_t start;
+ unsigned last_jump;
+ unsigned long last_jump_jiffies;
+ bool abort;
} atom_exec_context;
int atom_debug = 0;
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8((*ptr)++);
+ int r = 0;
+
if (idx < ATOM_TABLE_NAMES_CNT)
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ if (r) {
+ ctx->abort = true;
+ }
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
int execute = 0, target = U16(*ptr);
+ unsigned long cjiffies;
+
(*ptr) += 2;
switch (arg) {
case ATOM_COND_ABOVE:
@@ -700,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
if (arg != ATOM_COND_ALWAYS)
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
- if (execute)
+ if (execute) {
+ if (ctx->last_jump == (ctx->start + target)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
if (jiffies_to_msecs(cjiffies) > 1000) {
DRM_ERROR("atombios stuck in loop for more than 1 sec, aborting\n");
+ ctx->abort = true;
+ }
+ } else {
/* jiffies wrapped around; just wait a little longer */
+ ctx->last_jump_jiffies = jiffies;
+ }
+ } else {
+ ctx->last_jump = ctx->start + target;
+ ctx->last_jump_jiffies = jiffies;
+ }
*ptr = ctx->start + target;
+ }
}
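
The jump watchdog above keys off jiffies: if a conditional jump keeps landing on the same target for more than a second, the interpreter sets ctx->abort rather than spinning forever. A minimal user-space sketch of the same idea, using wall-clock time instead of jiffies (the loop_guard/guard_check names are illustrative, not from the driver):

#include <stdbool.h>
#include <time.h>

struct loop_guard {
	unsigned last_target;   /* target of the last repeated jump */
	time_t last_stamp;      /* when we first saw it */
};

static bool guard_check(struct loop_guard *g, unsigned target)
{
	time_t now = time(NULL);

	if (g->last_target == target) {
		if (now - g->last_stamp > 1)
			return false;   /* stuck for > 1 s: abort */
	} else {
		g->last_target = target;
		g->last_stamp = now;
	}
	return true;                    /* keep executing */
}
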
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -881,13 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
@@ -897,13 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* op needs the full dst value */
+ dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
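
Both shift fixes follow the same recipe: operate on the full saved register word, then use the alignment recovered via atom_dst_to_src to mask and normalize the result before atom_put_dst writes it back. A standalone sketch of that mask-then-normalize step, with a made-up field at bits [23:8] (FIELD_MASK/FIELD_SHIFT are illustrative):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0x00FFFF00u  /* field occupies bits [23:8] */
#define FIELD_SHIFT 8

int main(void)
{
	uint32_t saved = 0x00ABCD00u;   /* full register word, field in place */
	uint32_t dst = saved;           /* shift the whole word, like "dst = saved" */

	dst <<= 4;                      /* the shl op itself */
	dst &= FIELD_MASK;              /* clamp back to the field ... */
	dst >>= FIELD_SHIFT;            /* ... and normalize to bit 0 for write-back */
	printf("field after shl 4: 0x%04X\n", dst);
	return 0;
}
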
@@ -1108,15 +1141,16 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
unsigned char op;
atom_exec_context ectx;
+ int ret = 0;
if (!base)
- return;
+ return -EINVAL;
len = CU16(base + ATOM_CT_SIZE_PTR);
ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1129,6 +1163,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.abort = false;
+ ectx.last_jump = 0;
if (ws)
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
else
@@ -1141,6 +1177,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+ if (ectx.abort) {
+ DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+ base, len, ws, ps, ptr - 1);
+ ret = -EINVAL;
+ goto free;
+ }
if (op < ATOM_OP_CNT && op > 0)
opcode_table[op].func(&ectx, &ptr,
@@ -1154,12 +1196,16 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
debug_depth--;
SDEBUG("<<\n");
+free:
if (ws)
kfree(ectx.ws);
+ return ret;
}
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
+ int r;
+
mutex_lock(&ctx->mutex);
/* reset reg block */
ctx->reg_block = 0;
@@ -1167,8 +1213,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
- atom_execute_table_locked(ctx, index, params);
+ r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
+ return r;
}
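
With atom_execute_table now returning int, failures flagged by the abort machinery propagate to callers instead of vanishing. A hypothetical caller-side pattern (user-space stub; the real radeon call sites each decide how to react):

#include <stdio.h>

/* Stub standing in for atom_execute_table(); -22 mimics -EINVAL. */
static int execute_table_stub(int table_present)
{
	return table_present ? 0 : -22;
}

int main(void)
{
	int r = execute_table_stub(0);

	if (r)  /* callers can now bail out or fall back */
		fprintf(stderr, "atombios table failed: %d\n", r);
	return r ? 1 : 0;
}
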
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1252,9 +1299,7 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-
- return 0;
+ return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
}
void atom_destroy(struct atom_context *ctx)
@@ -1264,12 +1309,16 @@ void atom_destroy(struct atom_context *ctx)
kfree(ctx);
}
-void atom_parse_data_header(struct atom_context *ctx, int index,
+bool atom_parse_data_header(struct atom_context *ctx, int index,
uint16_t * size, uint8_t * frev, uint8_t * crev,
uint16_t * data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
+ u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+ if (!mdt[index])
+ return false;
if (size)
*size = CU16(idx);
@@ -1278,38 +1327,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
if (crev)
*crev = CU8(idx + 3);
*data_start = idx;
- return;
+ return true;
}
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
uint8_t * crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
+ u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+ if (!mct[index])
+ return false;
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
- return;
+ return true;
}
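
Both parse helpers now check the master table entry and report whether the requested table actually exists before dereferencing it; callers are expected to test the bool before trusting frev/crev/data_start. A small sketch of that contract (parse_header_stub is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool parse_header_stub(int index, uint8_t *frev, uint8_t *crev)
{
	if (index != 3)         /* pretend only table 3 is present */
		return false;
	*frev = 1;
	*crev = 2;
	return true;
}

int main(void)
{
	uint8_t frev, crev;

	if (parse_header_stub(3, &frev, &crev))
		printf("table rev %u.%u\n", frev, crev);
	else
		printf("table absent, skipping\n");
	return 0;
}
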
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
- int usage_bytes;
+ int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
- atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+ if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
- firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+ firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
- DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ }
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
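
The reworked atom_allocate_fb_scratch only trusts the firmware's VRAM-usage table when the header parse succeeds, falling back to a 20 KiB default otherwise. The fallback in isolation (scratch_size is an illustrative helper, not a driver function):

#include <stddef.h>

static size_t scratch_size(size_t reported_kb)
{
	size_t bytes = reported_kb * 1024;

	return bytes ? bytes : 20 * 1024;   /* default when no table */
}
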
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index bc73781423a1..cd1b64ab5ca7 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -140,11 +140,13 @@ struct atom_context {
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
-void atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
-void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+ uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+ uint8_t *frev, uint8_t *crev);
int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b17..1bc72c3190a9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/****************************************************************************/
+
+/****************************************************************************/
/*Portion I: Definitions shared between VBIOS and Driver */
/****************************************************************************/
+
#ifndef _ATOMBIOS_H
#define _ATOMBIOS_H
@@ -40,39 +42,46 @@
#endif
#ifdef _H2INC
-#ifndef ULONG
-typedef unsigned long ULONG;
-#endif
+ #ifndef ULONG
+ typedef unsigned long ULONG;
+ #endif
-#ifndef UCHAR
-typedef unsigned char UCHAR;
-#endif
+ #ifndef UCHAR
+ typedef unsigned char UCHAR;
+ #endif
-#ifndef USHORT
-typedef unsigned short USHORT;
-#endif
+ #ifndef USHORT
+ typedef unsigned short USHORT;
+ #endif
#endif
-
-#define ATOM_DAC_A 0
+
+#define ATOM_DAC_A 0
#define ATOM_DAC_B 1
#define ATOM_EXT_DAC 2
#define ATOM_CRTC1 0
#define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+#define ATOM_CRTC_INVALID 0xFF
#define ATOM_DIGA 0
#define ATOM_DIGB 1
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL_INVALID 0xFF
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
-#define ATOM_SCALER_DISABLE 0
-#define ATOM_SCALER_CENTER 1
-#define ATOM_SCALER_EXPANSION 2
-#define ATOM_SCALER_MULTI_EX 3
+#define ATOM_SCALER_DISABLE 0
+#define ATOM_SCALER_CENTER 1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX 3
#define ATOM_DISABLE 0
#define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
#define ATOM_BLANKING 1
#define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
#define ATOM_DAC2_CV ATOM_DAC1_CV
#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
#define ATOM_DAC2_PAL ATOM_DAC1_PAL
-
+
#define ATOM_PM_ON 0
#define ATOM_PM_STANDBY 1
#define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
#define MEMTYPE_DDR1 "DDR1"
#define MEMTYPE_DDR2 "DDR2"
#define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
/* Maximum size of that FireGL flag string */
-#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
+#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
-#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
-#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
-#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
+#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
#define HW_ASSISTED_I2C_STATUS_FAILURE 2
#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
-#pragma pack(1) /* BIOS data must use byte aligment */
+#pragma pack(1) /* BIOS data must use byte alignment */
/* Define offset to location of ROM header. */
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
-#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
/* Common header for all ROM Data tables.
- Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
+ Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header.
And the pointer actually points to this header. */
-typedef struct _ATOM_COMMON_TABLE_HEADER {
- USHORT usStructureSize;
- UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
- UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
- /*Image can't be updated, while Driver needs to carry the new table! */
-} ATOM_COMMON_TABLE_HEADER;
-
-typedef struct _ATOM_ROM_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
- atombios should init it as "ATOM", don't change the position */
- USHORT usBiosRuntimeSegmentAddress;
- USHORT usProtectedModeInfoOffset;
- USHORT usConfigFilenameOffset;
- USHORT usCRC_BlockOffset;
- USHORT usBIOS_BootupMessageOffset;
- USHORT usInt10Offset;
- USHORT usPciBusDevInitCode;
- USHORT usIoBaseAddress;
- USHORT usSubsystemVendorID;
- USHORT usSubsystemID;
- USHORT usPCI_InfoOffset;
- USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
- USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
- UCHAR ucExtendedFunctionCode;
- UCHAR ucReserved;
-} ATOM_ROM_HEADER;
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+ USHORT usStructureSize;
+ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
+ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
+ /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+typedef struct _ATOM_ROM_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
+ atombios should init it as "ATOM", don't change the position */
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+}ATOM_ROM_HEADER;
/*==============================Command Table Portion==================================== */
#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
+ #define UTEMP USHORT
+ #define USHORT void*
#endif
-typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
- USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
- USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
- USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
- USHORT DIGxEncoderControl; /* Only used by Bios */
- USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
- USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
- USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
- USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
- USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
- USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryPLLInit;
- USHORT AdjustDisplayPll; /* only used by Bios */
- USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
- USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
- USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
- USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetConditionalGoldenSetting; /* only used by Bios */
- USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableScaler; /* Atomic Table, used only by Bios */
- USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
- USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
- USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
- USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
- USHORT UpdateCRTC_DoubleBufferRegisters;
- USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
- USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
- USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
- USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
- USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
- USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
- USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
- USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
- USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
- USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
- USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
- USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
- USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryTraining; /* Atomic Table, used only by Bios */
- USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
- USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
- USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
- USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
- USHORT DPEncoderService; /* Function Table,only used by Bios */
-} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
-
-/* For backward compatible */
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
+ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
+ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
+ USHORT DIGxEncoderControl; //Only used by Bios
+ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
+ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
+ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT GPIOPinControl; //Atomic Table, only used by Bios
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryPLLInit;
+ USHORT AdjustDisplayPll; //only used by Bios
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete, only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+ USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT GetConditionalGoldenSetting; //only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
+ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+ USHORT UpdateCRTC_DoubleBufferRegisters;
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
+ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
+ USHORT MemoryCleanUp; //Atomic Table, only used by Bios
+ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
+ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
+ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
+ USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
+ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
+ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
+ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
+ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryTraining; //Atomic Table, used only by Bios
+ USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatible
#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService ReadHWAssistedI2CStatus
+#define EnableVGA_Access GetSCLKOverMCLKRatio
-typedef struct _ATOM_MASTER_COMMAND_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
-} ATOM_MASTER_COMMAND_TABLE;
-
-/****************************************************************************/
-/* Structures used in every command table */
-/****************************************************************************/
-typedef struct _ATOM_TABLE_ATTRIBUTE {
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
#if ATOM_BIG_ENDIAN
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
#else
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
#endif
-} ATOM_TABLE_ATTRIBUTE;
+}ATOM_TABLE_ATTRIBUTE;
-typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
- ATOM_TABLE_ATTRIBUTE sbfAccess;
- USHORT susAccess;
-} ATOM_TABLE_ATTRIBUTE_ACCESS;
-
-/****************************************************************************/
-/* Common header for all command tables. */
-/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
-/* And the pointer actually points to this header. */
-/****************************************************************************/
-typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
- ATOM_COMMON_TABLE_HEADER CommonHeader;
- ATOM_TABLE_ATTRIBUTE TableAttribute;
-} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+ ATOM_TABLE_ATTRIBUTE sbfAccess;
+ USHORT susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER CommonHeader;
+ ATOM_TABLE_ATTRIBUTE TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
-/****************************************************************************/
-/* Structures used by ComputeMemoryEnginePLLTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
- ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */
- UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */
- UCHAR ucReserved; /* may expand to return larger Fbdiv later */
- UCHAR ucFbDiv; /* return value */
- UCHAR ucPostDiv; /* return value */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
-
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
- ULONG ulClock; /* When return, [23:0] return real clock */
- UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */
- USHORT usFbDiv; /* return Feedback value to be written to register */
- UCHAR ucPostDiv; /* return post div to be written to register */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+ ULONG ulClock; //When returned, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
+ UCHAR ucAction; //0:reserved //1:Memory //2:Engine
+ UCHAR ucReserved; //may expand to return larger Fbdiv later
+ UCHAR ucFbDiv; //return value
+ UCHAR ucPostDiv; //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+ ULONG ulClock; //When return, [23:0] return real clock
+ UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register
+ USHORT usFbDiv; //return Feedback value to be written to register
+ UCHAR ucPostDiv; //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
-#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
-#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+
+#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
-#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
-typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
#if ATOM_BIG_ENDIAN
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
- ULONG ulClockFreq:24; /* in unit of 10kHz */
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+ ULONG ulClockFreq:24; // in unit of 10kHz
#else
- ULONG ulClockFreq:24; /* in unit of 10kHz */
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+ ULONG ulClockFreq:24; // in unit of 10kHz
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
#endif
-} ATOM_COMPUTE_CLOCK_FREQ;
+}ATOM_COMPUTE_CLOCK_FREQ;
-typedef struct _ATOM_S_MPLL_FB_DIVIDER {
- USHORT usFbDivFrac;
- USHORT usFbDiv;
-} ATOM_S_MPLL_FB_DIVIDER;
-
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
- union {
- ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */
- ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
- };
- UCHAR ucRefDiv; /* Output Parameter */
- UCHAR ucPostDiv; /* Output Parameter */
- UCHAR ucCntlFlag; /* Output Parameter */
- UCHAR ucReserved;
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+ USHORT usFbDivFrac;
+ USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
-/* ucCntlFlag */
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ UCHAR ucCntlFlag; //Output Parameter
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8
-typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulReserved[2];
-} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-
-typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulMemoryClock;
- ULONG ulReserved;
-} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
-/****************************************************************************/
-/* Structures used by SetEngineClockTable */
-/****************************************************************************/
-typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
-} SET_ENGINE_CLOCK_PARAMETERS;
-
-typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_ENGINE_CLOCK_PS_ALLOCATION;
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+#else
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
-/****************************************************************************/
-/* Structures used by SetMemoryClockTable */
-/****************************************************************************/
-typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} SET_MEMORY_CLOCK_PARAMETERS;
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_MEMORY_CLOCK_PS_ALLOCATION;
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulMemoryClock;
+ ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
-/****************************************************************************/
-/* Structures used by ASIC_Init.ctb */
-/****************************************************************************/
-typedef struct _ASIC_INIT_PARAMETERS {
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
-} ASIC_INIT_PARAMETERS;
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
-typedef struct _ASIC_INIT_PS_ALLOCATION {
- ASIC_INIT_PARAMETERS sASICInitClocks;
- SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */
-} ASIC_INIT_PS_ALLOCATION;
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
-/****************************************************************************/
-/* Structure used by DynamicClockGatingTable.ctb */
-/****************************************************************************/
-typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} DYNAMIC_CLOCK_GATING_PARAMETERS;
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+ ASIC_INIT_PARAMETERS sASICInitClocks;
+ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
-/****************************************************************************/
-/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
-/****************************************************************************/
-typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
-/****************************************************************************/
-/* Structures used by DAC_LoadDetectionTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
- USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
- UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
- UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
-} DAC_LOAD_DETECTION_PARAMETERS;
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+ UCHAR ucMisc; //Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
-/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
#define DAC_LOAD_MISC_YPrPb 0x01
-typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
- DAC_LOAD_DETECTION_PARAMETERS sDacload;
- ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
-} DAC_LOAD_DETECTION_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- /* 7: ATOM_ENCODER_INIT Initialize DAC */
-} DAC_ENCODER_CONTROL_PARAMETERS;
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+ DAC_LOAD_DETECTION_PARAMETERS sDacload;
+ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
-/****************************************************************************/
-/* Structures used by DIG1EncoderControlTable */
-/* DIG2EncoderControlTable */
-/* ExternalEncoderControlTable */
-/****************************************************************************/
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucConfig;
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [3] Transmitter Sel */
- /* =0: UNIPHY or PCIEPHY */
- /* =1: LVTMA */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS;
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+// DIG2EncoderControlTable
+// ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucConfig;
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [3] Transmitter Sel
+ // =0: UNIPHY or PCIEPHY
+ // =1: LVTMA
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
-/* ucConfig */
+//ucConfig
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
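
The ATOM_BIG_ENDIAN blocks throughout this header declare bitfields in opposite order per endianness, because C leaves bitfield allocation order implementation-defined while the VBIOS expects one fixed 32-bit layout. A self-contained illustration modeled on ATOM_COMPUTE_CLOCK_FREQ (the union and the GCC byte-order macros are illustrative, not from the header):

#include <stdint.h>
#include <stdio.h>

union clock_word {
	uint32_t raw;
	struct {
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
		uint32_t flag:8;    /* compute-clock flag, as in the header */
		uint32_t freq:24;   /* clock in 10 kHz units */
#else
		uint32_t freq:24;
		uint32_t flag:8;
#endif
	} f;
};

int main(void)
{
	union clock_word w = { .raw = 0 };

	w.f.freq = 30000;   /* 300 MHz */
	w.f.flag = 2;       /* e.g. COMPUTE_ENGINE_PLL_PARAM */
	printf("raw word: 0x%08X\n", w.raw);
	return 0;
}
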
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
#define ATOM_ENCODER_CONFIG_LVTMA 0x08
#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
-#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */
-/* ucAction */
-/* ATOM_ENABLE: Enable Encoder */
-/* ATOM_DISABLE: Disable Encoder */
+#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE: Enable Encoder
+// ATOM_DISABLE: Disable Encoder
-/* ucEncoderMode */
+//ucEncoderMode
#define ATOM_ENCODER_MODE_DP 0
#define ATOM_ENCODER_MODE_LVDS 1
#define ATOM_ENCODER_MODE_DVI 2
#define ATOM_ENCODER_MODE_HDMI 3
#define ATOM_ENCODER_MODE_SDVO 4
+#define ATOM_ENCODER_MODE_DP_AUDIO 5
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
-typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucReserved1:2;
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucReserved:1;
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
+ UCHAR ucReserved1:2;
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucReserved:1;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
- UCHAR ucReserved:1;
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucReserved1:2;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:1;
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucReserved1:2;
#endif
-} ATOM_DIG_ENCODER_CONFIG_V2;
+}ATOM_DIG_ENCODER_CONFIG_V2;
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
- UCHAR ucAction;
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS_V2;
-/* ucConfig */
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
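
These V2 config flags are OR-ed into the config byte one bit (or bit pair) at a time, with the *_MASK defines carving out which bits belong to each knob. A sketch of composing and testing a config byte, using values copied from the defines visible in this header (the CFG_* shorthand names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CFG_DPLINKRATE_MASK    0x01
#define CFG_DPLINKRATE_2_70GHZ 0x01
#define CFG_TRANSMITTER2       0x08

int main(void)
{
	uint8_t cfg = CFG_DPLINKRATE_2_70GHZ | CFG_TRANSMITTER2;

	if ((cfg & CFG_DPLINKRATE_MASK) == CFG_DPLINKRATE_2_70GHZ)
		printf("2.7 GHz link, ucConfig = 0x%02X\n", cfg);
	return 0;
}
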
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
-/****************************************************************************/
-/* Structures used by UNIPHYTransmitterControlTable */
-/* LVTMATransmitterControlTable */
-/* DVOOutputControlTable */
-/****************************************************************************/
-typedef struct _ATOM_DP_VS_MODE {
- UCHAR ucLaneSel;
- UCHAR ucLaneSet;
-} ATOM_DP_VS_MODE;
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
+#define ATOM_ENCODER_CMD_SETUP 0x0f
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
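For reference, a hypothetical driver-side ordering of these commands during DP bring-up (a sketch only; how the table is actually executed is driver-specific and outside this header):

#include "atombios.h"

/* Sketch: ucAction values a driver would issue, in order, for DP link
 * training; each one is written into ucAction before running the table. */
static const unsigned char dp_training_sequence[] = {
    ATOM_ENCODER_CMD_SETUP,                     /* 0x0f: configure encoder    */
    ATOM_ENCODER_CMD_DP_LINK_TRAINING_START,    /* 0x08: begin training       */
    ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1, /* 0x09: clock recovery       */
    ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2, /* 0x0a: channel equalization */
    ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, /* 0x0b: end training         */
    ATOM_ENCODER_CMD_DP_VIDEO_ON,               /* 0x0d: start video stream   */
};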
+
+// The ENABLE sub-function below is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved:3;
+  UCHAR ucDPLinkRate:1;           // =0: 1.62GHz, =1: 2.7GHz
+#else
+  UCHAR ucDPLinkRate:1;           // =0: 1.62GHz, =1: 2.7GHz
+ UCHAR ucReserved:3;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;      // in 10KHz; for BIOS convenience
+ ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE 0x00
+#define PANEL_6BIT_PER_COLOR 0x01
+#define PANEL_8BIT_PER_COLOR 0x02
+#define PANEL_10BIT_PER_COLOR 0x03
+#define PANEL_12BIT_PER_COLOR 0x04
+#define PANEL_16BIT_PER_COLOR 0x05
+
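A minimal sketch of a V3 SETUP call with an explicit bit depth (hypothetical values; assumes this header is included as atombios.h):

#include "atombios.h"
#include <string.h>

/* Sketch: set up DIG encoder C for 2-lane DP at 8 bits per color.
 * ucBitPerColor is only honored when ucAction = ATOM_ENCODER_CMD_SETUP. */
static void setup_dig_v3(DIG_ENCODER_CONTROL_PARAMETERS_V3 *p)
{
    memset(p, 0, sizeof(*p));
    p->usPixelClock = 10800;              /* 108 MHz in 10 kHz units */
    p->acConfig.ucDigSel = 2;             /* 0..5 selects DIGA..DIGF, so 2 = DIGC */
    p->acConfig.ucDPLinkRate = 0;         /* 1.62 GHz link rate */
    p->ucAction = ATOM_ENCODER_CMD_SETUP;
    p->ucEncoderMode = ATOM_ENCODER_MODE_DP;
    p->ucLaneNum = 2;
    p->ucBitPerColor = PANEL_8BIT_PER_COLOR;
}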
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+// LVTMATransmitterControlTable
+// DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+ UCHAR ucLaneSel;
+ UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+ union
+ {
+    USHORT usPixelClock;		// in 10KHz; for BIOS convenience
+	 USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- UCHAR ucConfig;
- /* [0]=0: 4 lane Link, */
- /* =1: 8 lane Link ( Dual Links TMDS ) */
- /* [1]=0: InCoherent mode */
- /* =1: Coherent Mode */
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [5:4]PCIE lane Sel */
- /* =0: lane 0~3 or 0~7 */
- /* =1: lane 4~7 */
- /* =2: lane 8~11 or 8~15 */
- /* =3: lane 12~15 */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS;
-
-#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
-
-/* ucInitInfo */
-#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
-
-/* ucConfig */
+ UCHAR ucConfig;
+ // [0]=0: 4 lane Link,
+ // =1: 8 lane Link ( Dual Links TMDS )
+	                           // [1]=0: Incoherent mode
+ // =1: Coherent Mode
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [5:4]PCIE lane Sel
+ // =0: lane 0~3 or 0~7
+ // =1: lane 4~7
+ // =2: lane 8~11 or 8~15
+ // =3: lane 12~15
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+//ucConfig
#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
-#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
-#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
-/* ucAction */
+//ucAction
#define ATOM_TRANSMITTER_ACTION_DISABLE 0
#define ATOM_TRANSMITTER_ACTION_ENABLE 1
#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
#define ATOM_TRANSMITTER_ACTION_SETUP 10
#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
-/* Following are used for DigTransmitterControlTable ver1.2 */
-typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
- UCHAR ucReserved:1;
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
-
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucReserved:1;
+  UCHAR fDPConnector:1;     //bit4=0: DP connector  =1: non-DP connector
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
#else
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucReserved:1;
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;     //bit4=0: DP connector  =1: non-DP connector
+ UCHAR ucReserved:1;
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
#endif
-} ATOM_DIG_TRANSMITTER_CONFIG_V2;
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
-/* ucConfig */
-/* Bit0 */
+//ucConfig
+//Bit0
#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
-/* Bit1 */
+//Bit1
#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
-/* Bit2 */
+//Bit2
#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
-#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
-/* Bit3 */
+// Bit3
#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
-/* Bit4 */
+// Bit4
#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
-/* Bit7:6 */
+// Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+ union
+ {
+    USHORT usPixelClock;		// in 10KHz; for BIOS convenience
+	 USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
- UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
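A sketch of filling the V2 transmitter block for a dual-link DVI enable (hypothetical pixel clock; assumes the header is included as atombios.h):

#include "atombios.h"
#include <string.h>

/* Sketch: enable a coherent dual-link DVI transmitter on Uniphy CD. */
static void enable_dual_link_dvi(DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 *p)
{
    memset(p, 0, sizeof(*p));
    p->usPixelClock = 26970;              /* 269.7 MHz in 10 kHz units */
    p->acConfig.fDualLinkConnector = 1;   /* dual-link DVI connector */
    p->acConfig.fCoherentMode = 1;        /* coherent mode for DVI/HDMI */
    p->acConfig.ucTransmitterSel = 1;     /* Dig Transmitter 2 (Uniphy CD) */
    p->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
}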
-/****************************************************************************/
-/* Structures used by DAC1OuputControlTable */
-/* DAC2OuputControlTable */
-/* LVTMAOutputControlTable (Before DEC30) */
-/* TMDSAOutputControlTable (Before DEC30) */
-/****************************************************************************/
-typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
- UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */
- /* When the display is LCD, in addition to above: */
- /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
- /* ATOM_LCD_SELFTEST_STOP */
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
- UCHAR aucPadding[3]; /* padding to DWORD aligned */
-} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+ union
+ {
+    USHORT usPixelClock;		// in 10KHz; for BIOS convenience
+	 USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
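Because the bitfield layout above flips with ATOM_BIG_ENDIAN, a caller can instead compose the config byte from these masks, which is layout-independent; a sketch:

#include "atombios.h"

/* Sketch: build a V3 transmitter config byte from the mask definitions
 * (the ATOM_TRASMITTER_ spelling of the refclk masks is the header's own). */
static unsigned char build_v3_config(void)
{
    unsigned char cfg = 0;
    cfg |= ATOM_TRANSMITTER_CONFIG_V3_COHERENT;     /* bit1: coherent mode */
    cfg |= ATOM_TRANSMITTER_CONFIG_V3_LINKB;        /* bit2: link B/D/F */
    cfg |= ATOM_TRASMITTER_CONFIG_V3_P2PLL;         /* bits 5:4: PPLL2 refclk */
    cfg |= ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2; /* bits 7:6: Uniphy CD */
    return cfg;
}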
+
+/****************************************************************************/
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DCE30)
+//                    TMDSAOutputControlTable  (Before DCE30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR  ucAction;                    // Possible input: ATOM_ENABLE||ATOM_DISABLE
+ // When the display is LCD, in addition to above:
+ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
+ // ATOM_LCD_SELFTEST_STOP
+
+ UCHAR aucPadding[3]; // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
-/****************************************************************************/
-/* Structures used by BlankCRTCTable */
-/****************************************************************************/
-typedef struct _BLANK_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */
- USHORT usBlackColorRCr;
- USHORT usBlackColorGY;
- USHORT usBlackColorBCb;
-} BLANK_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
+ USHORT usBlackColorRCr;
+ USHORT usBlackColorGY;
+ USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableCRTCTable */
-/* EnableCRTCMemReqTable */
-/* UpdateCRTC_DoubleBufferRegistersTable */
-/****************************************************************************/
-typedef struct _ENABLE_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+// EnableCRTCMemReqTable
+// UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_OverScanTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
- USHORT usOverscanRight; /* right */
- USHORT usOverscanLeft; /* left */
- USHORT usOverscanBottom; /* bottom */
- USHORT usOverscanTop; /* top */
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_OVERSCAN_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+ USHORT usOverscanRight; // right
+ USHORT usOverscanLeft; // left
+ USHORT usOverscanBottom; // bottom
+ USHORT usOverscanTop; // top
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_ReplicationTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
- UCHAR ucH_Replication; /* horizontal replication */
- UCHAR ucV_Replication; /* vertical replication */
- UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding;
-} SET_CRTC_REPLICATION_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+ UCHAR ucH_Replication; // horizontal replication
+ UCHAR ucV_Replication; // vertical replication
+ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
-/****************************************************************************/
-/* Structures used by SelectCRTC_SourceTable */
-/****************************************************************************/
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
- UCHAR ucPadding[2];
-} SELECT_CRTC_SOURCE_PARAMETERS;
+/****************************************************************************/
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+ UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
- UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */
- UCHAR ucPadding;
-} SELECT_CRTC_SOURCE_PARAMETERS_V2;
-
-/* ucEncoderID */
-/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
-/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
-/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
-/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
-/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
-/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
-/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
-/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
-
-/* ucEncodeMode */
-/* #define ATOM_ENCODER_MODE_DP 0 */
-/* #define ATOM_ENCODER_MODE_LVDS 1 */
-/* #define ATOM_ENCODER_MODE_DVI 2 */
-/* #define ATOM_ENCODER_MODE_HDMI 3 */
-/* #define ATOM_ENCODER_MODE_SDVO 4 */
-/* #define ATOM_ENCODER_MODE_TV 13 */
-/* #define ATOM_ENCODER_MODE_CV 14 */
-/* #define ATOM_ENCODER_MODE_CRT 15 */
-
-/****************************************************************************/
-/* Structures used by SetPixelClockTable */
-/* GetPixelClockTable */
-/****************************************************************************/
-/* Major revision=1., Minor revision=1 */
-typedef struct _PIXEL_CLOCK_PARAMETERS {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucPadding;
-} PIXEL_CLOCK_PARAMETERS;
-
-/* Major revision=1., Minor revision=2, add ucMiscIfno */
-/* ucMiscInfo: */
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID 0x00
+//#define ASIC_INT_TV_ENCODER_ID 0x02
+//#define ASIC_INT_DIG1_ENCODER_ID 0x03
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04
+//#define ASIC_EXT_TV_ENCODER_ID 0x06
+//#define ASIC_INT_DVO_ENCODER_ID 0x07
+//#define ASIC_INT_DIG2_ENCODER_ID 0x09
+//#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP 0
+//#define ATOM_ENCODER_MODE_LVDS 1
+//#define ATOM_ENCODER_MODE_DVI 2
+//#define ATOM_ENCODER_MODE_HDMI 3
+//#define ATOM_ENCODER_MODE_SDVO 4
+//#define ATOM_ENCODER_MODE_TV 13
+//#define ATOM_ENCODER_MODE_CV 14
+//#define ATOM_ENCODER_MODE_CRT 15
+
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+// GetPixelClockTable
+/****************************************************************************/
+//Major revision=1., Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1., Minor revision=2, add ucMiscIfno
+//ucMiscInfo:
#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
#define MISC_DEVICE_INDEX_MASK 0xF0
#define MISC_DEVICE_INDEX_SHIFT 4
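The mask/shift pair composes ucMiscInfo like so (a sketch):

#include "atombios.h"

/* Sketch: pack a device index (bits 7:4) plus the force-reprogram flag
 * (bit 0) into the V2 ucMiscInfo byte. */
static unsigned char pack_misc_info(unsigned char device_index)
{
    return (unsigned char)(((device_index << MISC_DEVICE_INDEX_SHIFT) &
                            MISC_DEVICE_INDEX_MASK) |
                           MISC_FORCE_REPROG_PIXEL_CLOCK);
}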
-typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
-} PIXEL_CLOCK_PARAMETERS_V2;
-
-/* Major revision=1., Minor revision=3, structure/definition change */
-/* ucEncoderMode: */
-/* ATOM_ENCODER_MODE_DP */
-/* ATOM_ENOCDER_MODE_LVDS */
-/* ATOM_ENOCDER_MODE_DVI */
-/* ATOM_ENOCDER_MODE_HDMI */
-/* ATOM_ENOCDER_MODE_SDVO */
-/* ATOM_ENCODER_MODE_TV 13 */
-/* ATOM_ENCODER_MODE_CV 14 */
-/* ATOM_ENCODER_MODE_CRT 15 */
-
-/* ucDVOConfig */
-/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
-/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
-/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
-/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
-/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
-/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
-/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */
-
-/* ucMiscInfo: also changed, see below */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1., Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV 13
+//ATOM_ENCODER_MODE_CV 14
+//ATOM_ENCODER_MODE_CRT 15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+//#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+//ucMiscInfo: also changed, see below
#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
-typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */
- union {
- UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
- UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL. For VGA PPLL, make sure this value is not 0.
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
+ union
+ {
+ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+ UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit
};
- UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
- /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
-} PIXEL_CLOCK_PARAMETERS_V3;
+ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+ // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
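The divider formula in the usPixelClock comment can be sanity-checked numerically; a standalone sketch assuming a 27 MHz reference clock (a typical value, not mandated by this header):

#include <stdio.h>

/* Sketch: pixel clock = (RefClk * FB_Div) / (Ref_Div * Post_Div),
 * with everything in 10 kHz units to match usPixelClock. */
int main(void)
{
    unsigned int refclk = 2700;  /* assumed 27 MHz reference, in 10 kHz units */
    unsigned int fb_div = 120, ref_div = 3, post_div = 4;
    unsigned int pixclk = (refclk * fb_div) / (ref_div * post_div);
    printf("pixel clock = %u (10 kHz units) = %u kHz\n", pixclk, pixclk * 10);
    /* 2700 * 120 / (3 * 4) = 27000 -> 270 MHz */
    return 0;
}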
-/****************************************************************************/
-/* Structures used by AdjustDisplayPllTable */
-/****************************************************************************/
-typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+  UCHAR  ucCRTC;             // ATOM_CRTC1~6, indicates the CRTC controller to
+                             // drive the pixel clock. Not used for the DCPLL case.
+ union{
+ UCHAR ucReserved;
+  UCHAR  ucFracFbDiv;        // [gphan] temporary, to prevent a build problem; remove it after the driver code is changed.
+ };
+  USHORT usPixelClock;       // target pixel clock to drive the CRTC timing
+ // 0 means disable PPLL/DCPLL.
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+                             //        =0: XTALIN( default mode )
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
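Two small sketches against the V5 layout: composing ucMiscInfo for a force-programmed 30 bpp HDMI PLL, and recovering the effective feedback divider from the integer and fractional parts:

#include "atombios.h"

static unsigned char v5_misc_hdmi_30bpp(void)
{
    return PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL | PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
}

/* ulFbDivDecFrac carries the decimal fraction scaled by 1,000,000
 * (1..999999 maps to 0.000001..0.999999 per the comment above). */
static double v5_effective_fb_div(const PIXEL_CLOCK_PARAMETERS_V5 *p)
{
    return (double)p->usFbDiv + (double)p->ulFbDivDecFrac / 1000000.0;
}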
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+ UCHAR ucStatus;
+ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+ UCHAR ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
USHORT usPixelClock;
UCHAR ucTransmitterID;
UCHAR ucEncodeMode;
- union {
- UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */
- UCHAR ucConfig; /* if none DVO, not defined yet */
+ union
+ {
+    UCHAR ucDVOConfig;                     //if DVO, need to pass the link rate and output: 12-bit low or 24-bit
+    UCHAR ucConfig;                        //if non-DVO, not defined yet
};
UCHAR ucReserved[3];
-} ADJUST_DISPLAY_PLL_PARAMETERS;
+}ADJUST_DISPLAY_PLL_PARAMETERS;
#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
-
#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableYUVTable */
-/****************************************************************************/
-typedef struct _ENABLE_YUV_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
- UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */
- UCHAR ucPadding[2];
-} ENABLE_YUV_PARAMETERS;
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+ USHORT usPixelClock; // target pixel clock
+ UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+ UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
+ UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// usDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL                0x0001     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED               0x0000     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED               0x0001     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL              0x000c     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT                0x0000     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT              0x0004     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT                   0x0008     // needed only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+  ULONG ulDispPllFreq;                 // returns the display PPLL freq used to generate the pixel clock, and the related idclk, symclk, etc.
+  UCHAR ucRefDiv;                      // if non-zero, it is used to calculate the other PPLL parameters, fb_divider and post_div ( if not given )
+  UCHAR ucPostDiv;                     // if non-zero, it is used to calculate the other PPLL parameter fb_divider
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+ union
+ {
+ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
+ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+ };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
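Since the allocation overlays input and output in one union, a caller fills sInput, runs the table, then reads sOutput from the same memory; a sketch (how the table is executed is driver-specific and elided here):

#include "atombios.h"
#include <string.h>

static unsigned long adjust_pll_sketch(ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 *args)
{
    memset(args, 0, sizeof(*args));
    args->sInput.usPixelClock = 16200;    /* 162 MHz in 10 kHz units */
    args->sInput.ucEncodeMode = ATOM_ENCODER_MODE_DP;
    args->sInput.ucDispPllConfig = DISPPLL_CONFIG_SS_ENABLE; /* DP/LVDS only */

    /* ... execute the AdjustDisplayPll command table here ... */

    return args->sOutput.ulDispPllFreq;   /* the result overlays the input */
}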
+
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
+ UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetMemoryClockTable */
-/****************************************************************************/
-typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
} GET_MEMORY_CLOCK_PARAMETERS;
#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetEngineClockTable */
-/****************************************************************************/
-typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
} GET_ENGINE_CLOCK_PARAMETERS;
#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Following Structures and constant may be obsolete */
-/****************************************************************************/
-/* Maxium 8 bytes,the data read in will be placed in the parameter space. */
-/* Read operaion successeful when the paramter space is non-zero, otherwise read operation failed */
-typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usVRAMAddress; /* Adress in Frame Buffer where to pace raw EDID */
- USHORT usStatus; /* When use output: lower byte EDID checksum, high byte hardware status */
- /* WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte */
- UCHAR ucSlaveAddr; /* Read from which slave */
- UCHAR ucLineNumber; /* Read from which HW assisted line */
-} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+/****************************************************************************/
+// Following Structures and constant may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//The read operation is successful when the parameter space is non-zero; otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+  USHORT    usVRAMAddress;    //Address in frame buffer where to place the raw EDID
+  USHORT    usStatus;         //When used as output: lower byte is the EDID checksum, high byte the hardware status
+                              //When used as input:  lower byte is 'bytes to read'; currently limited to 128 bytes or 1 byte
+ UCHAR ucSlaveAddr; //Read from which slave
+ UCHAR ucLineNumber; //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
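A sketch of an input block for a 128-byte EDID read (the prescale and line number here are placeholders, not values this header prescribes):

#include "atombios.h"
#include <string.h>

/* Sketch: read 128 bytes of EDID from I2C slave 0xA0 over HW line 0.
 * On input, the low byte of usStatus carries the byte count to read. */
static void fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *p,
                           unsigned short fb_offset)
{
    memset(p, 0, sizeof(*p));
    p->usPrescale = 0x7f;         /* placeholder engine-clock/I2C-clock ratio */
    p->usVRAMAddress = fb_offset; /* frame-buffer offset for the raw EDID */
    p->usStatus = 128;            /* input: bytes to read (128 or 1) */
    p->ucSlaveAddr = 0xA0;        /* standard DDC EDID slave address */
    p->ucLineNumber = 0;          /* HW-assisted I2C line 0 */
}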
+
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
-typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usByteOffset; /* Write to which byte */
- /* Upper portion of usByteOffset is Format of data */
- /* 1bytePS+offsetPS */
- /* 2bytesPS+offsetPS */
- /* blockID+offsetPS */
- /* blockID+offsetID */
- /* blockID+counterID+offsetID */
- UCHAR ucData; /* PS data1 */
- UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usByteOffset; //Write to which byte
+ //Upper portion of usByteOffset is Format of data
+ //1bytePS+offsetPS
+ //2bytesPS+offsetPS
+ //blockID+offsetPS
+ //blockID+offsetID
+ //blockID+counterID+offsetID
+ UCHAR ucData; //PS data1
+  UCHAR     ucData;           //PS data1
+  UCHAR     ucStatus;         //Status byte: 1=success, 2=failure; also used as PS data2
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} SET_UP_HW_I2C_DATA_PARAMETERS;
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/****************************************************************************/
-/* Structures used by PowerConnectorDetectionTable */
-/****************************************************************************/
-typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucPwrBehaviorId;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
-} POWER_CONNECTOR_DETECTION_PARAMETERS;
-
-typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucReserved;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0 = detected, 1 = not detected
+  UCHAR   ucPwrBehaviorId;
+  USHORT  usPwrBudget;                 //how much power we currently boot to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0 = detected, 1 = not detected
+  UCHAR   ucReserved;
+  USHORT  usPwrBudget;                 //how much power we currently boot to, in units of watts
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
/****************************LVDS SS Command Table Definitions**********************/
-/****************************************************************************/
-/* Structures used by EnableSpreadSpectrumOnPPLLTable */
-/****************************************************************************/
-typedef struct _ENABLE_LVDS_SS_PARAMETERS {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_LVDS_SS_PARAMETERS;
-
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPadding;
-} ENABLE_LVDS_SS_PARAMETERS_V2;
-
-/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
-typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */
-} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS
+{
+ USHORT usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;        //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;        //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+ USHORT usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;        // Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
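The amount word packs two fields; a sketch of composing it from the masks and shifts above:

#include "atombios.h"

/* Sketch: pack SS_AMOUNT_FBDIV (bits 7:0) and SS_AMOUNT_NFRAC_SLIP
 * (bits 11:8) into usSpreadSpectrumAmount. */
static unsigned short pack_ss_amount(unsigned char fbdiv, unsigned char nfrac)
{
    return (unsigned short)
        (((fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
          ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
         ((nfrac << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
          ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
}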
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
/**************************************************************************/
-typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
- PIXEL_CLOCK_PARAMETERS sPCLKInput;
- ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */
-} SET_PIXEL_CLOCK_PS_ALLOCATION;
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _MEMORY_TRAINING_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} MEMORY_TRAINING_PARAMETERS;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10KHz unit
+}MEMORY_TRAINING_PARAMETERS;
#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
/****************************LVDS and other encoder command table definitions **********************/
-/****************************************************************************/
-/* Structures used by LVDSEncoderControlTable (Before DCE30) */
-/* LVTMAEncoderControlTable (Before DCE30) */
-/* TMDSAEncoderControlTable (Before DCE30) */
-/****************************************************************************/
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* bit0=0: Enable single link */
- /* =1: Enable dual link */
- /* Bit1=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} LVDS_ENCODER_CONTROL_PARAMETERS;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable (Before DCE30)
+// LVTMAEncoderControlTable (Before DCE30)
+// TMDSAEncoderControlTable (Before DCE30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // bit0=0: Enable single link
+ // =1: Enable dual link
+ // Bit1=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- UCHAR ucTruncate; /* bit0=0: Disable truncate */
- /* =1: Enable truncate */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
- /* =1: Enable spatial dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
- /* =1: Enable temporal dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- /* bit5=0: Gray level 2 */
- /* =1: Gray level 4 */
- UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
- /* =1: 25FRC_SEL pattern F */
- /* bit6:5=0: 50FRC_SEL pattern A */
- /* =1: 50FRC_SEL pattern B */
- /* =2: 50FRC_SEL pattern C */
- /* =3: 50FRC_SEL pattern D */
- /* bit7=0: 75FRC_SEL pattern E */
- /* =1: 75FRC_SEL pattern F */
-} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ UCHAR ucTruncate; // bit0=0: Disable truncate
+ // =1: Enable truncate
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucSpatial; // bit0=0: Disable spatial dithering
+ // =1: Enable spatial dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucTemporal; // bit0=0: Disable temporal dithering
+ // =1: Enable temporal dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ // bit5=0: Gray level 2
+ // =1: Gray level 4
+ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
+ // =1: 25FRC_SEL pattern F
+ // bit6:5=0: 50FRC_SEL pattern A
+ // =1: 50FRC_SEL pattern B
+ // =2: 50FRC_SEL pattern C
+ // =3: 50FRC_SEL pattern D
+ // bit7=0: 75FRC_SEL pattern E
+ // =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
-
+
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
- UCHAR ucEnable; /* Enable or Disable External TMDS encoder */
- UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
- UCHAR ucPadding[2];
-} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
-
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+ UCHAR ucEnable; // Enable or Disable External TMDS encoder
+ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+ UCHAR ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
-typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
- DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
-/****************************************************************************/
-/* Structures used by DVOEncoderControlTable */
-/****************************************************************************/
-/* ucTableFormatRevision=1,ucTableContentRevision=3 */
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
-/* ucDVOConfig: */
+//ucDVOConfig:
#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
#define DVO_ENCODER_CONFIG_24BIT 0x08
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
- USHORT usPixelClock;
- UCHAR ucDVOConfig;
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- UCHAR ucReseved[4];
-} DVO_ENCODER_CONTROL_PARAMETERS_V3;
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
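[Annotation: for illustration (not a verbatim driver path), the V3 parameter block might be filled like this before executing the DVOEncoderControl command table. ATOM_ENABLE is defined earlier in this header; the pixel clock is an arbitrary example value:]

static void dvo_enable_sketch(void)
{
	DVO_ENCODER_CONTROL_PARAMETERS_V3 args = {0};

	args.usPixelClock = 13500;	/* 135 MHz; table units are 10 kHz */
	args.ucDVOConfig = DVO_ENCODER_CONFIG_SDR_SPEED |
			   DVO_ENCODER_CONFIG_24BIT;
	args.ucAction = ATOM_ENABLE;
	/* pass &args to the driver's ATOM interpreter to run the table */
}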
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */
-/* bit1=0: non-coherent mode */
-/* =1: coherent mode */
+//ucTableFormatRevision=1
+//ucTableContentRevision=3 structure is not changed, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+// =1: coherent mode
-/* ========================================================================================== */
-/* Only change is here next time when changing encoder parameter definitions again! */
+//==========================================================================================
+//Only change is here next time when changing encoder parameter definitions again!
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
-/* ========================================================================================== */
+//==========================================================================================
#define PANEL_ENCODER_MISC_DUAL 0x01
#define PANEL_ENCODER_MISC_COHERENT 0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define PANEL_ENCODER_75FRC_E 0x00
#define PANEL_ENCODER_75FRC_F 0x80
-/****************************************************************************/
-/* Structures used by SetVoltageTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
#define SET_VOLTAGE_INIT_MODE 5
-#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic
#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
-typedef struct _SET_VOLTAGE_PARAMETERS {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */
- UCHAR ucVoltageIndex; /* An index to tell which voltage level */
- UCHAR ucReserved;
-} SET_VOLTAGE_PARAMETERS;
-
-typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* Not used, maybe use for state machine for differen power mode */
- USHORT usVoltageLevel; /* real voltage level */
-} SET_VOLTAGE_PARAMETERS_V2;
-
-typedef struct _SET_VOLTAGE_PS_ALLOCATION {
- SET_VOLTAGE_PARAMETERS sASICSetVoltage;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} SET_VOLTAGE_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by TVEncoderControlTable */
-/****************************************************************************/
-typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} TV_ENCODER_CONTROL_PARAMETERS;
-
-typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
- TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
-} TV_ENCODER_CONTROL_PS_ALLOCATION;
-
-/* ==============================Data Table Portion==================================== */
-
-#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
-#endif
-
-/****************************************************************************/
-/* Structure used in Data.mtb */
-/****************************************************************************/
-typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
- USHORT UtilityPipeLine; /* Offest for the utility to get parser info,Don't change this position! */
- USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios */
- USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios */
- USHORT StandardVESA_Timing; /* Only used by Bios */
- USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
- USHORT DAC_Info; /* Will be obsolete from R600 */
- USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT TMDS_Info; /* Will be obsolete from R600 */
- USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
- USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
- USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
- USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
- USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
- USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
- USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
- USHORT CompassionateData; /* Will be obsolete from R600 */
- USHORT SaveRestoreInfo; /* Only used by Bios */
- USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
- USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
- USHORT XTMDS_Info; /* Will be obsolete from R600 */
- USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
- USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
- USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
- USHORT MC_InitParameter; /* Only used by command table */
- USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
- USHORT ASIC_InternalSS_Info; /* New tabel name from R600, used to be called "ASIC_MVDDC_Info" */
- USHORT TV_VideoMode; /* Only used by command table */
- USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
- USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
- USHORT IntegratedSystemInfo; /* Shared by various SW components */
- USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
- USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
- USHORT PowerSourceInfo; /* Shared by various SW components, latest versoin 1.1 */
-} ATOM_MASTER_LIST_OF_DATA_TABLES;
-
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
+typedef struct _SET_VOLTAGE_PARAMETERS
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // To set all, to set source A or source B or ...
+ UCHAR ucVoltageIndex; // An index to tell which voltage level
+ UCHAR ucReserved;
+}SET_VOLTAGE_PARAMETERS;
-typedef struct _ATOM_MASTER_DATA_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
-} ATOM_MASTER_DATA_TABLE;
+typedef struct _SET_VOLTAGE_PARAMETERS_V2
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // Not used, may be used for a state machine for different power modes
+ USHORT usVoltageLevel; // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
-/****************************************************************************/
-/* Structure used in MultimediaCapabilityInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* HW info table signature string "$ATI" */
- UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
- UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
- UCHAR ucVideoPortInfo; /* Provides the video port capabilities */
- UCHAR ucHostPortInfo; /* Provides host port configuration information */
-} ATOM_MULTIMEDIA_CAPABILITY_INFO;
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
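[Annotation: a minimal sketch of a SetVoltage call's parameter setup, per the field comments above; the voltage index is a made-up, board-specific value, and the sReserved scratch area is deliberately left zeroed:]

static void set_vddc_sketch(void)
{
	SET_VOLTAGE_PS_ALLOCATION ps = {0};

	ps.sASICSetVoltage.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
	ps.sASICSetVoltage.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
	ps.sASICSetVoltage.ucVoltageIndex = 2;	/* board-specific level */
	/* execute the SetVoltage command table with &ps */
}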
+
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
-/****************************************************************************/
-/* Structure used in MultimediaConfigInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* MM info table signature sting "$MMT" */
- UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
- UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
- UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
- UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
- UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
- UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
- UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
- UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
-} ATOM_MULTIMEDIA_CONFIG_INFO;
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
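[Annotation: same pattern for the TV encoder; illustrative values only. ATOM_TV_NTSC is defined earlier in this header:]

static void tv_encoder_on_sketch(void)
{
	TV_ENCODER_CONTROL_PS_ALLOCATION ps = {0};

	ps.sTVEncoder.usPixelClock = 2700;	/* 27 MHz in 10 kHz units */
	ps.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
	ps.sTVEncoder.ucAction = 1;		/* setup and turn on encoder */
	/* ps.sReserved stays zeroed -- "Don't set this one" */
}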
-/****************************************************************************/
-/* Structures used in FirmwareInfoTable */
-/****************************************************************************/
+//==============================Data Table Portion====================================
-/* usBIOSCapability Definition: */
-/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
-/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
-/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
-/* Others: Reserved */
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+ USHORT UtilityPipeLine; // Offset for the utility to get parser info; don't change this position!
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios
+ USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+ USHORT DAC_Info; // Will be obsolete from R600
+ USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT TMDS_Info; // Will be obsolete from R600
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
+ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
+ USHORT VESA_ToInternalModeLUT; // Only used by Bios
+ USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
+ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
+ USHORT CompassionateData; // Will be obsolete from R600
+ USHORT SaveRestoreInfo; // Only used by Bios
+ USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info
+ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
+ USHORT XTMDS_Info; // Will be obsolete from R600
+ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+ USHORT Object_Header; // Shared by various SW components,latest version 1.1
+ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!!
+ USHORT MC_InitParameter; // Only used by command table
+ USHORT ASIC_VDDC_Info; // Will be obsolete from R600
+ USHORT ASIC_InternalSS_Info; // New table name from R600, used to be called "ASIC_MVDDC_Info"
+ USHORT TV_VideoMode; // Only used by command table
+ USHORT VRAM_Info; // Only used by command table, latest version 1.3
+ USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purposes since R600; the new table rev starts from 2.1
+ USHORT IntegratedSystemInfo; // Shared by various SW components
+ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
+ USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
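[Annotation: each USHORT in the master list is a little-endian byte offset — from the start of the VBIOS image, as the radeon parser treats it — with 0 meaning the table is absent. A hedged sketch of resolving one table under those assumptions:]

static ATOM_FIRMWARE_INFO *get_firmware_info(unsigned char *bios,
					     USHORT master_offset)
{
	ATOM_MASTER_DATA_TABLE *master =
		(ATOM_MASTER_DATA_TABLE *)(bios + master_offset);
	USHORT off = master->ListOfDataTables.FirmwareInfo;

	/* real code must byte-swap these fields on big-endian hosts */
	return off ? (ATOM_FIRMWARE_INFO *)(bios + off) : NULL;
}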
+
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // HW info table signature string "$ATI"
+ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+ UCHAR ucVideoPortInfo; // Provides the video port capabilities
+ UCHAR ucHostPortInfo; // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // MM info table signature string "$MMT"
+ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+ UCHAR ucProductID; // Defined as OEM ID or ATI board ID, depending on product type setting
+ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3)
+ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
+// Others: Reserved
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
-#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
-#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
#ifndef _H2INC
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_FIRMWARE_CAPABILITY {
+//Please don't add or expand this bitfield structure below; this one will retire soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:3;
- USHORT HyperMemory_Size:4;
- USHORT HyperMemory_Support:1;
- USHORT PPMode_Assigned:1;
- USHORT WMI_SUPPORT:1;
- USHORT GPUControlsBL:1;
- USHORT EngineClockSS_Support:1;
- USHORT MemoryClockSS_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT DualCRTC_Support:1;
- USHORT FirmwarePosted:1;
+ USHORT Reserved:3;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT GPUControlsBL:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT DualCRTC_Support:1;
+ USHORT FirmwarePosted:1;
#else
- USHORT FirmwarePosted:1;
- USHORT DualCRTC_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT MemoryClockSS_Support:1;
- USHORT EngineClockSS_Support:1;
- USHORT GPUControlsBL:1;
- USHORT WMI_SUPPORT:1;
- USHORT PPMode_Assigned:1;
- USHORT HyperMemory_Support:1;
- USHORT HyperMemory_Size:4;
- USHORT Reserved:3;
+ USHORT FirmwarePosted:1;
+ USHORT DualCRTC_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT GPUControlsBL:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+ USHORT Reserved:3;
#endif
-} ATOM_FIRMWARE_CAPABILITY;
+}ATOM_FIRMWARE_CAPABILITY;
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- ATOM_FIRMWARE_CAPABILITY sbfAccess;
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ ATOM_FIRMWARE_CAPABILITY sbfAccess;
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#else
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#endif
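[Annotation: the two #if ATOM_BIG_ENDIAN branches exist because C ABIs allocate bitfields from opposite ends on big- vs little-endian hosts; reversing the member order keeps each name on the same wire bit. A sketch of the two equivalent ways to test a capability bit:]

static int memclk_ss_supported(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
#ifndef _H2INC
	/* bitfield view */
	return cap.sbfAccess.MemoryClockSS_Support;
#else
	/* raw view: bit 3 == ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT */
	return (cap.susAccess & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT) != 0;
#endif
}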
-typedef struct _ATOM_FIRMWARE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucPadding[3]; /* Don't use them */
- ULONG aulReservedForBIOS[3]; /* Don't use them */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS[2]; /* Don't use them */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_2;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS; /* Don't use them */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_3;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- USHORT usBootUpVDDCVoltage; /* In MV unit */
- USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */
- USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_4;
-
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
-
-/****************************************************************************/
-/* Structures used in IntegratedSystemInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_FIRMWARE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucPadding[3]; //Don't use them
+ ULONG aulReservedForBIOS[3]; //Don't use them
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!!
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS[2]; //Don't use them
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS; //Don't use them
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+//the structure below to be used from Cypress
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved1;
+ ULONG ulReserved2;
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit
+ UCHAR ucReserved1; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread-spectrum clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
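[Annotation: clock fields throughout these firmware-info tables are in 10 kHz units, so consumers scale on the way out; a sketch under that assumption, where 'fw' is a hypothetical pointer into a parsed VBIOS image:]

static void read_default_clocks(ATOM_FIRMWARE_INFO_V2_1 *fw,
				unsigned long *sclk_khz,
				unsigned long *mclk_khz)
{
	*sclk_khz = (unsigned long)fw->ulDefaultEngineClock * 10;
	*mclk_khz = (unsigned long)fw->ulDefaultMemoryClock * 10;
}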
+
+/****************************************************************************/
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
#define IGP_CAP_FLAG_AC_CARD 0x4
#define IGP_CAP_FLAG_SDVO_CARD 0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulBootUpMemoryClock; /* in 10kHz unit */
- ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */
- ULONG ulMinSystemMemoryClock; /* in 10kHz unit */
- UCHAR ucNumberOfCyclesInPeriodHi;
- UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. */
- USHORT usReserved1;
- USHORT usInterNBVoltageLow; /* An intermidiate PMW value to set the voltage */
- USHORT usInterNBVoltageHigh; /* Another intermidiate PMW value to set the voltage */
- ULONG ulReserved[2];
-
- USHORT usFSBClock; /* In MHz unit */
- USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
- /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
- /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
- USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
- USHORT usK8MemoryClock; /* in MHz unit */
- USHORT usK8SyncStartDelay; /* in 0.01 us unit */
- USHORT usK8DataReturnTime; /* in 0.01 us unit */
- UCHAR ucMaxNBVoltage;
- UCHAR ucMinNBVoltage;
- UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
- UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
- UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
- UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */
- UCHAR ucMaxNBVoltageHigh;
- UCHAR ucMinNBVoltageHigh;
-} ATOM_INTEGRATED_SYSTEM_INFO;
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulBootUpMemoryClock; //in 10kHz unit
+ ULONG ulMaxSystemMemoryClock; //in 10kHz unit
+ ULONG ulMinSystemMemoryClock; //in 10kHz unit
+ UCHAR ucNumberOfCyclesInPeriodHi;
+ UCHAR ucLCDTimingSel; //=0: not valid; !=0: select this timing descriptor from the LCD EDID.
+ USHORT usReserved1;
+ USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage
+ USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage
+ ULONG ulReserved[2];
+
+ USHORT usFSBClock; //In MHz unit
+ USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
+ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+ //Bit[4]==1: P/2 mode, ==0: P/1 mode
+ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+ USHORT usK8MemoryClock; //in MHz unit
+ USHORT usK8SyncStartDelay; //in 0.01 us unit
+ USHORT usK8DataReturnTime; //in 0.01 us unit
+ UCHAR ucMaxNBVoltage;
+ UCHAR ucMinNBVoltage;
+ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+ UCHAR ucHTLinkWidth; //16 bit vs. 8 bit
+ UCHAR ucMaxNBVoltageHigh;
+ UCHAR ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
-ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
+ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-usFSBClock: For Intel IGP,it's FSB Freq
+usFSBClock: For Intel IGP,it's FSB Freq
For AMD IGP,it's HT Link Speed
usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
@@ -1687,98 +2093,113 @@ VC:Voltage Control
ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
-ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
-ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+ucNumberOfCyclesInPeriod: Indicates how many cycles when PWM duty is 100%; low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicates how many cycles when PWM duty is 100%; high 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it 0
ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
*/
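[Annotation: one plausible reading of the split PWM period described above, as a sketch — 'info' is a hypothetical pointer, and bit 7 of the high byte is taken as the inverter flag rather than part of the count:]

static unsigned int nb_pwm_period(ATOM_INTEGRATED_SYSTEM_INFO *info,
				  int *inverted)
{
	*inverted = (info->ucNumberOfCyclesInPeriodHi & 0x80) != 0;
	return ((unsigned int)(info->ucNumberOfCyclesInPeriodHi & 0x7f) << 8) |
		info->ucNumberOfCyclesInPeriod;
}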
+
/*
The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
-Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
+Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
Enough space is reserved that table revisions should never need to change; whenever needed, a GPU SW component can use the reserved portion for new data entries.
SW components can access the IGP system info structure in the same way as before
*/
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulReserved1[2]; /* must be 0x0 for the reserved */
- ULONG ulBootUpUMAClock; /* in 10kHz unit */
- ULONG ulBootUpSidePortClock; /* in 10kHz unit */
- ULONG ulMinSidePortClock; /* in 10kHz unit */
- ULONG ulReserved2[6]; /* must be 0x0 for the reserved */
- ULONG ulSystemConfig; /* see explanation below */
- ULONG ulBootUpReqDisplayVector;
- ULONG ulOtherDisplayMisc;
- ULONG ulDDISlot1Config;
- ULONG ulDDISlot2Config;
- UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
- UCHAR ucUMAChannelNumber;
- UCHAR ucDockingPinBit;
- UCHAR ucDockingPinPolarity;
- ULONG ulDockingPinCFGInfo;
- ULONG ulCPUCapInfo;
- USHORT usNumberOfCyclesInPeriod;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- ULONG ulHTLinkFreq; /* in 10Khz */
- USHORT usMinHTLinkWidth;
- USHORT usMaxHTLinkWidth;
- USHORT usUMASyncStartDelay;
- USHORT usUMADataReturnTime;
- USHORT usLinkStatusZeroTime;
- USHORT usReserved;
- ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */
- ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */
- USHORT usMaxUpStreamHTLinkWidth;
- USHORT usMaxDownStreamHTLinkWidth;
- USHORT usMinUpStreamHTLinkWidth;
- USHORT usMinDownStreamHTLinkWidth;
- ULONG ulReserved3[97]; /* must be 0x0 */
-} ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulReserved1[2]; //must be 0x0 for the reserved
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulBootUpSidePortClock; //in 10kHz unit
+ ULONG ulMinSidePortClock; //in 10kHz unit
+ ULONG ulReserved2[6]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //see explanation below
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulDDISlot1Config;
+ ULONG ulDDISlot2Config;
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ UCHAR ucDockingPinBit;
+ UCHAR ucDockingPinPolarity;
+ ULONG ulDockingPinCFGInfo;
+ ULONG ulCPUCapInfo;
+ USHORT usNumberOfCyclesInPeriod;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ ULONG ulHTLinkFreq; //in 10Khz
+ USHORT usMinHTLinkWidth;
+ USHORT usMaxHTLinkWidth;
+ USHORT usUMASyncStartDelay;
+ USHORT usUMADataReturnTime;
+ USHORT usLinkStatusZeroTime;
+ USHORT usDACEfuse; //for storing bandgap value (for RS880 only)
+ ULONG ulHighVoltageHTLinkFreq; // in 10Khz
+ ULONG ulLowVoltageHTLinkFreq; // in 10Khz
+ USHORT usMaxUpStreamHTLinkWidth;
+ USHORT usMaxDownStreamHTLinkWidth;
+ USHORT usMinUpStreamHTLinkWidth;
+ USHORT usMinDownStreamHTLinkWidth;
+ USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+ USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+ ULONG ulReserved3[96]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;
/*
ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
-ulSystemConfig:
-Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state
=0: system boots up at driver control state. Power state depends on PowerPlay table.
Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
Bit[3]=1: Only one power state(Performance) will be supported.
=0: Multiple power states supported from PowerPlay table.
-Bit[4]=1: CLMC is supported and enabled on current system.
- =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
-Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+Bit[4]=1: CLMC is supported and enabled on current system.
+ =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
=0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
=0: Voltage settings is determined by powerplay table.
Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
=0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+ =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+ =0: DLL Shut Down feature is not enabled or supported on current system.
ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
- [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
+ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
[3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [15:8] - Lane configuration attribute;
+ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+ When a DDI connector is not "paired" (meaning the two connections are mutually exclusive: on chassis or docking, only one of them can be connected at a time)
+ in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
+ if one DDI connector is only populated in docking with PCIE lanes 8-11 and there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+ [15:8] - Lane configuration attribute;
[23:16]- Connector type, possible value:
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
CONNECTOR_OBJECT_ID_HDMI_TYPE_A
CONNECTOR_OBJECT_ID_DISPLAYPORT
+ CONNECTOR_OBJECT_ID_eDP
[31:24]- Reserved
ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
ucUMAChannelNumber: how many channels for the UMA;
-ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
-usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE
+
usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
-usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
- If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
usUMADataReturnTime: Memory access latency, required for watermark calculation
-usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us
+usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0.
- This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ This must be less than or equal to ulHTLinkFreq(bootup frequency).
ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
This must be less than or equal to ulHighVoltageHTLinkFreq.
@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
+
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
-#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.
#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
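As a usage sketch (illustrative only), the two masks above are enough to pull the lane map and connector object ID out of a ulDDISlotxConfig dword, following the [7:0]/[23:16] layout described earlier:

#include <stdio.h>

#define IGP_DDI_SLOT_LANE_CONFIG_MASK     0x000000FF
#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK  0x00FF0000

/* Decode one DDI slot config dword; the [15:8] attribute field is not
 * decoded here. */
static void decode_ddi_slot(unsigned long cfg)
{
    unsigned long lanes = cfg & IGP_DDI_SLOT_LANE_CONFIG_MASK;
    unsigned long conn  = (cfg & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16;
    printf("lane map 0x%02lx, connector object id 0x%02lx\n", lanes, conn);
}

int main(void) { decode_ddi_slot(0x00130008UL); return 0; }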
+// IntegratedSystemInfoTable's new rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulReserved1[8]; //must be 0x0 for the reserved
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulReserved2[4]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //TBD
+ ULONG ulCPUCapInfo; //TBD
+ USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usBootUpNBVoltage; //boot up NB voltage
+ UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+ UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+ ULONG ulReserved3[4]; //must be 0x0 for the reserved
+ ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
+ ULONG ulDDISlot2Config;
+ ULONG ulDDISlot3Config;
+ ULONG ulDDISlot4Config;
+ ULONG ulReserved4[4]; //must be 0x0 for the reserved
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ USHORT usReserved;
+ ULONG ulReserved5[4]; //must be 0x0 for the reserved
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+ ULONG ulReserved6[61]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;
+
#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
-/* define ASIC internal encoder id ( bit vector ) */
-#define ASIC_INT_DAC1_ENCODER_ID 0x00
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID 0x00
#define ASIC_INT_TV_ENCODER_ID 0x02
#define ASIC_INT_DIG1_ENCODER_ID 0x03
#define ASIC_INT_DAC2_ENCODER_ID 0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
#define ASIC_INT_DVO_ENCODER_ID 0x07
#define ASIC_INT_DIG2_ENCODER_ID 0x09
#define ASIC_EXT_DIG_ENCODER_ID 0x05
+#define ASIC_EXT_DIG2_ENCODER_ID 0x08
+#define ASIC_INT_DIG3_ENCODER_ID 0x0a
+#define ASIC_INT_DIG4_ENCODER_ID 0x0b
+#define ASIC_INT_DIG5_ENCODER_ID 0x0c
+#define ASIC_INT_DIG6_ENCODER_ID 0x0d
-/* define Encoder attribute */
+//define Encoder attribute
#define ATOM_ANALOG_ENCODER 0
-#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DP_ENCODER 2
+
+#define ATOM_ENCODER_ENUM_MASK 0x70
+#define ATOM_ENCODER_ENUM_ID1 0x00
+#define ATOM_ENCODER_ENUM_ID2 0x10
+#define ATOM_ENCODER_ENUM_ID3 0x20
+#define ATOM_ENCODER_ENUM_ID4 0x30
+#define ATOM_ENCODER_ENUM_ID5 0x40
+#define ATOM_ENCODER_ENUM_ID6 0x50
#define ATOM_DEVICE_CRT1_INDEX 0x00000000
#define ATOM_DEVICE_LCD1_INDEX 0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_DFP1_INDEX 0x00000003
#define ATOM_DEVICE_CRT2_INDEX 0x00000004
#define ATOM_DEVICE_LCD2_INDEX 0x00000005
-#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_DFP6_INDEX 0x00000006
#define ATOM_DEVICE_DFP2_INDEX 0x00000007
#define ATOM_DEVICE_CV_INDEX 0x00000008
-#define ATOM_DEVICE_DFP3_INDEX 0x00000009
-#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
-#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+#define ATOM_DEVICE_DFP3_INDEX 0x00000009
+#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+
#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
-#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
-#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
-#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
-#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
-#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
-#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
-#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
-#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
-#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
-#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
-#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
-#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
-
-#define ATOM_DEVICE_CRT_SUPPORT \
- (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
-#define ATOM_DEVICE_DFP_SUPPORT \
- (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
- ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
- ATOM_DEVICE_DFP5_SUPPORT)
-#define ATOM_DEVICE_TV_SUPPORT \
- (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
-#define ATOM_DEVICE_LCD_SUPPORT \
- (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
+#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
+#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
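A minimal sketch, assuming the ATOM_DEVICE_*_SUPPORT masks above are in scope, of how a driver might test a BIOS-reported usDeviceSupport word:

#include <stdio.h>

static void report_device_support(unsigned short dev)
{
    if (dev & ATOM_DEVICE_CRT_SUPPORT) printf("analog CRT output\n");
    if (dev & ATOM_DEVICE_LCD_SUPPORT) printf("internal panel\n");
    if (dev & ATOM_DEVICE_DFP_SUPPORT) printf("digital output(s)\n");
    if (dev & ATOM_DEVICE_TV_SUPPORT)  printf("TV output\n");
}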
#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
+
#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
-#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
-#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
-/* usDeviceSupport: */
-/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */
-/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */
-/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */
-/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */
-/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */
-/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */
-/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */
-/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */
-/* Bit 8 = 0 - no CV support= 1- CV is supported */
-/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */
-/* Byte1 (Supported Device Info) */
-/* Bit 0 = = 0 - no CV support= 1- CV is supported */
-/* */
-/* */
-
-/* ucI2C_ConfigID */
-/* [7:0] - I2C LINE Associate ID */
-/* = 0 - no I2C */
-/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
-/* = 0, [6:0]=SW assisted I2C ID */
-/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
-/* = 2, HW engine for Multimedia use */
-/* = 3-7 Reserved for future I2C engines */
-/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
-
-typedef struct _ATOM_I2C_ID_CONFIG {
-#if ATOM_BIG_ENDIAN
- UCHAR bfHW_Capable:1;
- UCHAR bfHW_EngineID:3;
- UCHAR bfI2C_LineMux:4;
-#else
- UCHAR bfI2C_LineMux:4;
- UCHAR bfHW_EngineID:3;
- UCHAR bfHW_Capable:1;
-#endif
-} ATOM_I2C_ID_CONFIG;
-
-typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
- ATOM_I2C_ID_CONFIG sbfAccess;
- UCHAR ucAccess;
-} ATOM_I2C_ID_CONFIG_ACCESS;
+// usDeviceSupport:
+// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported
+// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
+// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
+// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
+// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
+// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
+// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
+// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
+// Bit 8 = 0 - no CV support= 1- CV is supported
+// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
+// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
+// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
+//
+//
/****************************************************************************/
-/* Structure used in GPIO_I2C_InfoTable */
+/* Structure used in MclkSS_InfoTable */
/****************************************************************************/
-typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
- USHORT usClkMaskRegisterIndex;
- USHORT usClkEnRegisterIndex;
- USHORT usClkY_RegisterIndex;
- USHORT usClkA_RegisterIndex;
- USHORT usDataMaskRegisterIndex;
- USHORT usDataEnRegisterIndex;
- USHORT usDataY_RegisterIndex;
- USHORT usDataA_RegisterIndex;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
- UCHAR ucClkMaskShift;
- UCHAR ucClkEnShift;
- UCHAR ucClkY_Shift;
- UCHAR ucClkA_Shift;
- UCHAR ucDataMaskShift;
- UCHAR ucDataEnShift;
- UCHAR ucDataY_Shift;
- UCHAR ucDataA_Shift;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
-} ATOM_GPIO_I2C_ASSIGMENT;
-
-typedef struct _ATOM_GPIO_I2C_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_GPIO_I2C_INFO;
+// ucI2C_ConfigID
+// [7:0] - I2C LINE Associate ID
+// = 0 - no I2C
+// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
+// = 0, [6:0]=SW assisted I2C ID
+// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+// = 2, HW engine for Multimedia use
+// = 3-7 Reserved for future I2C engines
+// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR bfHW_Capable:1;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfI2C_LineMux:4;
+#else
+ UCHAR bfI2C_LineMux:4;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
-/****************************************************************************/
-/* Common Structure used in other structures */
-/****************************************************************************/
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+ ATOM_I2C_ID_CONFIG sbfAccess;
+ UCHAR ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
+
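A hedged sketch of how the union above is typically used (assuming the typedefs are in scope): write the raw ucI2C_ConfigID byte into ucAccess and read the decoded fields back through the bitfield view, per the [7]/[6:4]/[3:0] layout documented earlier:

#include <stdio.h>

static void decode_i2c_config_id(unsigned char raw)
{
    ATOM_I2C_ID_CONFIG_ACCESS id;
    id.ucAccess = raw;
    if (raw == 0)
        printf("no I2C line associated\n");
    else if (id.sbfAccess.bfHW_Capable)
        printf("HW assisted I2C: engine %u, mux %u\n",
               id.sbfAccess.bfHW_EngineID, id.sbfAccess.bfI2C_LineMux);
    else
        printf("SW assisted I2C: GPIO ID %u\n", id.sbfAccess.bfI2C_LineMux);
}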
+
+/****************************************************************************/
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+ USHORT usClkMaskRegisterIndex;
+ USHORT usClkEnRegisterIndex;
+ USHORT usClkY_RegisterIndex;
+ USHORT usClkA_RegisterIndex;
+ USHORT usDataMaskRegisterIndex;
+ USHORT usDataEnRegisterIndex;
+ USHORT usDataY_RegisterIndex;
+ USHORT usDataA_RegisterIndex;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+ UCHAR ucClkMaskShift;
+ UCHAR ucClkEnShift;
+ UCHAR ucClkY_Shift;
+ UCHAR ucClkA_Shift;
+ UCHAR ucDataMaskShift;
+ UCHAR ucDataEnShift;
+ UCHAR ucDataY_Shift;
+ UCHAR ucDataA_Shift;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+// Common Structure used in other structures
+/****************************************************************************/
#ifndef _H2INC
-
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_MODE_MISC_INFO {
+
+//Please don't add or expand this bitfield structure below, this one will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:6;
- USHORT RGB888:1;
- USHORT DoubleClock:1;
- USHORT Interlace:1;
- USHORT CompositeSync:1;
- USHORT V_ReplicationBy2:1;
- USHORT H_ReplicationBy2:1;
- USHORT VerticalCutOff:1;
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HorizontalCutOff:1;
+ USHORT Reserved:6;
+ USHORT RGB888:1;
+ USHORT DoubleClock:1;
+ USHORT Interlace:1;
+ USHORT CompositeSync:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT VerticalCutOff:1;
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HorizontalCutOff:1;
#else
- USHORT HorizontalCutOff:1;
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VerticalCutOff:1;
- USHORT H_ReplicationBy2:1;
- USHORT V_ReplicationBy2:1;
- USHORT CompositeSync:1;
- USHORT Interlace:1;
- USHORT DoubleClock:1;
- USHORT RGB888:1;
- USHORT Reserved:6;
+ USHORT HorizontalCutOff:1;
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VerticalCutOff:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT CompositeSync:1;
+ USHORT Interlace:1;
+ USHORT DoubleClock:1;
+ USHORT RGB888:1;
+ USHORT Reserved:6;
#endif
-} ATOM_MODE_MISC_INFO;
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- ATOM_MODE_MISC_INFO sbfAccess;
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+}ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ ATOM_MODE_MISC_INFO sbfAccess;
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#else
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#endif
-/* usModeMiscInfo- */
+// usModeMiscInfo-
#define ATOM_H_CUTOFF 0x01
-#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
-#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
+#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
#define ATOM_V_CUTOFF 0x08
#define ATOM_H_REPLICATIONBY2 0x10
#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_DOUBLE_CLOCK_MODE 0x100
#define ATOM_RGB888_MODE 0x200
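The flag defines above address the same bits as the bitfield view of ATOM_MODE_MISC_INFO; a small sketch (little-endian build assumed, i.e. ATOM_BIG_ENDIAN unset) showing the equivalence:

static void mode_misc_demo(void)
{
    ATOM_MODE_MISC_INFO_ACCESS mi;
    mi.usAccess = ATOM_HSYNC_POLARITY | ATOM_VSYNC_POLARITY; /* both syncs active low */
    /* On a little-endian build, mi.sbfAccess.HSyncPolarity and
     * mi.sbfAccess.VSyncPolarity both read back as 1 here. */
    (void)mi;
}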
-/* usRefreshRate- */
+//usRefreshRate-
#define ATOM_REFRESH_43 43
#define ATOM_REFRESH_47 47
-#define ATOM_REFRESH_56 56
+#define ATOM_REFRESH_56 56
#define ATOM_REFRESH_60 60
#define ATOM_REFRESH_65 65
#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_REFRESH_75 75
#define ATOM_REFRESH_85 85
-/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
-/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */
-/* */
-/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
-/* = EDID_HA + EDID_HBL */
-/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
-/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
-/* = EDID_HA + EDID_HSO */
-/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
-/* VESA_BORDER = EDID_BORDER */
-
-/****************************************************************************/
-/* Structure used in SetCRTC_UsingDTDTimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
- USHORT usH_Size;
- USHORT usH_Blanking_Time;
- USHORT usV_Size;
- USHORT usV_Blanking_Time;
- USHORT usH_SyncOffset;
- USHORT usH_SyncWidth;
- USHORT usV_SyncOffset;
- USHORT usV_SyncWidth;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucH_Border; /* From DFP EDID */
- UCHAR ucV_Border;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
-
-/****************************************************************************/
-/* Structure used in SetCRTC_TimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_TIMING_PARAMETERS {
- USHORT usH_Total; /* horizontal total */
- USHORT usH_Disp; /* horizontal display */
- USHORT usH_SyncStart; /* horozontal Sync start */
- USHORT usH_SyncWidth; /* horizontal Sync width */
- USHORT usV_Total; /* vertical total */
- USHORT usV_Disp; /* vertical display */
- USHORT usV_SyncStart; /* vertical Sync start */
- USHORT usV_SyncWidth; /* vertical Sync width */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucOverscanRight; /* right */
- UCHAR ucOverscanLeft; /* left */
- UCHAR ucOverscanBottom; /* bottom */
- UCHAR ucOverscanTop; /* top */
- UCHAR ucReserved;
-} SET_CRTC_TIMING_PARAMETERS;
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
+//
+// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
+// = EDID_HA + EDID_HBL
+// VESA_HDISP = VESA_ACTIVE = EDID_HA
+// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+// = EDID_HA + EDID_HSO
+// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+// VESA_BORDER = EDID_BORDER
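A worked sketch of the horizontal translation formulas above, with hypothetical EDID numbers (the vertical side is analogous):

#include <stdio.h>

int main(void)
{
    unsigned short edid_ha = 1920, edid_hbl = 280, edid_hso = 88, edid_hspw = 44;
    unsigned short vesa_htotal      = edid_ha + edid_hbl;  /* 2200 */
    unsigned short vesa_hdisp       = edid_ha;             /* 1920 */
    unsigned short vesa_hsync_start = edid_ha + edid_hso;  /* 2008 */
    unsigned short vesa_hsync_width = edid_hspw;           /* 44   */
    printf("%u %u %u %u\n", vesa_htotal, vesa_hdisp,
           vesa_hsync_start, vesa_hsync_width);
    return 0;
}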
+
+/****************************************************************************/
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+ USHORT usH_Size;
+ USHORT usH_Blanking_Time;
+ USHORT usV_Size;
+ USHORT usV_Blanking_Time;
+ USHORT usH_SyncOffset;
+ USHORT usH_SyncWidth;
+ USHORT usV_SyncOffset;
+ USHORT usV_SyncWidth;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucH_Border; // From DFP EDID
+ UCHAR ucV_Border;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+ USHORT usH_Total; // horizontal total
+ USHORT usH_Disp; // horizontal display
+ USHORT usH_SyncStart; // horizontal Sync start
+ USHORT usH_SyncWidth; // horizontal Sync width
+ USHORT usV_Total; // vertical total
+ USHORT usV_Disp; // vertical display
+ USHORT usV_SyncStart; // vertical Sync start
+ USHORT usV_SyncWidth; // vertical Sync width
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucOverscanRight; // right
+ UCHAR ucOverscanLeft; // left
+ UCHAR ucOverscanBottom; // bottom
+ UCHAR ucOverscanTop; // top
+ UCHAR ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
-/****************************************************************************/
-/* Structure used in StandardVESA_TimingTable */
-/* AnalogTV_InfoTable */
-/* ComponentVideoInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MODE_TIMING {
- USHORT usCRTC_H_Total;
- USHORT usCRTC_H_Disp;
- USHORT usCRTC_H_SyncStart;
- USHORT usCRTC_H_SyncWidth;
- USHORT usCRTC_V_Total;
- USHORT usCRTC_V_Disp;
- USHORT usCRTC_V_SyncStart;
- USHORT usCRTC_V_SyncWidth;
- USHORT usPixelClock; /* in 10Khz unit */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- USHORT usCRTC_OverscanRight;
- USHORT usCRTC_OverscanLeft;
- USHORT usCRTC_OverscanBottom;
- USHORT usCRTC_OverscanTop;
- USHORT usReserve;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_MODE_TIMING;
-
-typedef struct _ATOM_DTD_FORMAT {
- USHORT usPixClk;
- USHORT usHActive;
- USHORT usHBlanking_Time;
- USHORT usVActive;
- USHORT usVBlanking_Time;
- USHORT usHSyncOffset;
- USHORT usHSyncWidth;
- USHORT usVSyncOffset;
- USHORT usVSyncWidth;
- USHORT usImageHSize;
- USHORT usImageVSize;
- UCHAR ucHBorder;
- UCHAR ucVBorder;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_DTD_FORMAT;
-
-/****************************************************************************/
-/* Structure used in LVDS_InfoTable */
-/* * Need a document to describe this table */
-/****************************************************************************/
+/****************************************************************************/
+// Structure used in StandardVESA_TimingTable
+// AnalogTV_InfoTable
+// ComponentVideoInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING
+{
+ USHORT usCRTC_H_Total;
+ USHORT usCRTC_H_Disp;
+ USHORT usCRTC_H_SyncStart;
+ USHORT usCRTC_H_SyncWidth;
+ USHORT usCRTC_V_Total;
+ USHORT usCRTC_V_Disp;
+ USHORT usCRTC_V_SyncStart;
+ USHORT usCRTC_V_SyncWidth;
+ USHORT usPixelClock; //in 10kHz unit
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ USHORT usCRTC_OverscanRight;
+ USHORT usCRTC_OverscanLeft;
+ USHORT usCRTC_OverscanBottom;
+ USHORT usCRTC_OverscanTop;
+ USHORT usReserve;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+ USHORT usPixClk;
+ USHORT usHActive;
+ USHORT usHBlanking_Time;
+ USHORT usVActive;
+ USHORT usVBlanking_Time;
+ USHORT usHSyncOffset;
+ USHORT usHSyncWidth;
+ USHORT usVSyncOffset;
+ USHORT usVSyncWidth;
+ USHORT usImageHSize;
+ USHORT usImageVSize;
+ UCHAR ucHBorder;
+ UCHAR ucVBorder;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+// Structure used in LVDS_InfoTable
+// * Need a document to describe this table
+/****************************************************************************/
#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
-/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
-#define LCDPANEL_CAP_READ_EDID 0x1
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_LVDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usModePatchTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
-} ATOM_LVDS_INFO;
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_LVDS_INFO_V12 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usExtInfoTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
- USHORT usLCDVenderID;
- USHORT usLCDProductID;
- UCHAR ucLCDPanel_SpecialHandlingCap;
- UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
- UCHAR ucReserved[2];
-} ATOM_LVDS_INFO_V12;
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usModePatchTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap;
+ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+ UCHAR ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_READ_EDID 0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
+#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP 0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
+#define PANEL_RANDOM_DITHER 0x80
+#define PANEL_RANDOM_DITHER_MASK 0x80
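A small sketch decoding the EDID 1.4 byte 14h color-depth code per the table above (helper name hypothetical; assumes PANEL_COLOR_BIT_DEPTH_MASK is in scope):

/* Map bits [6:4] of EDID byte 14h to bits per primary color; returns 0
 * for the undefined (000b) and reserved (111b) codes. */
static unsigned char panel_color_depth_bpc(unsigned char byte14)
{
    unsigned char code = (byte14 & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;
    return (code == 0 || code == 7) ? 0 : (unsigned char)(4 + 2 * code);
}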
+
#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
-typedef struct _ATOM_PATCH_RECORD_MODE {
- UCHAR ucRecordType;
- USHORT usHDisp;
- USHORT usVDisp;
-} ATOM_PATCH_RECORD_MODE;
+typedef struct _ATOM_PATCH_RECORD_MODE
+{
+ UCHAR ucRecordType;
+ USHORT usHDisp;
+ USHORT usVDisp;
+}ATOM_PATCH_RECORD_MODE;
-typedef struct _ATOM_LCD_RTS_RECORD {
- UCHAR ucRecordType;
- UCHAR ucRTSValue;
-} ATOM_LCD_RTS_RECORD;
+typedef struct _ATOM_LCD_RTS_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
-/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */
-typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
- UCHAR ucRecordType;
- USHORT usLCDCap;
-} ATOM_LCD_MODE_CONTROL_CAP;
+//!! If the record below exists, it should always be the first record for easy use in command table!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP
+{
+ UCHAR ucRecordType;
+ USHORT usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
#define LCD_MODE_CAP_BL_OFF 1
#define LCD_MODE_CAP_CRTC_OFF 2
#define LCD_MODE_CAP_PANEL_OFF 4
-typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
- UCHAR ucRecordType;
- UCHAR ucFakeEDIDLength;
- UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucFakeEDIDLength;
+ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
} ATOM_FAKE_EDID_PATCH_RECORD;
-typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
- UCHAR ucRecordType;
- USHORT usHSize;
- USHORT usVSize;
-} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ USHORT usHSize;
+ USHORT usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
/****************************Spread Spectrum Info Table Definitions **********************/
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSS_Step;
- UCHAR ucSS_Delay;
- UCHAR ucSS_Id;
- UCHAR ucRecommendedRef_Div;
- UCHAR ucSS_Range; /* it was reserved for V11 */
-} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS, =0 internal PPLL SS. Others:TBD
+ UCHAR ucSS_Step;
+ UCHAR ucSS_Delay;
+ UCHAR ucSS_Id;
+ UCHAR ucRecommendedRef_Div;
+ UCHAR ucSS_Range; //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
#define ATOM_MAX_SS_ENTRY 16
-#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
-#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
+#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7GHz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for the internal DP stream at 1.62GHz as well.
+#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62GHz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7GHz
+#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62GHz
+
#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
#define ATOM_INTERNAL_SS_MASK 0x00000000
#define ATOM_EXTERNAL_SS_MASK 0x00000002
#define EXEC_SS_STEP_SIZE_SHIFT 2
-#define EXEC_SS_DELAY_SHIFT 4
+#define EXEC_SS_DELAY_SHIFT 4
#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
-typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
-} ATOM_SPREAD_SPECTRUM_INFO;
-
-/****************************************************************************/
-/* Structure used in AnalogTV_InfoTable (Top level) */
-/****************************************************************************/
-/* ucTVBootUpDefaultStd definiton: */
-
-/* ATOM_TV_NTSC 1 */
-/* ATOM_TV_NTSCJ 2 */
-/* ATOM_TV_PAL 3 */
-/* ATOM_TV_PALM 4 */
-/* ATOM_TV_PALCN 5 */
-/* ATOM_TV_PALN 6 */
-/* ATOM_TV_PAL60 7 */
-/* ATOM_TV_SECAM 8 */
-
-/* ucTVSuppportedStd definition: */
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC 1
+//ATOM_TV_NTSCJ 2
+//ATOM_TV_PAL 3
+//ATOM_TV_PALM 4
+//ATOM_TV_PALCN 5
+//ATOM_TV_PALN 6
+//ATOM_TV_PAL60 7
+//ATOM_TV_SECAM 8
+
+//ucTVSupportedStd definition:
#define NTSC_SUPPORT 0x1
#define NTSCJ_SUPPORT 0x2
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
#define MAX_SUPPORTED_TV_TIMING 2
-typedef struct _ATOM_ANALOG_TV_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
- ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO;
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
#define MAX_SUPPORTED_TV_TIMING_V1_2 3
-typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO_V1_2;
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+ UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
+ UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+ UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
+ UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK 0x1F
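A sketch, assuming the ATOM_DPCD_INFO struct and mask above are in scope: the lane count is the masked ucMaxLane bits, and the DPCD link-rate byte encodes the per-lane rate in 0.27 Gbps steps (06h -> 1.62 Gbps, 0Ah -> 2.7 Gbps):

static unsigned int dpcd_max_lanes(const ATOM_DPCD_INFO *d)
{
    return d->ucMaxLane & ATOM_DPCD_MAX_LANE_MASK; /* 1, 2 or 4 */
}

static unsigned int dpcd_link_rate_10khz(const ATOM_DPCD_INFO *d)
{
    return d->ucMaxLinkRate * 27000u; /* 06h -> 162000, 0Ah -> 270000 */
}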
/**************************************************************************/
-/* VRAM usage and their definitions */
+// VRAM usage and their definitions
-/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
-/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
-/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */
-/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
-/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
+// One chunk of VRAM used by the BIOS is for HWICON surfaces and EDID data,
+// plus current mode timing and detailed (DTD) timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR
+// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX
#ifndef VESA_MEMORY_IN_64K_BLOCK
-#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */
+#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!)
#endif
-#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
-#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
+#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
#define ATOM_HWICON_INFOTABLE_SIZE 32
#define MAX_DTD_MODE_IN_VRAM 6
-#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
-#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
#define DFP_ENCODER_TYPE_OFFSET 0x80
#define DP_ENCODER_LANE_NUM_OFFSET 0x84
#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
-#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
+#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START+512)
-/* The size below is in Kb! */
+//The size below is in Kb!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
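Worked example of the reserve-size arithmetic above (illustrative numbers): for a span of 0x2345 bytes, ((0x2345>>10)+4)&0xFFFC = (8+4)&0xFFFC = 12 KB; the >>10 converts bytes to KB, and the +4 plus the &0xFFFC mask pad and align the result to a 4 KB multiple.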
-
+
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
-/***********************************************************************************/
-/* Structure used in VRAM_UsageByFirmwareTable */
-/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
-/* at running time. */
-/* note2: From RV770, the memory is more than 32bit addressable, so we will change */
-/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */
-/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
-/* (in offset to start of memory address) is KB aligned instead of byte aligend. */
-/***********************************************************************************/
+/***********************************************************************************/
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+// at run time.
+// note2: From RV770, the memory is more than 32bit addressable, so we will change
+// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
+// exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
+/***********************************************************************************/
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware!=0)
+FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
+else //Non VGA case
+ if (FB_Size<=2Gb)
+ FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
+ else
+ FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
+
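The note above, restated as a hedged C sketch (function and parameter names hypothetical; all sizes in KB to match the table fields):

static unsigned long fb_access_offset_kb(unsigned long start_addr_kb,
                                         unsigned long fb_used_by_drv_kb,
                                         unsigned long fb_size_kb,
                                         unsigned long aper_size_kb)
{
    if (start_addr_kb != 0)  /* VGA case: VBIOS already reserved the area */
        return start_addr_kb - fb_used_by_drv_kb;
    if (fb_size_kb <= 2UL * 1024 * 1024)  /* non-VGA case, FB <= 2 GB */
        return fb_size_kb - fb_used_by_drv_kb;
    return aper_size_kb - fb_used_by_drv_kb;  /* non-VGA case, FB > 2 GB */
}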
#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
-typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
- ULONG ulStartAddrUsedByFirmware;
- USHORT usFirmwareUseInKb;
- USHORT usReserved;
-} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
-typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_FIRMWARE_VRAM_RESERVE_INFO
- asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
-} ATOM_VRAM_USAGE_BY_FIRMWARE;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
-/****************************************************************************/
-/* Structure used in GPIO_Pin_LUTTable */
-/****************************************************************************/
-typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
- USHORT usGpioPin_AIndex;
- UCHAR ucGpioPinBitShift;
- UCHAR ucGPIO_ID;
-} ATOM_GPIO_PIN_ASSIGNMENT;
+// change version to 1.5, to allow the driver to allocate the vram area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
-typedef struct _ATOM_GPIO_PIN_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
-} ATOM_GPIO_PIN_LUT;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+ USHORT usGpioPin_AIndex;
+ UCHAR ucGpioPinBitShift;
+ UCHAR ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
-/****************************************************************************/
-/* Structure used in ComponentVideoInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+// Structure used in ComponentVideoInfoTable
+/****************************************************************************/
#define GPIO_PIN_ACTIVE_HIGH 0x1
#define MAX_SUPPORTED_CV_STANDARDS 5
-/* definitions for ATOM_D_INFO.ucSettings */
-#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */
-#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */
-#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
-typedef struct _ATOM_GPIO_INFO {
- USHORT usAOffset;
- UCHAR ucSettings;
- UCHAR ucReserved;
-} ATOM_GPIO_INFO;
+typedef struct _ATOM_GPIO_INFO
+{
+ USHORT usAOffset;
+ UCHAR ucSettings;
+ UCHAR ucReserved;
+}ATOM_GPIO_INFO;
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
-#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
-#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
-
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
-/* Line 3 out put 5V. */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
-
-/* Line 3 out put 2.2V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
-
-/* Line 3 out put 0V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
-
-#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
-
-#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
-
-/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-
-typedef struct _ATOM_COMPONENT_VIDEO_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMask_PinRegisterIndex;
- USHORT usEN_PinRegisterIndex;
- USHORT usY_PinRegisterIndex;
- USHORT usA_PinRegisterIndex;
- UCHAR ucBitShift;
- UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
- ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucLetterBoxMode;
- UCHAR ucReserved[3];
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucReserved;
- UCHAR ucLetterBoxMode;
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO_V21;
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 out put 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
+
+//Line 3 out put 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+//Line 3 out put 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
+
+//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMask_PinRegisterIndex;
+ USHORT usEN_PinRegisterIndex;
+ USHORT usY_PinRegisterIndex;
+ USHORT usA_PinRegisterIndex;
+ UCHAR ucBitShift;
+ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
+ ATOM_DTD_FORMAT sReserved; // must be zeroed out
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucReserved[3];
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucReserved;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
-/****************************************************************************/
-/* Structure used in object_InfoTable */
-/****************************************************************************/
-typedef struct _ATOM_OBJECT_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- USHORT usConnectorObjectTableOffset;
- USHORT usRouterObjectTableOffset;
- USHORT usEncoderObjectTableOffset;
- USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */
- USHORT usDisplayPathTableOffset;
-} ATOM_OBJECT_HEADER;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH {
- USHORT usDeviceTag; /* supported device */
- USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */
- USHORT usConnObjectId; /* Connector Object ID */
- USHORT usGPUObjectId; /* GPU ID */
- USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */
-} ATOM_DISPLAY_OBJECT_PATH;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
- UCHAR ucNumOfDispPath;
- UCHAR ucVersion;
- UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
-} ATOM_DISPLAY_OBJECT_PATH_TABLE;
-
-typedef struct _ATOM_OBJECT /* each object has this structure */
-{
- USHORT usObjectID;
- USHORT usSrcDstTableOffset;
- USHORT usRecordOffset; /* this pointing to a bunch of records defined below */
- USHORT usReserved;
-} ATOM_OBJECT;
-
-typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */
-{
- UCHAR ucNumberOfObjects;
- UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
-} ATOM_OBJECT_TABLE;
-
-typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */
-{
- UCHAR ucNumberOfSrc;
- USHORT usSrcObjectID[1];
- UCHAR ucNumberOfDst;
- USHORT usDstObjectID[1];
-} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
-
-/* Related definitions, all records are differnt but they have a commond header */
-typedef struct _ATOM_COMMON_RECORD_HEADER {
- UCHAR ucRecordType; /* An emun to indicate the record type */
- UCHAR ucRecordSize; /* The size of the whole record in byte */
-} ATOM_COMMON_RECORD_HEADER;
-
-#define ATOM_I2C_RECORD_TYPE 1
+/****************************************************************************/
+// Structure used in object_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+ USHORT usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[1]; //1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+ UCHAR ucNumOfDispPath;
+ UCHAR ucVersion;
+ UCHAR ucPadding[2];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
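+
+// A minimal sketch of walking the display paths above: usGraphicObjIds[] is
+// variable length, so each entry is stepped by its own usSize rather than by
+// sizeof(ATOM_DISPLAY_OBJECT_PATH) (helper name is illustrative):
+static inline ATOM_DISPLAY_OBJECT_PATH *
+atom_next_disp_path(ATOM_DISPLAY_OBJECT_PATH *pPath)
+{
+    /* advance by this entry's own size to reach the next path */
+    return (ATOM_DISPLAY_OBJECT_PATH *)((UCHAR *)pPath + pPath->usSize);
+}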
+
+
+typedef struct _ATOM_OBJECT //each object has this structure
+{
+ USHORT usObjectID;
+ USHORT usSrcDstTableOffset;
+ USHORT usRecordOffset; //this pointing to a bunch of records defined below
+ USHORT usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure
+{
+ UCHAR ucNumberOfObjects;
+ UCHAR ucPadding[3];
+ ATOM_OBJECT asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
+{
+ UCHAR ucNumberOfSrc;
+ USHORT usSrcObjectID[1];
+ UCHAR ucNumberOfDst;
+ USHORT usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
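+
+// In the on-disk data usSrcObjectID[] really holds ucNumberOfSrc entries and
+// ucNumberOfDst follows them, so the declared layout only matches when
+// ucNumberOfSrc == 1. A decoding sketch, assuming the packed byte layout used
+// throughout these tables (helper name is illustrative):
+static inline USHORT *
+atom_src_dst_get_dst_ids(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *t, UCHAR *num_dst)
+{
+    /* the destination count sits right after the variable source ID array */
+    UCHAR *p = (UCHAR *)t + sizeof(UCHAR) + t->ucNumberOfSrc * sizeof(USHORT);
+    *num_dst = *p;
+    return (USHORT *)(p + sizeof(UCHAR));
+}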
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0 0
+#define EXT_HPDPIN_LUTINDEX_1 1
+#define EXT_HPDPIN_LUTINDEX_2 2
+#define EXT_HPDPIN_LUTINDEX_3 3
+#define EXT_HPDPIN_LUTINDEX_4 4
+#define EXT_HPDPIN_LUTINDEX_5 5
+#define EXT_HPDPIN_LUTINDEX_6 6
+#define EXT_HPDPIN_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0 0
+#define EXT_AUXDDC_LUTINDEX_1 1
+#define EXT_AUXDDC_LUTINDEX_2 2
+#define EXT_AUXDDC_LUTINDEX_3 3
+#define EXT_AUXDDC_LUTINDEX_4 4
+#define EXT_AUXDDC_LUTINDEX_5 5
+#define EXT_AUXDDC_LUTINDEX_6 6
+#define EXT_AUXDDC_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+
+typedef struct _EXT_DISPLAY_PATH
+{
+ USHORT usDeviceTag; //A bit vector to show what devices are supported
+ USHORT usDeviceACPIEnum; //16bit device ACPI id.
+ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
+ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
+ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
+ USHORT usExtEncoderObjId; //external encoder object id
+ USHORT usReserved[3];
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple checksum: the byte sum of the whole structure equals 0x0.
+ UCHAR Reserved [7]; // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
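+
+// ucChecksum is chosen so that the byte sum of the whole structure is 0x0; a
+// minimal validation sketch (helper name is illustrative):
+static inline int
+atom_ext_disp_conn_info_valid(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
+{
+    UCHAR sum = 0;
+    UCHAR *p = (UCHAR *)info;
+    USHORT i;
+    for (i = 0; i < sizeof(*info); i++)
+        sum += p[i];
+    return sum == 0;   /* a non-zero sum indicates a corrupt table */
+}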
+
+//Related definitions: all records are different but they share a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+ UCHAR ucRecordType; //An enum to indicate the record type
+ UCHAR ucRecordSize; //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE 1
#define ATOM_HPD_INT_RECORD_TYPE 2
#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
-#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
-#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
-#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
-
-/* Must be updated when new record type is added,equal to that record definition! */
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
-
-typedef struct _ATOM_I2C_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_I2C_ID_CONFIG sucI2cId;
- UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
-} ATOM_I2C_RECORD;
-
-typedef struct _ATOM_HPD_INT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPlugged_PinState;
-} ATOM_HPD_INT_RECORD;
-
-typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucProtectionFlag;
- UCHAR ucReserved;
-} ATOM_OUTPUT_PROTECTION_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
- ULONG ulACPIDeviceEnum; /* Reserved for now */
- USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
- USHORT usPadding;
-} ATOM_CONNECTOR_DEVICE_TAG;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucNumberOfDevice;
- UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
-} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucConfigGPIOID;
- UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
- UCHAR ucFlowinGPIPID;
- UCHAR ucExtInGPIPID;
-} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
-
-typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucCTL1GPIO_ID;
- UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL2GPIO_ID;
- UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL3GPIO_ID;
- UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTLFPGA_IN_ID;
- UCHAR ucPadding[3];
-} ATOM_ENCODER_FPGA_CONTROL_RECORD;
-
-typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */
-} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
-
-typedef struct _ATOM_JTAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucTMSGPIO_ID;
- UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTCKGPIO_ID;
- UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDOGPIO_ID;
- UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDIGPIO_ID;
- UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
- UCHAR ucPadding[2];
-} ATOM_JTAG_RECORD;
-
-/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
-typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
- UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
- UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
-} ATOM_GPIO_PIN_CONTROL_PAIR;
-
-typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucFlags; /* Future expnadibility */
- UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */
-} ATOM_OBJECT_GPIO_CNTL_RECORD;
-
-/* Definitions for GPIO pin state */
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to the object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to the object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+
+
+//Must be updated when a new record type is added; keep it equal to the last record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+
+typedef struct _ATOM_I2C_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+ ULONG ulACPIDeviceEnum; //Reserved for now
+ USHORT usDeviceID; //This Id is the same as "ATOM_DEVICE_XXX_SUPPORT"
+ USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is the same as "ATOM_DEVICE_XXX_SUPPORT"; [1] is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucTVActiveState; //Indicates whether the pin reads 0 or 1 when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; //Set to 1 when it's active high
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; //Set to 1 when it's active high
+ UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
+ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucFlags; // Future expandability
+ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pairs; the actual count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
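+
+// asGpio[] above holds ucNumberOfPins fixed-size pairs, so a plain array walk
+// is enough; a sketch (program_gpio is a hypothetical helper):
+//   for (i = 0; i < pRec->ucNumberOfPins; i++)
+//       program_gpio(pRec->asGpio[i].ucGPIOID, pRec->asGpio[i].ucGPIO_PinState);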
+
+//Definitions for GPIO pin state
#define GPIO_PIN_TYPE_INPUT 0x00
#define GPIO_PIN_TYPE_OUTPUT 0x10
#define GPIO_PIN_TYPE_HW_CONTROL 0x20
-/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
-typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ULONG ulStrengthControl; /* DVOA strength control for CF */
- UCHAR ucPadding[2];
-} ATOM_ENCODER_DVO_CF_RECORD;
+// Indexes to GPIO array in GLSync record
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; // DVOA strength control for CF
+ UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
-/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
-typedef struct _ATOM_CONNECTOR_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- USHORT usMaxPixClk;
- UCHAR ucFlowCntlGpioId;
- UCHAR ucSwapCntlGpioId;
- UCHAR ucConnectedDvoBundle;
- UCHAR ucPadding;
-} ATOM_CONNECTOR_CF_RECORD;
-
-typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_DTD_FORMAT asTiming;
-} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
-
-typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
- UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
- UCHAR ucReserved;
-} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
-
-typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
-
-typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType;
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
-
-/* define ucMuxType */
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+ UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
-/****************************************************************************/
-/* ASIC voltage data table */
-/****************************************************************************/
-typedef struct _ATOM_VOLTAGE_INFO_HEADER {
- USHORT usVDDCBaseLevel; /* In number of 50mv unit */
- USHORT usReserved; /* For possible extension table offset */
- UCHAR ucNumOfVoltageEntries;
- UCHAR ucBytesPerVoltageEntry;
- UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */
- UCHAR ucDefaultVoltageEntry;
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
-} ATOM_VOLTAGE_INFO_HEADER;
-
-typedef struct _ATOM_VOLTAGE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_INFO_HEADER viHeader;
- UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
-} ATOM_VOLTAGE_INFO;
-
-typedef struct _ATOM_VOLTAGE_FORMULA {
- USHORT usVoltageBaseLevel; /* In number of 1mv unit */
- USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */
- UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */
- UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */
- UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */
- UCHAR ucReserved;
- UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */
-} ATOM_VOLTAGE_FORMULA;
-
-typedef struct _ATOM_VOLTAGE_CONTROL {
- UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
- USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
- UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */
- UCHAR ucReserved;
-} ATOM_VOLTAGE_CONTROL;
-
-/* Define ucVoltageControlId */
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //A fixed-size array which maps external pins to an internal DDC ID
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
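+
+// With the two LUT records above, the external indices carried in
+// EXT_DISPLAY_PATH resolve to internal ids by a bounded array lookup; a
+// sketch, assuming pPath and the LUT records have already been located:
+//   if (pPath->ucExtHPDPINLutIndex < MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES)
+//       hpd_gpio = pHpdLut->ucHPDPINMap[pPath->ucExtHPDPINLutIndex];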
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usObjectID; //could be a connector, encoder or other object defined in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/
+// ASIC voltage data table
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+ USHORT usVDDCBaseLevel; //In units of 50mV
+ USHORT usReserved; //For possible extension table offset
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; //Indicates the mV increment of one step, in 0.5mV units
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+ USHORT usVoltageBaseLevel; // In units of 1mV
+ USHORT usVoltageStep; // The mV increment of one step, in 1mV units
+ UCHAR ucNumOfVoltageEntries; // Number of voltage entries, which indicates the max voltage
+ UCHAR ucFlag; // bit0=0: step is 1mV, =1: step is 0.5mV
+ UCHAR ucBaseVID; // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
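+
+// A minimal sketch of the VID formula documented above for the no-lookup-table
+// case, ignoring the 0.5mV flag in ucFlag for brevity (helper name is
+// illustrative; voltages in mV):
+static inline UCHAR atom_voltage_to_vid(ATOM_VOLTAGE_FORMULA *f, USHORT mv)
+{
+    /* e.g. base level 800mV, step 25mV: 900mV yields ucBaseVID + 4 */
+    return (UCHAR)(f->ucBaseVID + (mv - f->usVoltageBaseLevel) / f->usVoltageStep);
+}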
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucReserved[3];
+ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+ UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; //GPIO_PAD register index
+ UCHAR ucGpioPinBitShift[9]; //at most 8 pins supporting 255 VIDs, terminated with 0xff
+ UCHAR ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
#define VOLTAGE_CONTROLLED_BY_HW 0x00
#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
-#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
-#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DS4402 0x04
-
-typedef struct _ATOM_VOLTAGE_OBJECT {
- UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
- UCHAR ucSize; /* Size of Object */
- ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */
- ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */
-} ATOM_VOLTAGE_OBJECT;
-
-typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
-} ATOM_VOLTAGE_OBJECT_INFO;
-
-typedef struct _ATOM_LEAKID_VOLTAGE {
- UCHAR ucLeakageId;
- UCHAR ucReserved;
- USHORT usVoltage;
-} ATOM_LEAKID_VOLTAGE;
-
-typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
- UCHAR ucProfileId;
- UCHAR ucReserved;
- USHORT usSize;
- USHORT usEfuseSpareStartAddr;
- USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */
- ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */
-} ATOM_ASIC_PROFILE_VOLTAGE;
-
-/* ucProfileId */
-#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402 0x04
+
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; //from LSB to MSB, max 8 bits each; terminated with 0xffff if fewer than 8 efuse ids
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakage id and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
-typedef struct _ATOM_ASIC_PROFILING_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
-} ATOM_ASIC_PROFILING_INFO;
-
-typedef struct _ATOM_POWER_SOURCE_OBJECT {
- UCHAR ucPwrSrcId; /* Power source */
- UCHAR ucPwrSensorType; /* GPIO, I2C or none */
- UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
- UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
- UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
- UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
- UCHAR ucPwrSensActiveState; /* high active or low active */
- UCHAR ucReserve[3]; /* reserve */
- USHORT usSensPwr; /* in unit of watt */
-} ATOM_POWER_SOURCE_OBJECT;
-
-typedef struct _ATOM_POWER_SOURCE_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- UCHAR asPwrbehave[16];
- ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
-} ATOM_POWER_SOURCE_INFO;
-
-/* Define ucPwrSrcId */
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+ UCHAR ucPwrSrcId; // Power source
+ UCHAR ucPwrSensorType; // GPIO, I2C or none
+ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
+ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
+ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect
+ UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect
+ UCHAR ucPwrSensActiveState; // high active or low active
+ UCHAR ucReserve[3]; // reserved
+ USHORT usSensPwr; // in unit of watt
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
#define POWERSOURCE_PCIE_ID1 0x00
#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
-/* define ucPwrSensorId */
+//define ucPwrSensorId
#define POWER_SENSOR_ALWAYS 0x00
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ULONG ulReserved1[8];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulReserved2[3];
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucTjOffset;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10];
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
+ ULONG ulReserved3[42];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+/**********************************************************************************************************************
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz units.
+//ulDentistVCOFreq: Dentist VCO clock in 10kHz units.
+//ulBootUpUMAClock: System memory boot up clock frequency in 10kHz units.
+//ulReserved1[8] Reserved for now, must be 0x0.
+//ulBootUpReqDisplayVector VBIOS boot up display IDs
+// ATOM_DEVICE_CRT1_SUPPORT 0x0001
+// ATOM_DEVICE_CRT2_SUPPORT 0x0010
+// ATOM_DEVICE_DFP1_SUPPORT 0x0008
+// ATOM_DEVICE_DFP6_SUPPORT 0x0040
+// ATOM_DEVICE_DFP2_SUPPORT 0x0080
+// ATOM_DEVICE_DFP3_SUPPORT 0x0200
+// ATOM_DEVICE_DFP4_SUPPORT 0x0400
+// ATOM_DEVICE_DFP5_SUPPORT 0x0800
+// ATOM_DEVICE_LCD1_SUPPORT 0x0002
+//ulOtherDisplayMisc Other display related flags, not defined yet.
+//ulGPUCapInfo TBD
+//ulReserved2[3] Must be 0x0; reserved.
+//ulSystemConfig TBD
+//ulCPUCapInfo TBD
+//usMaxNBVoltage High NB voltage in units of mV, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usMinNBVoltage Low NB voltage in units of mV, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usBootUpNBVoltage Boot up NB voltage in units of mV.
+//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
+//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register, may not be needed.
+//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+//ucUMAChannelNumber System memory channel numbers.
+//usExtDispConnInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to the beginning of this table.
+//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for the CSR M3 arbiter for default operation.
+//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
+//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+**********************************************************************************************************************/
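+
+// usExtDispConnInfoOffset is relative to the start of this table, so the
+// embedded block can also be located generically; a sketch (helper name is
+// illustrative):
+static inline ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *
+atom_get_ext_disp_conn_info(ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
+{
+    return (ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *)
+           ((UCHAR *)info + info->usExtDispConnInfoOffset);
+}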
+
/**************************************************************************/
-/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
-/* Memory SS Info Table */
-/* Define Memory Clock SS chip ID */
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
#define ICS91719 1
#define ICS91720 2
-/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
-typedef struct _ATOM_I2C_DATA_RECORD {
- UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */
- UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
-} ATOM_I2C_DATA_RECORD;
-
-/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
- UCHAR ucSSChipID; /* SS chip being used */
- UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
- UCHAR ucNumOfI2CDataRecords; /* number of data block */
- ATOM_I2C_DATA_RECORD asI2CData[1];
-} ATOM_I2C_DEVICE_SETUP_INFO;
-
-/* ========================================================================================== */
-typedef struct _ATOM_ASIC_MVDD_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
-} ATOM_ASIC_MVDD_INFO;
-
-/* ========================================================================================== */
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides the "Start" and "Stop"
+ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
+ UCHAR ucSSChipID; //SS chip being used
+ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
+ UCHAR ucNumOfI2CDataRecords; //number of data block
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
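+
+// asI2CData[] holds ucNumOfI2CDataRecords variable-size blocks; each record
+// occupies sizeof(UCHAR) + ucNunberOfBytes bytes, so records are stepped
+// manually. A minimal iteration sketch (helper name is illustrative):
+static inline ATOM_I2C_DATA_RECORD *
+atom_next_i2c_data_record(ATOM_I2C_DATA_RECORD *rec)
+{
+    /* skip the count byte plus the payload bytes of this record */
+    return (ATOM_I2C_DATA_RECORD *)((UCHAR *)rec + sizeof(UCHAR) +
+                                    rec->ucNunberOfBytes);
+}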
+
+//==========================================================================================
+typedef struct _ATOM_ASIC_MVDD_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
-/* ========================================================================================== */
+//==========================================================================================
/**************************************************************************/
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
- ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */
- USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */
- USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */
- UCHAR ucClockIndication; /* Indicate which clock source needs SS */
- UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
- UCHAR ucReserved[2];
-} ATOM_ASIC_SS_ASSIGNMENT;
-
-/* Define ucSpreadSpectrumType */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+ ULONG ulTargetClockRange; //Clock Out frequency (VCO), in units of 10kHz
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateInKhz; //in units of kHz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread.
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
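+
+// usSpreadSpectrumPercentage is stored in 0.01% units, so a raw value of 50
+// means 0.5% spread. A minimal conversion sketch (helper name is illustrative):
+static inline ULONG atom_ss_raw_to_ppm(USHORT raw_0_01_percent)
+{
+    return (ULONG)raw_0_01_percent * 100;   /* 0.01% == 100 ppm, so 50 -> 5000 ppm */
+}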
+
+//Define ucClockIndication. SW uses the IDs below to check whether SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO), in units of 10kHz
+ //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateIn10Hz; //in units of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+//#define ATOM_INTERNAL_SS_MASK 0x00000000
+//#define ATOM_EXTERNAL_SS_MASK 0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-} ATOM_ASIC_INTERNAL_SS_INFO;
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
-/* ==============================Scratch Pad Definition Portion=============================== */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO), in units of 10kHz
+ //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in units of 0.01%
+ USHORT usSpreadRateIn10Hz; //in units of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
#define ATOM_DEVICE_CONNECT_INFO_DEF 0
#define ATOM_ROM_LOCATION_DEF 1
#define ATOM_TV_STANDARD_DEF 2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_I2C_CHANNEL_STATUS_DEF 8
#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
-/* BIOS_0_SCRATCH Definition */
+
+// BIOS_0_SCRATCH Definition
#define ATOM_S0_CRT1_MONO 0x00000001L
#define ATOM_S0_CRT1_COLOR 0x00000002L
#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_CV_DIN_A 0x00000020L
#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
#define ATOM_S0_CRT2_MONO 0x00000100L
#define ATOM_S0_CRT2_COLOR 0x00000200L
#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2 0x00020000L
#define ATOM_S0_LCD1 0x00040000L
#define ATOM_S0_LCD2 0x00080000L
-#define ATOM_S0_TV2 0x00100000L
-#define ATOM_S0_DFP3 0x00200000L
-#define ATOM_S0_DFP4 0x00400000L
-#define ATOM_S0_DFP5 0x00800000L
+#define ATOM_S0_DFP6 0x00100000L
+#define ATOM_S0_DFP3 0x00200000L
+#define ATOM_S0_DFP4 0x00400000L
+#define ATOM_S0_DFP5 0x00800000L
-#define ATOM_S0_DFP_MASK \
- (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
- /* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
+ // the FAD/HDP reg access bug. Bit is read by DAL; this is obsolete starting from RV5xx
#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
#define ATOM_S0_THERMAL_STATE_SHIFT 26
#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
-#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2b2 0x02
#define ATOM_S0_LCD1b2 0x04
#define ATOM_S0_LCD2b2 0x08
-#define ATOM_S0_TV2b2 0x10
-#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP6b2 0x10
+#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP4b2 0x40
+#define ATOM_S0_DFP5b2 0x80
+
#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
#define ATOM_S0_LCD1_SHIFT 18
-/* BIOS_1_SCRATCH Definition */
+// BIOS_1_SCRATCH Definition
#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
-/* BIOS_2_SCRATCH Definition */
+// BIOS_2_SCRATCH Definition
#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
-#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
-#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
-#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
-#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
-#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
-#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
-#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
-#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
-#define ATOM_S2_CV_DPMS_STATE 0x01000000L
-#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
-#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
-#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
-
-#define ATOM_S2_DFP_DPM_STATE \
- (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
- ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
- ATOM_S2_DFP5_DPMS_STATE)
-
-#define ATOM_S2_DEVICE_DPMS_STATE \
- (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
- ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
- ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
- ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
-
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
+#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-/* Byte aligned definition for BIOS usage */
+
+//Byte aligned definition for BIOS usage
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
-#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
-#define ATOM_S2_TV1_DPMS_STATEb2 0x04
-#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
-#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
-#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
-#define ATOM_S2_TV2_DPMS_STATEb2 0x40
-#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
-#define ATOM_S2_CV_DPMS_STATEb3 0x01
-#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
-#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
-#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
-/* BIOS_3_SCRATCH Definition */
+
+// BIOS_3_SCRATCH Definition
#define ATOM_S3_CRT1_ACTIVE 0x00000001L
#define ATOM_S3_LCD1_ACTIVE 0x00000002L
#define ATOM_S3_TV1_ACTIVE 0x00000004L
#define ATOM_S3_DFP1_ACTIVE 0x00000008L
#define ATOM_S3_CRT2_ACTIVE 0x00000010L
#define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_TV2_ACTIVE 0x00000040L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
#define ATOM_S3_DFP2_ACTIVE 0x00000080L
#define ATOM_S3_CV_ACTIVE 0x00000100L
#define ATOM_S3_DFP3_ACTIVE 0x00000200L
#define ATOM_S3_DFP4_ACTIVE 0x00000400L
#define ATOM_S3_DFP5_ACTIVE 0x00000800L
-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
-#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+//The two definitions below are not supported in pplib, only in the old powerplay in DAL
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
#define ATOM_S3_DFP1_ACTIVEb0 0x08
#define ATOM_S3_CRT2_ACTIVEb0 0x10
#define ATOM_S3_LCD2_ACTIVEb0 0x20
-#define ATOM_S3_TV2_ACTIVEb0 0x40
+#define ATOM_S3_DFP6_ACTIVEb0 0x40
#define ATOM_S3_DFP2_ACTIVEb0 0x80
#define ATOM_S3_CV_ACTIVEb1 0x01
#define ATOM_S3_DFP3_ACTIVEb1 0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
-#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
-#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
-#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
-#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
-
-/* BIOS_4_SCRATCH Definition */
+// BIOS_4_SCRATCH Definition
#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
-/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
#define ATOM_S5_DOS_REQ_CRT1b0 0x01
#define ATOM_S5_DOS_REQ_LCD1b0 0x02
#define ATOM_S5_DOS_REQ_TV1b0 0x04
#define ATOM_S5_DOS_REQ_DFP1b0 0x08
#define ATOM_S5_DOS_REQ_CRT2b0 0x10
#define ATOM_S5_DOS_REQ_LCD2b0 0x20
-#define ATOM_S5_DOS_REQ_TV2b0 0x40
+#define ATOM_S5_DOS_REQ_DFP6b0 0x40
#define ATOM_S5_DOS_REQ_DFP2b0 0x80
#define ATOM_S5_DOS_REQ_CVb1 0x01
#define ATOM_S5_DOS_REQ_DFP3b1 0x02
#define ATOM_S5_DOS_REQ_DFP4b1 0x04
#define ATOM_S5_DOS_REQ_DFP5b1 0x08
-#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
#define ATOM_S5_DOS_REQ_CRT1 0x0001
#define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S5_DOS_REQ_DFP1 0x0008
#define ATOM_S5_DOS_REQ_CRT2 0x0010
#define ATOM_S5_DOS_REQ_LCD2 0x0020
-#define ATOM_S5_DOS_REQ_TV2 0x0040
+#define ATOM_S5_DOS_REQ_DFP6 0x0040
#define ATOM_S5_DOS_REQ_DFP2 0x0080
#define ATOM_S5_DOS_REQ_CV 0x0100
-#define ATOM_S5_DOS_REQ_DFP3 0x0200
-#define ATOM_S5_DOS_REQ_DFP4 0x0400
-#define ATOM_S5_DOS_REQ_DFP5 0x0800
+#define ATOM_S5_DOS_REQ_DFP3 0x0200
+#define ATOM_S5_DOS_REQ_DFP4 0x0400
+#define ATOM_S5_DOS_REQ_DFP5 0x0800
#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 \
- (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
- ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+ (ATOM_S5_DOS_FORCE_CVb3<<8))
-/* BIOS_6_SCRATCH Definition */
+// BIOS_6_SCRATCH Definition
#define ATOM_S6_DEVICE_CHANGE 0x00000001L
#define ATOM_S6_SCALER_CHANGE 0x00000002L
#define ATOM_S6_LID_CHANGE 0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
-#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
-#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
-#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
+#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion
#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
-#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
#define ATOM_S6_ACC_REQ_CV 0x01000000L
#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_LID_STATEb0 0x40
#define ATOM_S6_DOCK_STATEb0 0x80
#define ATOM_S6_CRITICAL_STATEb1 0x01
-#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
+#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
-#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
#define ATOM_S6_ACC_REQ_CRT1b2 0x01
#define ATOM_S6_ACC_REQ_LCD1b2 0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1b2 0x08
#define ATOM_S6_ACC_REQ_CRT2b2 0x10
#define ATOM_S6_ACC_REQ_LCD2b2 0x20
-#define ATOM_S6_ACC_REQ_TV2b2 0x40
+#define ATOM_S6_ACC_REQ_DFP6b2 0x40
#define ATOM_S6_ACC_REQ_DFP2b2 0x80
#define ATOM_S6_ACC_REQ_CVb3 0x01
-#define ATOM_S6_ACC_REQ_DFP3b3 0x02
-#define ATOM_S6_ACC_REQ_DFP4b3 0x04
-#define ATOM_S6_ACC_REQ_DFP5b3 0x08
+#define ATOM_S6_ACC_REQ_DFP3b3 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3 0x08
#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
-/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
#define ATOM_S7_DOS_MODE_TYPEb0 0x03
#define ATOM_S7_DOS_MODE_VGAb0 0x00
#define ATOM_S7_DOS_MODE_VESAb0 0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
-/* BIOS_8_SCRATCH Definition */
+// BIOS_8_SCRATCH Definition
#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
-#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
-/* BIOS_9_SCRATCH Definition */
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
#endif
+
#define ATOM_FLAG_SET 0x20
#define ATOM_FLAG_CLEAR 0
-#define CLEAR_ATOM_S6_ACC_MODE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
-#define SET_ATOM_S6_DEVICE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SCALER_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_LID_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_DOCK_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
- ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_I2C_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DEVICE_RECONFIG \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S0_LCD1 \
- ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
- ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
-#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-/****************************************************************************/
-/* Portion II: Definitinos only used in Driver */
+#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
/****************************************************************************/
-/* Macros used by driver */
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
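
The GetIndexIntoMasterTable() macro above is an offsetof() trick: it turns a named USHORT field of the generated master table list into the index of that entry. A minimal sketch of the equivalent computation, using a hypothetical two-entry master list (the real ATOM_MASTER_LIST_OF_*_TABLES structures are much longer):

#include <stddef.h>
#include <stdint.h>

typedef uint16_t USHORT; /* matches the header's USHORT */

/* Hypothetical stand-in for ATOM_MASTER_LIST_OF_COMMAND_TABLES:
 * one USHORT table offset per entry. */
typedef struct {
    USHORT ASIC_Init;             /* entry 0 */
    USHORT GetDisplaySurfaceSize; /* entry 1 */
} HYPOTHETICAL_MASTER_LIST;

/* Equivalent of GetIndexIntoMasterTable(COMMAND, FieldName)
 * for the stand-in layout. */
#define INDEX_OF(FieldName) \
    (offsetof(HYPOTHETICAL_MASTER_LIST, FieldName) / sizeof(USHORT))

/* INDEX_OF(GetDisplaySurfaceSize) evaluates to 1. */
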
-/****************************************************************************/
-/* Portion III: Definitinos only used in VBIOS */
+/****************************************************************************/
+//Portion III: Definitions only used in VBIOS
/****************************************************************************/
#define ATOM_DAC_SRC 0x80
#define ATOM_SRC_DAC1 0
#define ATOM_SRC_DAC2 0x80
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- UCHAR ucAction; /* not define yet */
- UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
- UCHAR ucFbDiv; /* FB value */
- UCHAR ucPostDiv; /* Post div */
-} MEMORY_PLLINIT_PARAMETERS;
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //in 10 kHz units
+ UCHAR ucAction; //not defined yet
+ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte
+ UCHAR ucFbDiv; //FB value
+ UCHAR ucPostDiv; //Post div
+}MEMORY_PLLINIT_PARAMETERS;
#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
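
Since ulTargetMemoryClock is expressed in 10 kHz units, a caller converts from MHz before filling the block. A minimal sketch, with an illustrative helper name that is not part of the header; the divider fields are normally computed downstream by the VBIOS/driver:

/* Illustrative only: request a 900 MHz memory clock.
 * 900 MHz = 90000 units of 10 kHz. */
static void fill_mem_pll_params(MEMORY_PLLINIT_PARAMETERS *p,
                                unsigned int target_mhz)
{
    p->ulTargetMemoryClock = target_mhz * 100; /* MHz -> 10 kHz units */
    p->ucAction   = 0; /* action codes not defined yet, per the comment */
    p->ucFbDiv_Hi = 0; /* dividers left for the VBIOS/driver to compute */
    p->ucFbDiv    = 0;
    p->ucPostDiv  = 0;
}
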
-#define GPIO_PIN_WRITE 0x01
+
+#define GPIO_PIN_WRITE 0x01
#define GPIO_PIN_READ 0x00
-typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
- UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
- UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */
- UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */
- UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
-} GPIO_PIN_CONTROL_PARAMETERS;
-
-typedef struct _ENABLE_SCALER_PARAMETERS {
- UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
- UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
- UCHAR ucTVStandard; /* */
- UCHAR ucPadding[1];
-} ENABLE_SCALER_PARAMETERS;
-#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
-
-/* ucEnable: */
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS
+{
+ UCHAR ucGPIO_ID; //return value, read from GPIO pins
+ UCHAR ucGPIOBitShift; //defines which bit in ucGPIOBitVal needs to be updated
+ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
+ UCHAR ucAction; //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
+ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+ UCHAR ucTVStandard; //
+ UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+//ucEnable:
#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
#define SCALER_ENABLE_MULTITAP_MODE 3
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
- ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */
- UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
- UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
- UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
-} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
- ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
- ENABLE_CRTC_PARAMETERS sReserved;
-} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucPadding[3];
-} ENABLE_GRAPH_SURFACE_PARAMETERS;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
- ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
- ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
-} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
-
-typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
- USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */
- USHORT usMemorySize; /* 8Kb blocks aligned */
-} MEMORY_CLEAN_UP_PARAMETERS;
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+ ULONG usHWIconHorzVertPosn; // Hardware Icon horizontal/vertical position
+ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
+ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
+ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+ ENABLE_CRTC_PARAMETERS sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+ USHORT usMemoryStart; //on an 8KB boundary, offset from memory base address
+ USHORT usMemorySize; //aligned to 8KB blocks
+}MEMORY_CLEAN_UP_PARAMETERS;
#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
- USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */
- USHORT usY_Size;
-} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+ USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
+ USHORT usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
-typedef struct _INDIRECT_IO_ACCESS {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR IOAccessSequence[256];
+typedef struct _INDIRECT_IO_ACCESS
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR IOAccessSequence[256];
} INDIRECT_IO_ACCESS;
#define INDIRECT_READ 0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
-typedef struct _ATOM_OEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_OEM_INFO;
-
-typedef struct _ATOM_TV_MODE {
- UCHAR ucVMode_Num; /* Video mode number */
- UCHAR ucTV_Mode_Num; /* Internal TV mode number */
-} ATOM_TV_MODE;
-
-typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
- USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
- USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
- USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
- USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
-} ATOM_BIOS_INT_TVSTD_MODE;
-
-typedef struct _ATOM_TV_MODE_SCALER_PTR {
- USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
- USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */
- UCHAR ucTV_Mode_Num;
-} ATOM_TV_MODE_SCALER_PTR;
-
-typedef struct _ATOM_STANDARD_VESA_TIMING {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
-} ATOM_STANDARD_VESA_TIMING;
-
-typedef struct _ATOM_STD_FORMAT {
- USHORT usSTD_HDisp;
- USHORT usSTD_VDisp;
- USHORT usSTD_RefreshRate;
- USHORT usReserved;
-} ATOM_STD_FORMAT;
-
-typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
- USHORT usVESA_ModeNumber;
- USHORT usExtendedModeNumber;
-} ATOM_VESA_TO_EXTENDED_MODE;
-
-typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
-} ATOM_VESA_TO_INTENAL_MODE_LUT;
+typedef struct _ATOM_OEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+ UCHAR ucVMode_Num; //Video mode number
+ UCHAR ucTV_Mode_Num; //Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usTV_Mode_LUT_Offset; // Pointer to the standard-to-internal mode number conversion table
+ USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
+ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
+ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_CV table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
+ USHORT usFilter1_Offset; //Pointer to filter format 1 coefficients
+ UCHAR ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{
+ USHORT usSTD_HDisp;
+ USHORT usSTD_VDisp;
+ USHORT usSTD_RefreshRate;
+ USHORT usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+ USHORT usVESA_ModeNumber;
+ USHORT usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
/*************** ATOM Memory Related Data Structure ***********************/
-typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
- UCHAR ucMemoryType;
- UCHAR ucMemoryVendor;
- UCHAR ucAdjMCId;
- UCHAR ucDynClkId;
- ULONG ulDllResetClkRange;
-} ATOM_MEMORY_VENDOR_BLOCK;
-
-typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+ UCHAR ucMemoryType;
+ UCHAR ucMemoryVendor;
+ UCHAR ucAdjMCId;
+ UCHAR ucDynClkId;
+ ULONG ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
#if ATOM_BIG_ENDIAN
- ULONG ucMemBlkId:8;
- ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
#else
- ULONG ulMemClockRange:24;
- ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
#endif
-} ATOM_MEMORY_SETTING_ID_CONFIG;
-
-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
- ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
- ULONG ulAccess;
-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
-
-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
- ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
- ULONG aulMemData[1];
-} ATOM_MEMORY_SETTING_DATA_BLOCK;
-
-typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
- USHORT usRegIndex; /* MC register index */
- UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
-} ATOM_INIT_REG_INDEX_FORMAT;
-
-typedef struct _ATOM_INIT_REG_BLOCK {
- USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
- USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
- ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
- ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
-} ATOM_INIT_REG_BLOCK;
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+ ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+ ULONG ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
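
The ATOM_BIG_ENDIAN bitfield swap plus this access union keep the packed ULONG layout identical on both endiannesses: the block ID always occupies bits [31:24] and the clock range bits [23:0]. A minimal sketch of packing through ulAccess and unpacking through the bitfield view (the values are made up):

static void sketch_mem_setting_id(void)
{
    ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS cfg;

    /* Pack: block ID 3 in bits [31:24], 90000 (10 kHz units) in [23:0]. */
    cfg.ulAccess = ((ULONG)3 << 24) | (90000 & 0xFFFFFF);

    /* Unpack via the bitfield view; the #if ATOM_BIG_ENDIAN ordering
     * above keeps both views consistent on either endianness. */
    ULONG blk   = cfg.slAccess.ucMemBlkId;      /* == 3 */
    ULONG range = cfg.slAccess.ulMemClockRange; /* == 90000 */
    (void)blk; (void)range;
}
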
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+ ULONG aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+ USHORT usRegIndex; // MC register index
+ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+ USHORT usRegIndexTblSize; //size of asRegIndexBuf
+ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
#define END_OF_REG_INDEX_BLOCK 0x0ffff
#define END_OF_REG_DATA_BLOCK 0x00000000
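
ATOM_INIT_REG_BLOCK is variable-length: asRegIndexBuf[1] is declared with one element but actually holds usRegIndexTblSize bytes of entries, terminated by END_OF_REG_INDEX_BLOCK. A minimal sketch of walking the index entries, assuming usRegIndexTblSize counts bytes as its comment suggests:

static void walk_reg_index(const ATOM_INIT_REG_BLOCK *blk)
{
    const ATOM_INIT_REG_INDEX_FORMAT *idx = blk->asRegIndexBuf;
    USHORT i, n = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);

    for (i = 0; i < n; i++) {
        if (idx[i].usRegIndex == END_OF_REG_INDEX_BLOCK)
            break; /* terminator reached */
        /* idx[i].usRegIndex is the MC register index;
         * idx[i].ucPreRegDataLength locates its data in asRegDataBuf. */
    }
}
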
@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK {
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-typedef struct _ATOM_MC_INIT_PARAM_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usAdjustARB_SEQDataOffset;
- USHORT usMCInitMemTypeTblOffset;
- USHORT usMCInitCommonTblOffset;
- USHORT usMCInitPowerDownTblOffset;
- ULONG ulARB_SEQDataBuf[32];
- ATOM_INIT_REG_BLOCK asMCInitMemType;
- ATOM_INIT_REG_BLOCK asMCInitCommon;
-} ATOM_MC_INIT_PARAM_TABLE;
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usAdjustARB_SEQDataOffset;
+ USHORT usMCInitMemTypeTblOffset;
+ USHORT usMCInitCommonTblOffset;
+ USHORT usMCInitPowerDownTblOffset;
+ ULONG ulARB_SEQDataBuf[32];
+ ATOM_INIT_REG_BLOCK asMCInitMemType;
+ ATOM_INIT_REG_BLOCK asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
#define _4Mx16 0x2
#define _4Mx32 0x3
@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE {
#define QIMONDA INFINEON
#define PROMOS MOSEL
+#define KRETON INFINEON
-/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
#define UCODE_ROM_START_ADDRESS 0x1c000
-#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
-
-/* uCode block header for reference */
-
-typedef struct _MCuCodeHeader {
- ULONG ulSignature;
- UCHAR ucRevision;
- UCHAR ucChecksum;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
- USHORT usParametersLength;
- USHORT usUCodeLength;
- USHORT usReserved1;
- USHORT usReserved2;
+#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+ ULONG ulSignature;
+ UCHAR ucRevision;
+ UCHAR ucChecksum;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+ USHORT usParametersLength;
+ USHORT usUCodeLength;
+ USHORT usReserved1;
+ USHORT usReserved2;
} MCuCodeHeader;
-/* //////////////////////////////////////////////////////////////////////////////// */
+//////////////////////////////////////////////////////////////////////////////////
#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
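
Per ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK, the vendor code lives in the low nibble of a module's ucMemoryVenderID field; a one-line sketch of extracting it:

/* Sketch: low nibble of ucMemoryVenderID is the vendor code. */
static UCHAR vram_vendor_id(UCHAR ucMemoryVenderID)
{
    return ucMemoryVenderID & ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK;
}
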
-typedef struct _ATOM_VRAM_MODULE_V1 {
- ULONG ulReserved;
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucReserved[2];
-} ATOM_VRAM_MODULE_V1;
-
-typedef struct _ATOM_VRAM_MODULE_V2 {
- ULONG ulReserved;
- ULONG ulFlags; /* To enable/disable functionalities based on memory type */
- ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
- ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucRefreshRateFactor;
- UCHAR ucReserved[3];
-} ATOM_VRAM_MODULE_V2;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- union {
- USHORT usMRS; /* mode register */
- USHORT usDDR3_MR0;
- };
- union {
- USHORT usEMRS; /* extended mode register */
- USHORT usDDR3_MR1;
- };
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- union {
- struct {
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
- UCHAR ucReserved;
- };
- USHORT usDDR3_MR2;
- };
-} ATOM_MEMORY_TIMING_FORMAT;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- USHORT usMRS; /* mode register */
- USHORT usEMRS; /* extended mode register */
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
-/* ///////////////////////GDDR parameters/////////////////////////////////// */
- UCHAR uctCCDL; /* */
- UCHAR uctCRCRL; /* */
- UCHAR uctCRCWL; /* */
- UCHAR uctCKE; /* */
- UCHAR uctCKRSE; /* */
- UCHAR uctCKRSX; /* */
- UCHAR uctFAW32; /* */
- UCHAR ucReserved1; /* */
- UCHAR ucReserved2; /* */
- UCHAR ucTerminator;
-} ATOM_MEMORY_TIMING_FORMAT_V1;
-
-typedef struct _ATOM_MEMORY_FORMAT {
- ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
- UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
- UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_MEMORY_FORMAT;
-
-typedef struct _ATOM_VRAM_MODULE_V3 {
- ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */
- USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
- USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */
- USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */
- UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */
- UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */
-} ATOM_VRAM_MODULE_V3;
-
-/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+ ULONG ulReserved;
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucChannelNum; // Number of channels;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+ ULONG ulReserved;
+ ULONG ulFlags; // To enable/disable functionalities based on memory type
+ ULONG ulEngineClock; // Override of default engine clock for particular memory type
+ ULONG ulMemoryClock; // Override of default memory clock for particular memory type
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucChannelNum; // Number of channels;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucRefreshRateFactor;
+ UCHAR ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ union{
+ USHORT usMRS; // mode register
+ USHORT usDDR3_MR0;
+ };
+ union{
+ USHORT usEMRS; // extended mode register
+ USHORT usDDR3_MR1;
+ };
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ union
+ {
+ struct {
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+ UCHAR ucReserved;
+ };
+ USHORT usDDR3_MR2;
+ };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR4lo; //
+ UCHAR ucMR4hi; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+ UCHAR ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef struct _ATOM_MEMORY_FORMAT
+{
+ ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved; // Not used for DDR3 memory
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, vendor detection table gets executed
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucBurstSize; // burst size: 0 = burst length 4, 1 = burst length 8
+ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemAttrib; // Memory Device Attribute, like RDBI/WDBI etc
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing blocks, sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usSize; // size of ATOM_VRAM_MODULE_V3
+ USHORT usDefaultMVDDQ; // board dependent parameter: Default Memory Core Voltage
+ USHORT usDefaultMVDDC; // board dependent parameter: Default Memory IO Voltage
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucChannelNum; // board dependent parameter: Number of channels;
+ UCHAR ucChannelSize; // board dependent parameter: 32bit or 64bit
+ UCHAR ucVREFI; // board dependent parameter: EXT or INT, +160mV to -140mV
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ ATOM_MEMORY_FORMAT asMemory; // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
#define NPL_RT_MASK 0x0f
#define BATTERY_ODT_MASK 0xc0
#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
-typedef struct _ATOM_VRAM_MODULE_V4 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
-
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved;
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucReserved2[2];
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V4;
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved;
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucReserved2[2];
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 {
#define VRAM_MODULE_V4_MISC_BL8 0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
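
Decoding the packed fields of ATOM_VRAM_MODULE_V4 follows directly from the field comments and the VRAM_MODULE_V4_MISC_* masks above; a minimal sketch:

static void decode_vram_module_v4(const ATOM_VRAM_MODULE_V4 *m)
{
    UCHAR mem_type  = m->ucMemoryType >> 4; /* 0x1:DDR1 ... 0x5:DDR5 */
    int   dual_rank = (m->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK)
                        == VRAM_MODULE_V4_MISC_DUAL_RANK;
    int   bl8       = (m->ucMisc & VRAM_MODULE_V4_MISC_BL8) != 0;
    (void)mem_type; (void)dual_rank; (void)bl8;
}
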
-typedef struct _ATOM_VRAM_MODULE_V5 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter: NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */
- UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
- ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V5;
-
-typedef struct _ATOM_VRAM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
-} ATOM_VRAM_INFO_V2;
-
-typedef struct _ATOM_VRAM_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V3;
+typedef struct _ATOM_VRAM_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V3;
#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
-typedef struct _ATOM_VRAM_INFO_V4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
- ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
- UCHAR ucReservde[4];
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V4;
-
-typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
-} ATOM_VRAM_GPIO_DETECTION_INFO;
-
-typedef struct _ATOM_MEMORY_TRAINING_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTrainingLoop;
- UCHAR ucReserved[3];
- ATOM_INIT_REG_BLOCK asMemTrainingSetting;
-} ATOM_MEMORY_TRAINING_INFO;
-
-typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
- UCHAR ucControl;
- UCHAR ucData;
- UCHAR ucSatus;
- UCHAR ucTemp;
+typedef struct _ATOM_VRAM_INFO_V4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V4;
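
Because each V4 module entry records its own usModuleSize ("make it easy for VBIOS to look for next entry"), the aVramInfo array in ATOM_VRAM_INFO_V4 is walked by byte stride rather than by sizeof, and only ucNumOfVRAMModule entries are valid. A minimal sketch:

static void walk_vram_modules(const ATOM_VRAM_INFO_V4 *info)
{
    const UCHAR *p = (const UCHAR *)info->aVramInfo;
    UCHAR i;

    for (i = 0; i < info->ucNumOfVRAMModule; i++) {
        const ATOM_VRAM_MODULE_V4 *m = (const ATOM_VRAM_MODULE_V4 *)p;
        /* ... inspect *m ... */
        p += m->usModuleSize; /* variable-size stride to the next entry */
    }
}
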
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTrainingLoop;
+ UCHAR ucReserved[3];
+ ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+ UCHAR ucControl;
+ UCHAR ucData;
+ UCHAR ucSatus;
+ UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;
#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
-typedef struct _SW_I2C_IO_DATA_PARAMETERS {
- USHORT GPIO_Info;
- UCHAR ucAct;
- UCHAR ucData;
-} SW_I2C_IO_DATA_PARAMETERS;
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{
+ USHORT GPIO_Info;
+ UCHAR ucAct;
+ UCHAR ucData;
+} SW_I2C_IO_DATA_PARAMETERS;
#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS {
#define SW_I2C_CNTL_CLOSE 5
#define SW_I2C_CNTL_WRITE1BIT 6
-/* ==============================VESA definition Portion=============================== */
+//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV '01.00'
-#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
-typedef struct _PTR_32_BIT_STRUCTURE {
- USHORT Offset16;
- USHORT Segment16;
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+ USHORT Offset16;
+ USHORT Segment16;
} PTR_32_BIT_STRUCTURE;
-typedef union _PTR_32_BIT_UNION {
- PTR_32_BIT_STRUCTURE SegmentOffset;
- ULONG Ptr32_Bit;
+typedef union _PTR_32_BIT_UNION
+{
+ PTR_32_BIT_STRUCTURE SegmentOffset;
+ ULONG Ptr32_Bit;
} PTR_32_BIT_UNION;
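
PTR_32_BIT_UNION carries a real-mode far pointer; the flat address is segment*16 + offset. A minimal sketch of the conversion:

/* Sketch: real-mode segment:offset -> linear address. */
static ULONG ptr32_to_linear(PTR_32_BIT_UNION p)
{
    return ((ULONG)p.SegmentOffset.Segment16 << 4)
         + p.SegmentOffset.Offset16;
}
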
-typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
- UCHAR VbeSignature[4];
- USHORT VbeVersion;
- PTR_32_BIT_UNION OemStringPtr;
- UCHAR Capabilities[4];
- PTR_32_BIT_UNION VideoModePtr;
- USHORT TotalMemory;
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+ UCHAR VbeSignature[4];
+ USHORT VbeVersion;
+ PTR_32_BIT_UNION OemStringPtr;
+ UCHAR Capabilities[4];
+ PTR_32_BIT_UNION VideoModePtr;
+ USHORT TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;
-typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
- VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
- USHORT OemSoftRev;
- PTR_32_BIT_UNION OemVendorNamePtr;
- PTR_32_BIT_UNION OemProductNamePtr;
- PTR_32_BIT_UNION OemProductRevPtr;
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+ USHORT OemSoftRev;
+ PTR_32_BIT_UNION OemVendorNamePtr;
+ PTR_32_BIT_UNION OemProductNamePtr;
+ PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;
-typedef union _VBE_VERSION_UNION {
- VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
- VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+typedef union _VBE_VERSION_UNION
+{
+ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;
-typedef struct _VBE_INFO_BLOCK {
- VBE_VERSION_UNION UpdatableVBE_Info;
- UCHAR Reserved[222];
- UCHAR OemData[256];
+typedef struct _VBE_INFO_BLOCK
+{
+ VBE_VERSION_UNION UpdatableVBE_Info;
+ UCHAR Reserved[222];
+ UCHAR OemData[256];
} VBE_INFO_BLOCK;
-typedef struct _VBE_FP_INFO {
- USHORT HSize;
- USHORT VSize;
- USHORT FPType;
- UCHAR RedBPP;
- UCHAR GreenBPP;
- UCHAR BlueBPP;
- UCHAR ReservedBPP;
- ULONG RsvdOffScrnMemSize;
- ULONG RsvdOffScrnMEmPtr;
- UCHAR Reserved[14];
+typedef struct _VBE_FP_INFO
+{
+ USHORT HSize;
+ USHORT VSize;
+ USHORT FPType;
+ UCHAR RedBPP;
+ UCHAR GreenBPP;
+ UCHAR BlueBPP;
+ UCHAR ReservedBPP;
+ ULONG RsvdOffScrnMemSize;
+ ULONG RsvdOffScrnMEmPtr;
+ UCHAR Reserved[14];
} VBE_FP_INFO;
-typedef struct _VESA_MODE_INFO_BLOCK {
-/* Mandatory information for all VBE revisions */
- USHORT ModeAttributes; /* dw ? ; mode attributes */
- UCHAR WinAAttributes; /* db ? ; window A attributes */
- UCHAR WinBAttributes; /* db ? ; window B attributes */
- USHORT WinGranularity; /* dw ? ; window granularity */
- USHORT WinSize; /* dw ? ; window size */
- USHORT WinASegment; /* dw ? ; window A start segment */
- USHORT WinBSegment; /* dw ? ; window B start segment */
- ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
- USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */
-
-/* ; Mandatory information for VBE 1.2 and above */
- USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
- USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
- UCHAR XCharSize; /* db ? ; character cell width in pixels */
- UCHAR YCharSize; /* db ? ; character cell height in pixels */
- UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
- UCHAR BitsPerPixel; /* db ? ; bits per pixel */
- UCHAR NumberOfBanks; /* db ? ; number of banks */
- UCHAR MemoryModel; /* db ? ; memory model type */
- UCHAR BankSize; /* db ? ; bank size in KB */
- UCHAR NumberOfImagePages; /* db ? ; number of images */
- UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */
-
-/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
- UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
- UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
- UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
- UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
- UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
- UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
- UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
- UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
- UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */
-
-/* ; Mandatory information for VBE 2.0 and above */
- ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
- ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
- USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */
-
-/* ; Mandatory information for VBE 3.0 and above */
- USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
- UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
- UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
- UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
- UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
- UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
- UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
- UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
- UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
- UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
- UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
- ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
- UCHAR Reserved; /* db 190 dup (0) */
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+ USHORT ModeAttributes; // dw ? ; mode attributes
+ UCHAR WinAAttributes; // db ? ; window A attributes
+ UCHAR WinBAttributes; // db ? ; window B attributes
+ USHORT WinGranularity; // dw ? ; window granularity
+ USHORT WinSize; // dw ? ; window size
+ USHORT WinASegment; // dw ? ; window A start segment
+ USHORT WinBSegment; // dw ? ; window B start segment
+ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function
+ USHORT BytesPerScanLine;// dw ? ; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters
+ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters
+ UCHAR XCharSize; // db ? ; character cell width in pixels
+ UCHAR YCharSize; // db ? ; character cell height in pixels
+ UCHAR NumberOfPlanes; // db ? ; number of memory planes
+ UCHAR BitsPerPixel; // db ? ; bits per pixel
+ UCHAR NumberOfBanks; // db ? ; number of banks
+ UCHAR MemoryModel; // db ? ; memory model type
+ UCHAR BankSize; // db ? ; bank size in KB
+ UCHAR NumberOfImagePages;// db ? ; number of images
+ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits
+ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask
+ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits
+ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask
+ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits
+ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask
+ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits
+ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask
+ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+ ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer
+ ULONG Reserved_1; // dd 0 ; reserved - always set to 0
+ USHORT Reserved_2; // dw 0 ; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes
+ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes
+ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes
+ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes)
+ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes)
+ UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes)
+ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes)
+ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes)
+ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes)
+ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes)
+ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes)
+ ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode
+ UCHAR Reserved; // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;
-/* BIOS function CALLS */
-#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
-#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
+#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
#define ATOM_BIOS_FUNCTION_STV_STD 0x16
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
-#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
+#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
-#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
-#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 81
#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
-#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
-#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
-#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
-#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
-#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
-#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
-#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
-#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */
-
-#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
-#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
-#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
-#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
-#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
-#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
-#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
-#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller of the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller of the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 // Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported
+
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS
+#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01
+#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON.
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
-/* structure used for VBIOS only */
+// structure used for VBIOS only
-/* DispOutInfoTable */
-typedef struct _ASIC_TRANSMITTER_INFO {
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
USHORT usTransmitterObjId;
USHORT usSupportDevice;
- UCHAR ucTransmitterCmdTblId;
- UCHAR ucConfig;
- UCHAR ucEncoderID; /* available 1st encoder ( default ) */
- UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
- UCHAR uc2ndEncoderID;
- UCHAR ucReserved;
-} ASIC_TRANSMITTER_INFO;
-
-typedef struct _ASIC_ENCODER_INFO {
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; //available 1st encoder ( default )
+ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+typedef struct _ASIC_ENCODER_INFO
+{
UCHAR ucEncoderID;
UCHAR ucEncoderConfig;
- USHORT usEncoderCmdTblId;
-} ASIC_ENCODER_INFO;
+ USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
-typedef struct _ATOM_DISP_OUT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT ptrTransmitterInfo;
USHORT ptrEncoderInfo;
- ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
- ASIC_ENCODER_INFO asEncoderInfo[1];
-} ATOM_DISP_OUT_INFO;
+ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
-/* DispDevicePriorityInfo */
-typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT asDevicePriority[16];
-} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
-
-/* ProcessAuxChannelTransactionTable */
-typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
- USHORT lpAuxRequest;
- USHORT lpDataOut;
- UCHAR ucChannelID;
- union {
- UCHAR ucReplyStatus;
- UCHAR ucDelay;
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
};
- UCHAR ucDataOutLen;
- UCHAR ucReserved;
-} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+ UCHAR ucDataOutLen;
+ UCHAR ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
-/* GetSinkType */
+//GetSinkType
-typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
USHORT ucLinkClock;
- union {
- UCHAR ucConfig; /* for DP training command */
- UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
+ union
+ {
+ UCHAR ucConfig; // for DP training command
+    UCHAR ucI2cId;      // used for GET_SINK_TYPE command
};
UCHAR ucAction;
UCHAR ucStatus;
UCHAR ucLaneNum;
UCHAR ucReserved[2];
-} DP_ENCODER_SERVICE_PARAMETERS;
+}DP_ENCODER_SERVICE_PARAMETERS;
-/* ucAction */
+// ucAction
#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
+/* obsolete */
#define ATOM_DP_ACTION_TRAINING_START 0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
#define ATOM_DP_ACTION_BLANKING 0x07
-/* ucConfig */
+// ucConfig
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_DP_CONFIG_LINK_A 0x00
#define ATOM_DP_CONFIG_LINK_B 0x04
-
+/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/* DP_TRAINING_TABLE */
-#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
-#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
-#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 )
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
-#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84)
-typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
- UCHAR ucI2CSpeed;
- union {
- UCHAR ucRegIndex;
- UCHAR ucStatus;
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+ UCHAR ucI2CSpeed;
+ union
+ {
+ UCHAR ucRegIndex;
+ UCHAR ucStatus;
};
- USHORT lpI2CDataOut;
- UCHAR ucFlag;
- UCHAR ucTransBytes;
- UCHAR ucSlaveAddr;
- UCHAR ucLineNumber;
-} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+ USHORT lpI2CDataOut;
+ UCHAR ucFlag;
+ UCHAR ucTransBytes;
+ UCHAR ucSlaveAddr;
+ UCHAR ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
-/* ucFlag */
+//ucFlag
#define HW_I2C_WRITE 1
#define HW_I2C_READ 0
+#define I2C_2BYTE_ADDR 0x02
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+ UCHAR ucReserved[3];
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK 0x07
+#define HWBLKINST_HWBLK_MASK 0xF0
+#define HWBLKINST_HWBLK_SHIFT 0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE 0
+#define SELECT_DISP_PLL 1
+#define SELECT_DCIO_UNIPHY_LINK0 2
+#define SELECT_DCIO_UNIPHY_LINK1 3
+#define SELECT_DCIO_IMPCAL 4
+#define SELECT_DCIO_DIG 6
+#define SELECT_CRTC_PIXEL_RATE 7
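/* Editor's note: an illustrative sketch, not part of this patch, of how the
 * instance/block masks above appear intended to be combined when filling in
 * ucHWBlkInst; the helper name is hypothetical. */
static inline UCHAR atom_pack_hwblk_inst(UCHAR hwblk, UCHAR instance)
{
	/* [7:4] selects the HW block (one of the SELECT_* values above),
	 * [2:0] selects the instance within that block. */
	return ((hwblk << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
	       (instance & HWBLKINST_INSTANCE_MASK);
}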
+
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/
-/* Portion VI: Definitinos being oboselete */
+
+#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
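/* Editor's note: a hypothetical helper, shown only to illustrate how the
 * memory-type field above is read out of the MC_MISC0 scratch register. */
static inline ULONG mc_misc0_memory_type(ULONG mc_misc0)
{
	/* The type sits in bits [31:28]; the masked value compares directly
	 * against the MC_MISC0__MEMORY_TYPE__* constants, no shift needed. */
	return mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK;
}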
+
+/****************************************************************************/
+//Portion VI: Definitions being obsolete
/****************************************************************************/
-/* ========================================================================================== */
-/* Remove the definitions below when driver is ready! */
-typedef struct _ATOM_DAC_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10kHz unit */
- USHORT usReserved;
-} ATOM_DAC_INFO;
-
-typedef struct _COMPASSIONATE_DATA {
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- /* ============================== DAC1 portion */
- UCHAR ucDAC1_BG_Adjustment;
- UCHAR ucDAC1_DAC_Adjustment;
- USHORT usDAC1_FORCE_Data;
- /* ============================== DAC2 portion */
- UCHAR ucDAC2_CRT2_BG_Adjustment;
- UCHAR ucDAC2_CRT2_DAC_Adjustment;
- USHORT usDAC2_CRT2_FORCE_Data;
- USHORT usDAC2_CRT2_MUX_RegisterIndex;
- UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_NTSC_BG_Adjustment;
- UCHAR ucDAC2_NTSC_DAC_Adjustment;
- USHORT usDAC2_TV1_FORCE_Data;
- USHORT usDAC2_TV1_MUX_RegisterIndex;
- UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_CV_BG_Adjustment;
- UCHAR ucDAC2_CV_DAC_Adjustment;
- USHORT usDAC2_CV_FORCE_Data;
- USHORT usDAC2_CV_MUX_RegisterIndex;
- UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_PAL_BG_Adjustment;
- UCHAR ucDAC2_PAL_DAC_Adjustment;
- USHORT usDAC2_TV2_FORCE_Data;
-} COMPASSIONATE_DATA;
+//==========================================================================================
+//Remove the definitions below when the driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10kHz unit
+ USHORT usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct _COMPASSIONATE_DATA
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ //============================== DAC1 portion
+ UCHAR ucDAC1_BG_Adjustment;
+ UCHAR ucDAC1_DAC_Adjustment;
+ USHORT usDAC1_FORCE_Data;
+ //============================== DAC2 portion
+ UCHAR ucDAC2_CRT2_BG_Adjustment;
+ UCHAR ucDAC2_CRT2_DAC_Adjustment;
+ USHORT usDAC2_CRT2_FORCE_Data;
+ USHORT usDAC2_CRT2_MUX_RegisterIndex;
+ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_NTSC_BG_Adjustment;
+ UCHAR ucDAC2_NTSC_DAC_Adjustment;
+ USHORT usDAC2_TV1_FORCE_Data;
+ USHORT usDAC2_TV1_MUX_RegisterIndex;
+ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_CV_BG_Adjustment;
+ UCHAR ucDAC2_CV_DAC_Adjustment;
+ USHORT usDAC2_CV_FORCE_Data;
+ USHORT usDAC2_CV_MUX_RegisterIndex;
+ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_PAL_BG_Adjustment;
+ UCHAR ucDAC2_PAL_DAC_Adjustment;
+ USHORT usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
/****************************Supported Device Info Table Definitions**********************/
-/* ucConnectInfo: */
-/* [7:4] - connector type */
-/* = 1 - VGA connector */
-/* = 2 - DVI-I */
-/* = 3 - DVI-D */
-/* = 4 - DVI-A */
-/* = 5 - SVIDEO */
-/* = 6 - COMPOSITE */
-/* = 7 - LVDS */
-/* = 8 - DIGITAL LINK */
-/* = 9 - SCART */
-/* = 0xA - HDMI_type A */
-/* = 0xB - HDMI_type B */
-/* = 0xE - Special case1 (DVI+DIN) */
-/* Others=TBD */
-/* [3:0] - DAC Associated */
-/* = 0 - no DAC */
-/* = 1 - DACA */
-/* = 2 - DACB */
-/* = 3 - External DAC */
-/* Others=TBD */
-/* */
-
-typedef struct _ATOM_CONNECTOR_INFO {
+// ucConnectInfo:
+// [7:4] - connector type
+// = 1 - VGA connector
+// = 2 - DVI-I
+// = 3 - DVI-D
+// = 4 - DVI-A
+// = 5 - SVIDEO
+// = 6 - COMPOSITE
+// = 7 - LVDS
+// = 8 - DIGITAL LINK
+// = 9 - SCART
+// = 0xA - HDMI_type A
+// = 0xB - HDMI_type B
+// = 0xE - Special case1 (DVI+DIN)
+// Others=TBD
+// [3:0] - DAC Associated
+// = 0 - no DAC
+// = 1 - DACA
+// = 2 - DACB
+// = 3 - External DAC
+// Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
#if ATOM_BIG_ENDIAN
- UCHAR bfConnectorType:4;
- UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
#else
- UCHAR bfAssociatedDAC:4;
- UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
#endif
-} ATOM_CONNECTOR_INFO;
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+ ATOM_CONNECTOR_INFO sbfAccess;
+ UCHAR ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
-typedef union _ATOM_CONNECTOR_INFO_ACCESS {
- ATOM_CONNECTOR_INFO sbfAccess;
- UCHAR ucAccess;
-} ATOM_CONNECTOR_INFO_ACCESS;
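/* Editor's note: an illustrative sketch, not part of this patch. The union
 * above lets the packed connector byte documented before _ATOM_CONNECTOR_INFO
 * be loaded as a raw byte (ucAccess) and decoded through the endianness-safe
 * bitfields (sbfAccess); the helper name is hypothetical. */
static inline UCHAR atom_connector_type(UCHAR info_byte)
{
	ATOM_CONNECTOR_INFO_ACCESS access;

	access.ucAccess = info_byte;             /* raw byte from the table */
	return access.sbfAccess.bfConnectorType; /* [7:4], e.g. 2 = DVI-I */
}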
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_CONNECTOR_INFO_I2C {
- ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
-} ATOM_SUPPORTED_DEVICES_INFO;
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
#define NO_INT_SRC_MAPPED 0xFF
-typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
- UCHAR ucIntSrcBitmap;
-} ATOM_CONNECTOR_INC_SRC_BITMAP;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
- ATOM_CONNECTOR_INC_SRC_BITMAP
- asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
-} ATOM_SUPPORTED_DEVICES_INFO_2;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
- ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_SUPPORTED_DEVICES_INFO_2d1;
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+ UCHAR ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
-typedef struct _ATOM_MISC_CONTROL_INFO {
- USHORT usFrequency;
- UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
- UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
- UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
- UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
-} ATOM_MISC_CONTROL_INFO;
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+ USHORT usFrequency;
+ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control
+ UCHAR ucPLL_DutyCycle; // PLL duty cycle control
+ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control
+ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;
+
#define ATOM_MAX_MISC_INFO 4
-typedef struct _ATOM_TMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10Khz */
- ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
-} ATOM_TMDS_INFO;
+typedef struct _ATOM_TMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usMaxFrequency;              // in 10kHz
+ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
-typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
- UCHAR ucTVStandard; /* Same as TV standards defined above, */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_ANALOG_ATTRIBUTE;
-typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
- UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+ UCHAR ucTVStandard; //Same as TV standards defined above,
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
-typedef union _ATOM_ENCODER_ATTRIBUTE {
- ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
- ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
-} ATOM_ENCODER_ATTRIBUTE;
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+ UCHAR ucAttribute; //Same as other digital encoder attributes defined above
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock;
- USHORT usEncoderID;
- UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- ATOM_ENCODER_ATTRIBUTE usDevAttr;
-} DVO_ENCODER_CONTROL_PARAMETERS;
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock;
+ USHORT usEncoderID;
+ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ ATOM_ENCODER_ATTRIBUTE usDevAttr;
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
-typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
- DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} DVO_ENCODER_CONTROL_PS_ALLOCATION;
#define ATOM_XTMDS_ASIC_SI164_ID 1
#define ATOM_XTMDS_ASIC_SI178_ID 2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
#define ATOM_XTMDS_MVPU_FPGA 0x00000004
-typedef struct _ATOM_XTMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usSingleLinkMaxFrequency;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */
- UCHAR ucXtransimitterID;
- UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
- UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */
- /* due to design. This ID is used to alert driver that the sequence is not "standard"! */
- UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
- UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
-} ATOM_XTMDS_INFO;
-
-typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
- UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
- UCHAR ucPadding[2];
-} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+typedef struct _ATOM_XTMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usSingleLinkMaxFrequency;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;           // Points to the I2C ID used to control the external chip
+  UCHAR ucXtransimitterID;
+  UCHAR ucSupportedLink;                        // Bit field: bit0=1, single link supported; bit1=1, dual link supported
+  UCHAR ucSequnceAlterID;                       // Even with the same external TMDS ASIC, it's possible that the programming sequence alters
+                                                // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip
+ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off
+ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX....
+ UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
/****************************Legacy Power Play Table Definitions **********************/
-/* Definitions for ulPowerPlayMiscInfo */
+//Definitions for ulPowerPlayMiscInfo
#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
-#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */
-
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit is set, ucVoltageDropIndex is not an index to a GPIO pin, but a voltage ID that SW needs to program
+
#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
-#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
-#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, this mode is for accelerated 3D
-#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
-#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */
- /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then the driver will pick the one with the minimum power consumption.
+                                                             //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
-#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulReserved1; /* must set to 0 */
- ULONG ulReserved2; /* must set to 0 */
- USHORT usEngineClock;
- USHORT usMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO_V2 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO_V2;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_POWERMODE_INFO_V3 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
- UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */
-} ATOM_POWERMODE_INFO_V3;
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulReserved1; // must set to 0
+ ULONG ulReserved2; // must set to 0
+ USHORT usEngineClock;
+ USHORT usMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+  UCHAR ucVoltageDropIndex;         // index to Core (VDDC) voltage table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI votage table
+  UCHAR ucVDDCI_VoltageDropIndex;   // index to VDDCI voltage table
+
#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
-#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
-
-typedef struct _ATOM_POWERPLAY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO;
-
-typedef struct _ATOM_POWERPLAY_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V2;
-
-typedef struct _ATOM_POWERPLAY_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V3;
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
+
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
/* New PPlib */
/**************************************************************************/
@@ -4718,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -4725,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;   // Bump this whenever the table format or the meaning of the other fields changes.
+    UCHAR   ucTHyst;            // Temperature hysteresis. Integer.
+    USHORT  usTMin;             // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
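/* Editor's note: a worked example of the intended two-slope fan curve, not
 * part of this patch; the numbers are made up. With usTMin=4500 (45.00 C),
 * usTMed=6500, usPWMMin=3000 (30.00%) and usPWMMed=6000, a reading of
 * 55.00 C (t=5500) falls on the first slope:
 *
 *	pwm = usPWMMin + (usPWMMed - usPWMMin) * (t - usTMin)
 *	                                       / (usTMed - usTMin)
 *	    = 3000 + 3000 * 1000 / 2000 = 4500  ->  45.00% duty
 *
 * Between usTMed and usTHigh the same interpolation runs from usPWMMed up to
 * usPWMHigh; below usTMin the fan stays at usPWMMin. */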
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -4738,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000 // Whether the driver supports the BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -4773,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
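/* Editor's note: an illustrative sketch, not part of this patch, of how the
 * V3 offsets are meant to be consumed; the variable names are hypothetical
 * and byte-order handling is omitted. Offsets are relative to the start of
 * the PowerPlay table:
 *
 *	ATOM_PPLIB_POWERPLAYTABLE3 *t3 = (ATOM_PPLIB_POWERPLAYTABLE3 *)base;
 *	if (t3->usFanTableOffset)
 *		fan = (ATOM_PPLIB_FANTABLE *)(base + t3->usFanTableOffset);
 *	if (t3->usExtendendedHeaderOffset)
 *		ext = (ATOM_PPLIB_EXTENDEDHEADER *)
 *			(base + t3->usExtendendedHeaderOffset);
 */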
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -4792,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -4816,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF   0x00010000
+
+//M3 arbiter: 2-bit field; currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -4836,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+
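/* Editor's note: an illustrative sketch, not part of this patch. The two
 * sizes above let a parser step through the variable-sized non-clock array
 * without knowing the exact struct revision; tbl/ucNonClockSize are assumed
 * from the surrounding PowerPlay table definition, byte order omitted:
 *
 *	nci = (ATOM_PPLIB_NONCLOCK_INFO *)
 *		(base + tbl->usNonClockInfoArrayOffset +
 *		 index * tbl->ucNonClockSize);   // 12 (VER1) or 24 (VER2)
 */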
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -4858,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
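/* Editor's note: a sketch of how the split low/high clock fields above are
 * recombined into a single frequency (the pattern the radeon driver uses for
 * these tables); illustrative only, little-endian conversion omitted. */
static inline ULONG evergreen_ci_engine_clock(
	const ATOM_PPLIB_EVERGREEN_CLOCK_INFO *ci)
{
	/* 24-bit value, assumed to be in 10 kHz units; the high byte
	 * carries bits [23:16]. */
	return ((ULONG)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
}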
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
@@ -4871,42 +5967,44 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
+ ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
/**************************************************************************/
-/* Following definitions are for compatiblity issue in different SW components. */
+
+// Following definitions are for compatibility issues among different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
-#define Object_Info Object_Header
+#define Object_Info Object_Header
#define AdjustARB_SEQ MC_InitParameter
#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
-#define ASIC_VDDCI_Info ASIC_ProfilingInfo
+#define ASIC_VDDCI_Info ASIC_ProfilingInfo
#define ASIC_MVDDQ_Info MemoryTrainingInfo
-#define SS_Info PPLL_SS_Info
+#define SS_Info PPLL_SS_Info
#define ASIC_MVDDC_Info ASIC_InternalSS_Info
#define DispDevicePriorityInfo SaveRestoreInfo
#define DispOutInfo TV_VideoMode
+
#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
-/* New device naming, remove them when both DAL/VBIOS is ready */
+//New device naming; remove them when both DAL and VBIOS are ready
#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
@@ -4921,7 +6019,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
-
+
#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
@@ -4939,7 +6037,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S3_DFP2I_ACTIVEb1 0x02
-#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
@@ -4958,14 +6056,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
-#define TMDS1XEncoderControl DVOEncoderControl
+#define TMDS1XEncoderControl DVOEncoderControl
#define DFP1XOutputControl DVOOutputControl
#define ExternalDFPOutputControl DFP1XOutputControl
#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
#define DFP1IOutputControl TMDSAOutputControl
-#define DFP2IOutputControl LVTMAOutputControl
+#define DFP2IOutputControl LVTMAOutputControl
#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6072,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
#define ucDac1Standard ucDacStandard
-#define ucDac2Standard ucDacStandard
+#define ucDac2Standard ucDacStandard
#define TMDS1EncoderControl TMDSAEncoderControl
#define TMDS2EncoderControl LVTMAEncoderControl
@@ -4984,12 +6082,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define CRT1OutputControl DAC1OutputControl
#define CRT2OutputControl DAC2OutputControl
-/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
-#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
+#define ATOM_S2_CV_DPMS_STATE 0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
+#define ATOM_S2_TV1_DPMS_STATEb2 0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
+#define ATOM_S2_TV2_DPMS_STATEb2 0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
+#define ATOM_S2_CV_DPMS_STATEb3 0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
/*********************************************************************************/
-#pragma pack() /* BIOS data must use byte aligment */
+#pragma pack() // BIOS data must use byte alignment
#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e351fbd..8c2d6478a221 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
@@ -245,10 +245,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_enable_crtc(crtc, 1);
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 1);
- atombios_blank_crtc(crtc, 0);
+ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
@@ -256,10 +259,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
- atombios_blank_crtc(crtc, 1);
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 0);
- atombios_enable_crtc(crtc, 0);
+ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -349,7 +355,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+static void atombios_disable_ss(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u32 ss_cntl;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ }
+}
+
+
+union atom_enable_ss {
+ ENABLE_LVDS_SS_PARAMETERS legacy;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+};
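/* Editor's note (not part of this patch): wrapping both parameter layouts in
 * one union lets the function below memset() a single args buffer and issue
 * a single atom_execute_table() call for either table revision, instead of
 * duplicating the execute path per layout, as the rewritten body shows. */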
+
+static void atombios_enable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -358,11 +412,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
struct radeon_encoder *radeon_encoder = NULL;
struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
- ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
- ENABLE_LVDS_SS_PARAMETERS legacy_args;
+ union atom_enable_ss args;
uint16_t percentage = 0;
uint8_t type = 0, step = 0, delay = 0, range = 0;
+ /* XXX add ss support for DCE4 */
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -375,9 +432,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
step = dig->ss->step;
delay = dig->ss->delay;
range = dig->ss->range;
- } else if (enable)
+ } else
return;
- } else if (enable)
+ } else
return;
break;
}
@@ -386,29 +443,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
if (!radeon_encoder)
return;
+ memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
- memset(&args, 0, sizeof(args));
- args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.ucSpreadSpectrumType = type;
- args.ucSpreadSpectrumStep = step;
- args.ucSpreadSpectrumDelay = delay;
- args.ucSpreadSpectrumRange = range;
- args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.v1.ucSpreadSpectrumType = type;
+ args.v1.ucSpreadSpectrumStep = step;
+ args.v1.ucSpreadSpectrumDelay = delay;
+ args.v1.ucSpreadSpectrumRange = range;
+ args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucEnable = ATOM_ENABLE;
} else {
- memset(&legacy_args, 0, sizeof(legacy_args));
- legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- legacy_args.ucSpreadSpectrumType = type;
- legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
- legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- legacy_args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+ args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.legacy.ucSpreadSpectrumType = type;
+ args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+ args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+ args.legacy.ucEnable = ATOM_ENABLE;
}
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
union adjust_pixel_clock {
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,15 +476,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
+ int encoder_mode = 0;
/* reset the pll flags */
pll->flags = 0;
+ /* select the PLL algo */
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ pll->algo = PLL_ALGO_LEGACY;
+ else
+ pll->algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+ pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
@@ -448,10 +518,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ pll->algo = PLL_ALGO_LEGACY;
+ pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,17 +543,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
*/
if (ASIC_IS_DCE3(rdev)) {
union adjust_pixel_clock args;
- struct radeon_encoder_atom_dig *dig;
u8 frev, crev;
int index;
- if (!radeon_encoder->enc_priv)
- return adjusted_clock;
- dig = radeon_encoder->enc_priv;
-
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
- &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return adjusted_clock;
memset(&args, 0, sizeof(args));
@@ -489,12 +560,56 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
case 2:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
- args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ args.v1.ucEncodeMode = encoder_mode;
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
+ case 3:
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v3.sInput.ucEncodeMode = encoder_mode;
+ args.v3.sInput.ucDispPllConfig = 0;
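+ /* DP links are driven in coherent mode; TMDS above 165 MHz needs a dual link */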
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ }
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+ if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = args.v3.sOutput.ucRefDiv;
+ }
+ if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_POST_DIV;
+ pll->post_div = args.v3.sOutput.ucPostDiv;
+ }
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
@@ -513,9 +628,48 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS v1;
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
+ PIXEL_CLOCK_PARAMETERS_V5 v5;
};
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 5:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v5.ucCRTC = ATOM_CRTC_INVALID;
+ args.v5.usPixelClock = cpu_to_le16(rdev->clock.default_dispclk);
+ args.v5.ucPpll = ATOM_DCPLL;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -529,12 +683,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
+ int encoder_mode = 0;
memset(&args, 0, sizeof(args));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
@@ -542,30 +698,30 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!radeon_encoder)
return;
- if (radeon_crtc->crtc_id == 0)
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
- else
+ break;
+ case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ default:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll)
- radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- } else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
- &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
switch (frev) {
case 1:
@@ -576,8 +732,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucPpll = radeon_crtc->pll_id;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucRefDivSrc = 1;
break;
@@ -587,8 +742,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v2.ucPpll = radeon_crtc->pll_id;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucRefDivSrc = 1;
break;
@@ -598,12 +752,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucPpll = radeon_crtc->pll_id;
+ args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
- args.v3.ucEncoderMode =
- atombios_get_encoder_mode(encoder);
+ args.v3.ucEncoderMode = encoder_mode;
+ break;
+ case 5:
+ args.v5.ucCRTC = radeon_crtc->crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucRefDiv = ref_div;
+ args.v5.usFbDiv = cpu_to_le16(fb_div);
+ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v5.ucPostDiv = post_div;
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+ args.v5.ucPpll = radeon_crtc->pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +782,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ /* Pin framebuffer & get tiling information */
+ obj = radeon_fb->obj;
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case 15:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ break;
+ case 16:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ break;
+ case 24:
+ case 32:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ crtc->fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
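+ /* disable the VGA engine on the crtc being taken over */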
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ break;
+ case 1:
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ break;
+ case 2:
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ break;
+ case 3:
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ break;
+ case 4:
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ break;
+ case 5:
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ crtc->mode.vdisplay);
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -755,7 +1053,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1085,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
}
}
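+/* DCE4 has two display PLLs, and DP can use an external clock when one is
+ * present; scan the other crtcs for PPLLs already in use and hand out a
+ * free one.  Pre-DCE4 asics keep the fixed crtc-to-pll mapping.
+ */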
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+ struct drm_crtc *test_crtc;
+ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ /* if crtc is driving DP and we have an ext clock, use that */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+ if (rdev->clock.dp_extclk)
+ return ATOM_PPLL_INVALID;
+ }
+ }
+ }
+
+ /* otherwise, pick one of the plls */
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_test_crtc;
+
+ if (crtc == test_crtc)
+ continue;
+
+ radeon_test_crtc = to_radeon_crtc(test_crtc);
+ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+ pll_in_use |= (1 << radeon_test_crtc->pll_id);
+ }
+ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+ return ATOM_PPLL2;
+ } else
+ return radeon_crtc->crtc_id;
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -796,19 +1136,24 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
- atombios_set_ss(crtc, 0);
+ atombios_disable_ss(crtc);
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_set_ss(crtc, 1);
- atombios_crtc_set_timing(crtc, adjusted_mode);
+ atombios_enable_ss(crtc);
- if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_base(crtc, x, y, old_fb);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ else if (ASIC_IS_AVIVO(rdev))
+ atombios_crtc_set_timing(crtc, adjusted_mode);
else {
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- atombios_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_fixup(crtc);
}
+ atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
return 0;
@@ -818,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
@@ -825,14 +1176,19 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_lock_crtc(crtc, 1);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
+ atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- atombios_lock_crtc(crtc, 0);
+ atombios_lock_crtc(crtc, ATOM_DISABLE);
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1204,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
- if (radeon_crtc->crtc_id == 1)
- radeon_crtc->crtc_offset =
- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ default:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ break;
+ }
+ } else {
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+ radeon_crtc->pll_id = -1;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d59..abffb1499e22 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
train_set[lane] = v | p;
}
+union aux_channel_transaction {
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
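+/* DCE4 revised the AUX transaction parameter block; the v2 layout adds
+ * the HPD pin id of the target connector
+ */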
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
- PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
retry:
memcpy(base, req_bytes, num_bytes);
- args.lpAuxRequest = 0;
- args.lpDataOut = 16;
- args.ucDataOutLen = 0;
- args.ucChannelID = chan->rec.i2c_id;
- args.ucDelay = delay / 10;
+ args.v1.lpAuxRequest = 0;
+ args.v1.lpDataOut = 16;
+ args.v1.ucDataOutLen = 0;
+ args.v1.ucChannelID = chan->rec.i2c_id;
+ args.v1.ucDelay = delay / 10;
+ if (ASIC_IS_DCE4(rdev))
+ args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus && !args.ucDataOutLen) {
- if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
+ if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus, retry_count);
+ chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
}
- if (args.ucDataOutLen && read_byte && read_buf_len) {
- if (read_buf_len < args.ucDataOutLen) {
+ if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
- read_buf_len, args.ucDataOutLen);
+ read_buf_len, args.v1.ucDataOutLen);
return false;
}
{
- int len = min(read_buf_len, args.ucDataOutLen);
+ int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
- /* start training on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
- dig_connector->dp_clock, enc_id, 0);
- /* set training pattern 1 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* start training on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+ /* set training pattern 1 on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+ } else {
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+ }
/* set initial vs/emph */
memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 1);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
@@ -731,8 +747,12 @@ void dp_link_train(struct drm_encoder *encoder,
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
- dig_connector->dp_clock, enc_id, 0);
+ /* disable the training pattern on the source */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e4a938..3c391e7e9fd4 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
#define D1CRTC_CONTROL 0x6080
#define CRTC_EN (1 << 0)
+#define D1CRTC_STATUS 0x609c
#define D1CRTC_UPDATE_LOCK 0x60E8
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D2CRTC_CONTROL 0x6880
+#define D2CRTC_STATUS 0x689c
#define D2CRTC_UPDATE_LOCK 0x68E8
#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 000000000000..1caf625e472b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,2228 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "evergreend.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+
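+/* program the software-controlled core voltage for the requested power state */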
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable display read requests on any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* re-enable display read requests on any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
+ return connected;
+}
+
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
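+/* enable the HPD pins: program the connection/RX interrupt timers for each
+ * wired connector and flag its hotplug interrupt
+ */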
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (rdev->irq.installed)
+ evergreen_irq_set(rdev);
+}
+
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read the MC busy bits from SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+/*
+ * GART
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* poll the VM context 0 request response */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
+int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ evergreen_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r;
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+ evergreen_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+void evergreen_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+}
+
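+/* save the VGA/CRTC state and blank every display controller so the memory
+ * controller can be reprogrammed without live framebuffer fetches
+ */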
+static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ save->vga_control[0] = RREG32(D1VGA_CONTROL);
+ save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(D1VGA_CONTROL, 0);
+ WREG32(D2VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+}
+
+static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* Unlock host access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ /* Restore video state */
+ WREG32(D1VGA_CONTROL, save->vga_control[0]);
+ WREG32(D2VGA_CONTROL, save->vga_control[1]);
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
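+/* reprogram the MC apertures: stop the displays, wait for the MC to idle,
+ * write the VRAM/AGP ranges, then restore the saved display state
+ */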
+static void evergreen_mc_program(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lock out access through the VGA aperture */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ evergreen_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+
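+/* load the PFP and ME microcode images into the CP ucode RAMs */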
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
+
+/*
+ * Core functions
+ */
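+/* build the pipe-to-backend map: walk the (optionally swizzled) pipe order
+ * and pack the next enabled backend id into a 4-bit field per tile pipe
+ */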
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 cur_pipe;
+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+ num_tile_pipes = EVERGREEN_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > EVERGREEN_MAX_BACKENDS)
+ num_backends = EVERGREEN_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ force_no_swizzle = false;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
+
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 grbm_gfx_index;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+ & EVERGREEN_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+ & EVERGREEN_MAX_SIMDS_MASK);
+
+ cc_rb_backend_disable =
+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+ & EVERGREEN_MAX_BACKENDS_MASK);
+
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+ gb_addr_config |= ROW_SIZE(2);
+ else
+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+ if (rdev->ddev->pdev->device == 0x689e) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_131_124;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
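+ /* gb_backend_map packs one render backend index per nibble, one
+ * nibble per tile pipe; the efuse value selects which harvested
+ * backend configuration applies
+ */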
+ switch(efuse_box_bit_131_124) {
+ case 0x00:
+ gb_backend_map = 0x76543210;
+ break;
+ case 0x55:
+ gb_backend_map = 0x77553311;
+ break;
+ case 0x56:
+ gb_backend_map = 0x77553300;
+ break;
+ case 0x59:
+ gb_backend_map = 0x77552211;
+ break;
+ case 0x66:
+ gb_backend_map = 0x77443300;
+ break;
+ case 0x99:
+ gb_backend_map = 0x66552211;
+ break;
+ case 0x5a:
+ gb_backend_map = 0x77552200;
+ break;
+ case 0xaa:
+ gb_backend_map = 0x66442200;
+ break;
+ case 0x95:
+ gb_backend_map = 0x66553311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else if (rdev->ddev->pdev->device == 0x68b9) {
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_127_124;
+
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xf0000000) >> 28);
+
+ switch(efuse_box_bit_127_124) {
+ case 0x0:
+ gb_backend_map = 0x00003210;
+ break;
+ case 0x5:
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ gb_backend_map = 0x00003311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
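+ /* program each shader engine individually via SE_INDEX(i), then
+ * restore broadcast writes once all SEs are configured
+ */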
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+ u32 sp = cc_gc_shader_pipe_config;
+ u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+ if (i == num_shader_engines) {
+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ }
+
+ WREG32(GRBM_GFX_INDEX, gfx);
+ WREG32(RLC_GFX_INDEX, gfx);
+
+ WREG32(CC_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+ }
+
+ grbm_gfx_index |= SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if (rdev->family == CHIP_CEDAR)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
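+ /* the GPR pool, minus 2 * 4 clause temporaries, is split among the
+ * six shader stages in a 12:6:4:4:3:3 ratio (out of 32)
+ */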
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ if (rdev->family == CHIP_CEDAR)
+ ps_thread_count = 96;
+ else
+ ps_thread_count = 128;
+
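+ /* PS gets a fixed share; each of VS/GS/ES/HS/LS gets
+ * (max_threads - ps_thread_count) / 6, rounded down to a multiple
+ * of eight
+ */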
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_CEDAR)
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ else
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+ WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ /* clear render buffer base addresses */
+ WREG32(CB_COLOR0_BASE, 0);
+ WREG32(CB_COLOR1_BASE, 0);
+ WREG32(CB_COLOR2_BASE, 0);
+ WREG32(CB_COLOR3_BASE, 0);
+ WREG32(CB_COLOR4_BASE, 0);
+ WREG32(CB_COLOR5_BASE, 0);
+ WREG32(CB_COLOR6_BASE, 0);
+ WREG32(CB_COLOR7_BASE, 0);
+ WREG32(CB_COLOR8_BASE, 0);
+ WREG32(CB_COLOR9_BASE, 0);
+ WREG32(CB_COLOR10_BASE, 0);
+ WREG32(CB_COLOR11_BASE, 0);
+
+ /* set the shader const cache sizes to 0 */
+ for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+ WREG32(i, 0);
+ for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+ WREG32(i, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
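+ /* NOOFCHAN encodes log2 of the memory channel count */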
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ /* Setup GPU memory space */
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r600_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+
+ return 0;
+}
+
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+{
+ /* FIXME: implement for evergreen */
+ return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 srbm_reset = 0;
+ u32 grbm_reset = 0;
+
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ /* reset all the system blocks */
+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ /* After a reset we need to reinit the asic as the GPU often ends up
+ * in an incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ case 1:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ case 2:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ case 3:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ case 4:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ case 5:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ default:
+ return 0;
+ }
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+ return 0;
+}
+
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2,
+ u32 *disp_int_cont3,
+ u32 *disp_int_cont4,
+ u32 *disp_int_cont5)
+{
+ u32 tmp;
+
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = evergreen_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
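+ /* each 16-byte IH vector carries the source id in dword 0 and the
+ * source data in dword 1
+ */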
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 2);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 3);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 4);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 5);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int_cont & DC_HPD2_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ evergreen_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+ r = evergreen_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ evergreen_gpu_init(rdev);
+#if 0
+ if (!rdev->r600_blit.shader_obj) {
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("failed to pin blit object %d\n", r);
+ return r;
+ }
+#endif
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = evergreen_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = evergreen_cp_resume(rdev);
+ if (r)
+ return r;
+ /* the write back buffer is not vital so don't worry about failure */
+ r600_wb_enable(rdev);
+
+ return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770-class hw, unlike on
+ * r500 hw, posting performs the tasks needed to bring the GPU back
+ * into good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_startup(rdev);
+ if (r) {
+ DRM_ERROR("r600 startup failed on resume\n");
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+#if 0
+ int r;
+#endif
+ /* FIXME: we should wait for ring to be empty */
+ r700_cp_stop(rdev);
+ rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
+ r600_wb_disable(rdev);
+ evergreen_pcie_gart_disable(rdev);
+#if 0
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+static bool evergreen_card_posted(struct radeon_device *rdev)
+{
+ u32 reg;
+
+ /* first check CRTCs */
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+
+ /* then check MEM_SIZE, in case the crtcs are off */
+ if (RREG32(CONFIG_MEMSIZE))
+ return true;
+
+ return false;
+}
+
+/* Plan is to move initialization into that function and use
+ * helper functions so that radeon_device_init does pretty much
+ * nothing more than call asic specific functions. This
+ * should also allow us to remove a bunch of callback functions
+ * like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* Post card if necessary */
+ if (!evergreen_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = evergreen_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+ return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+ /*r600_blit_fini(rdev);*/
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ radeon_dummy_page_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 000000000000..345a75a03c96
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,1354 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+
+struct evergreen_cs_track {
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* value we track */
+ u32 nsamples;
+ u32 cb_color_base_last[12];
+ struct radeon_bo *cb_color_bo[12];
+ u32 cb_color_bo_offset[12];
+ struct radeon_bo *cb_color_fmask_bo[8];
+ struct radeon_bo *cb_color_cmask_bo[8];
+ u32 cb_color_info[12];
+ u32 cb_color_view[12];
+ u32 cb_color_pitch_idx[12];
+ u32 cb_color_slice_idx[12];
+ u32 cb_color_dim_idx[12];
+ u32 cb_color_dim[12];
+ u32 cb_color_pitch[12];
+ u32 cb_color_slice[12];
+ u32 cb_color_cmask_slice[8];
+ u32 cb_color_fmask_slice[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 vgt_strmout_config;
+ u32 vgt_strmout_buffer_config;
+ u32 db_depth_control;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_depth_size_idx;
+ u32 db_z_info;
+ u32 db_z_idx;
+ u32 db_z_read_offset;
+ u32 db_z_write_offset;
+ struct radeon_bo *db_z_read_bo;
+ struct radeon_bo *db_z_write_bo;
+ u32 db_s_info;
+ u32 db_s_idx;
+ u32 db_s_read_offset;
+ u32 db_s_write_offset;
+ struct radeon_bo *db_s_read_bo;
+ struct radeon_bo *db_s_write_bo;
+};
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_fmask_bo[i] = NULL;
+ track->cb_color_cmask_bo[i] = NULL;
+ track->cb_color_cmask_slice[i] = 0;
+ track->cb_color_fmask_slice[i] = 0;
+ }
+
+ for (i = 0; i < 12; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0;
+ track->cb_color_pitch_idx[i] = 0;
+ track->cb_color_slice_idx[i] = 0;
+ track->cb_color_dim[i] = 0;
+ track->cb_color_pitch[i] = 0;
+ track->cb_color_slice[i] = 0;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+ track->db_z_info = 0xFFFFFFFF;
+ track->db_z_idx = 0xFFFFFFFF;
+ track->db_z_read_offset = 0xFFFFFFFF;
+ track->db_z_write_offset = 0xFFFFFFFF;
+ track->db_z_read_bo = NULL;
+ track->db_z_write_bo = NULL;
+ track->db_s_info = 0xFFFFFFFF;
+ track->db_s_idx = 0xFFFFFFFF;
+ track->db_s_read_offset = 0xFFFFFFFF;
+ track->db_s_write_offset = 0xFFFFFFFF;
+ track->db_s_read_bo = NULL;
+ track->db_s_write_bo = NULL;
+}
+
+static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ /* XXX fill in */
+ return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+
+ /* we don't support stream out buffer yet */
+ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+
+ /* XXX fill in */
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assumes that chunk_ib_index is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size or if the packet
+ * type is unknown.
+ **/
+int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
+ *
+ * Check that the next packet is a relocation packet3; do bo validation
+ * and compute the GPU offset using the provided start.
+ **/
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 NOP;
+ * returns 1 if it is, 0 otherwise.
+ **/
+static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser: parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, wait_reg_mem;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg, wait_reg_mem_info;
+ volatile uint32_t *ib;
+
+ ib = p->ib->ptr;
+
+ /* parse the WAIT_REG_MEM */
+ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ if (r)
+ return r;
+
+ /* check it's a WAIT_REG_MEM */
+ if (wait_reg_mem.type != PACKET_TYPE3 ||
+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+ /* bit 4 is reg (0) or mem (1) */
+ if (wait_reg_mem_info & 0x10) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ r = -EINVAL;
+ return r;
+ }
+ /* waiting for value to be equal */
+ if ((wait_reg_mem_info & 0x7) != 0x3) {
+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ r = -EINVAL;
+ return r;
+ }
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ /* jump over the NOP */
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ if (r)
+ return r;
+
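+ /* the caller has already advanced p->idx past the two-dword PACKET0,
+ * so h_idx points back at its header
+ */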
+ h_idx = p->idx - 2;
+ p->idx += wait_reg_mem.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+ reg = CP_PACKET0_GET_REG(header);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ r = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ ib[h_idx + 4] = PACKET2(0);
+ ib[h_idx + 5] = PACKET2(0);
+ ib[h_idx + 6] = PACKET2(0);
+ ib[h_idx + 7] = PACKET2(0);
+ ib[h_idx + 8] = PACKET2(0);
+ } else {
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
+ ib[h_idx] = header;
+ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ return r;
+}
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ int r;
+
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ r = evergreen_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ return r;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ unsigned reg, i;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ r = evergreen_packet0_check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe this
+ * function will test it against a list of registers needing special
+ * handling.
+ */
+static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
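+ /* registers are 4 bytes apart, so reg >> 2 is the register index;
+ * each bitmap word covers 32 registers (reg >> 7) and a set bit
+ * marks a register that needs the special handling below
+ */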
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the output
+ * buffer; we need to understand better how it works before we can
+ * perform security checks on it (Jerome)
+ */
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ /* get value to populate the IB, don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case DB_Z_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case DB_STENCIL_INFO:
+ track->db_s_info = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case DB_Z_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_read_bo = reloc->robj;
+ break;
+ case DB_Z_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_write_bo = reloc->robj;
+ break;
+ case DB_STENCIL_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_read_bo = reloc->robj;
+ break;
+ case DB_STENCIL_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_write_bo = reloc->robj;
+ break;
+ case VGT_STRMOUT_CONFIG:
+ track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+ break;
+ case VGT_STRMOUT_BUFFER_CONFIG:
+ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case PA_SC_AA_CONFIG:
+ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
+ case CB_COLOR0_VIEW:
+ case CB_COLOR1_VIEW:
+ case CB_COLOR2_VIEW:
+ case CB_COLOR3_VIEW:
+ case CB_COLOR4_VIEW:
+ case CB_COLOR5_VIEW:
+ case CB_COLOR6_VIEW:
+ case CB_COLOR7_VIEW:
+ tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR8_VIEW:
+ case CB_COLOR9_VIEW:
+ case CB_COLOR10_VIEW:
+ case CB_COLOR11_VIEW:
+ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_INFO:
+ case CB_COLOR1_INFO:
+ case CB_COLOR2_INFO:
+ case CB_COLOR3_INFO:
+ case CB_COLOR4_INFO:
+ case CB_COLOR5_INFO:
+ case CB_COLOR6_INFO:
+ case CB_COLOR7_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR8_INFO:
+ case CB_COLOR9_INFO:
+ case CB_COLOR10_INFO:
+ case CB_COLOR11_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR0_PITCH:
+ case CB_COLOR1_PITCH:
+ case CB_COLOR2_PITCH:
+ case CB_COLOR3_PITCH:
+ case CB_COLOR4_PITCH:
+ case CB_COLOR5_PITCH:
+ case CB_COLOR6_PITCH:
+ case CB_COLOR7_PITCH:
+ tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_PITCH:
+ case CB_COLOR9_PITCH:
+ case CB_COLOR10_PITCH:
+ case CB_COLOR11_PITCH:
+ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_SLICE:
+ case CB_COLOR1_SLICE:
+ case CB_COLOR2_SLICE:
+ case CB_COLOR3_SLICE:
+ case CB_COLOR4_SLICE:
+ case CB_COLOR5_SLICE:
+ case CB_COLOR6_SLICE:
+ case CB_COLOR7_SLICE:
+ tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_SLICE:
+ case CB_COLOR9_SLICE:
+ case CB_COLOR10_SLICE:
+ case CB_COLOR11_SLICE:
+ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_ATTRIB:
+ case CB_COLOR1_ATTRIB:
+ case CB_COLOR2_ATTRIB:
+ case CB_COLOR3_ATTRIB:
+ case CB_COLOR4_ATTRIB:
+ case CB_COLOR5_ATTRIB:
+ case CB_COLOR6_ATTRIB:
+ case CB_COLOR7_ATTRIB:
+ case CB_COLOR8_ATTRIB:
+ case CB_COLOR9_ATTRIB:
+ case CB_COLOR10_ATTRIB:
+ case CB_COLOR11_ATTRIB:
+ break;
+ case CB_COLOR0_DIM:
+ case CB_COLOR1_DIM:
+ case CB_COLOR2_DIM:
+ case CB_COLOR3_DIM:
+ case CB_COLOR4_DIM:
+ case CB_COLOR5_DIM:
+ case CB_COLOR6_DIM:
+ case CB_COLOR7_DIM:
+ tmp = (reg - CB_COLOR0_DIM) / 0x3c;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_DIM:
+ case CB_COLOR9_DIM:
+ case CB_COLOR10_DIM:
+ case CB_COLOR11_DIM:
+ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_FMASK:
+ case CB_COLOR1_FMASK:
+ case CB_COLOR2_FMASK:
+ case CB_COLOR3_FMASK:
+ case CB_COLOR4_FMASK:
+ case CB_COLOR5_FMASK:
+ case CB_COLOR6_FMASK:
+ case CB_COLOR7_FMASK:
+ tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_fmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_CMASK:
+ case CB_COLOR1_CMASK:
+ case CB_COLOR2_CMASK:
+ case CB_COLOR3_CMASK:
+ case CB_COLOR4_CMASK:
+ case CB_COLOR5_CMASK:
+ case CB_COLOR6_CMASK:
+ case CB_COLOR7_CMASK:
+ tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_cmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_FMASK_SLICE:
+ case CB_COLOR1_FMASK_SLICE:
+ case CB_COLOR2_FMASK_SLICE:
+ case CB_COLOR3_FMASK_SLICE:
+ case CB_COLOR4_FMASK_SLICE:
+ case CB_COLOR5_FMASK_SLICE:
+ case CB_COLOR6_FMASK_SLICE:
+ case CB_COLOR7_FMASK_SLICE:
+ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_CMASK_SLICE:
+ case CB_COLOR1_CMASK_SLICE:
+ case CB_COLOR2_CMASK_SLICE:
+ case CB_COLOR3_CMASK_SLICE:
+ case CB_COLOR4_CMASK_SLICE:
+ case CB_COLOR5_CMASK_SLICE:
+ case CB_COLOR6_CMASK_SLICE:
+ case CB_COLOR7_CMASK_SLICE:
+ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR8_BASE:
+ case CB_COLOR9_BASE:
+ case CB_COLOR10_BASE:
+ case CB_COLOR11_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_IMMED0_BASE:
+ case CB_IMMED1_BASE:
+ case CB_IMMED2_BASE:
+ case CB_IMMED3_BASE:
+ case CB_IMMED4_BASE:
+ case CB_IMMED5_BASE:
+ case CB_IMMED6_BASE:
+ case CB_IMMED7_BASE:
+ case CB_IMMED8_BASE:
+ case CB_IMMED9_BASE:
+ case CB_IMMED10_BASE:
+ case CB_IMMED11_BASE:
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_PGM_START_HS:
+ case SQ_PGM_START_LS:
+ case GDS_ADDR_BASE:
+ case SQ_CONST_MEM_BASE:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ case SQ_ALU_CONST_CACHE_HS_0:
+ case SQ_ALU_CONST_CACHE_HS_1:
+ case SQ_ALU_CONST_CACHE_HS_2:
+ case SQ_ALU_CONST_CACHE_HS_3:
+ case SQ_ALU_CONST_CACHE_HS_4:
+ case SQ_ALU_CONST_CACHE_HS_5:
+ case SQ_ALU_CONST_CACHE_HS_6:
+ case SQ_ALU_CONST_CACHE_HS_7:
+ case SQ_ALU_CONST_CACHE_HS_8:
+ case SQ_ALU_CONST_CACHE_HS_9:
+ case SQ_ALU_CONST_CACHE_HS_10:
+ case SQ_ALU_CONST_CACHE_HS_11:
+ case SQ_ALU_CONST_CACHE_HS_12:
+ case SQ_ALU_CONST_CACHE_HS_13:
+ case SQ_ALU_CONST_CACHE_HS_14:
+ case SQ_ALU_CONST_CACHE_HS_15:
+ case SQ_ALU_CONST_CACHE_LS_0:
+ case SQ_ALU_CONST_CACHE_LS_1:
+ case SQ_ALU_CONST_CACHE_LS_2:
+ case SQ_ALU_CONST_CACHE_LS_3:
+ case SQ_ALU_CONST_CACHE_LS_4:
+ case SQ_ALU_CONST_CACHE_LS_5:
+ case SQ_ALU_CONST_CACHE_LS_6:
+ case SQ_ALU_CONST_CACHE_LS_7:
+ case SQ_ALU_CONST_CACHE_LS_8:
+ case SQ_ALU_CONST_CACHE_LS_9:
+ case SQ_ALU_CONST_CACHE_LS_10:
+ case SQ_ALU_CONST_CACHE_LS_11:
+ case SQ_ALU_CONST_CACHE_LS_12:
+ case SQ_ALU_CONST_CACHE_LS_13:
+ case SQ_ALU_CONST_CACHE_LS_14:
+ case SQ_ALU_CONST_CACHE_LS_15:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * evergreen_check_texture_resource() - check if a texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function checks that the resource has valid fields and that the
+ * texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ /* XXX fill in */
+ return 0;
+}
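+/*
+ * Illustrative sketch only, not part of this patch: once the XXX above is
+ * filled in, the check is expected to decode the eight SQ_TEX_RESOURCE_WORD*
+ * dwords at @idx and verify something along the lines of
+ *
+ *	if (computed_texture_size > radeon_bo_size(texture) ||
+ *	    computed_mipmap_size > radeon_bo_size(mipmap))
+ *		return -EINVAL;
+ *
+ * where computed_*_size are hypothetical helpers derived from the
+ * width/height/format/level fields; radeon_bo_size() is the accessor the
+ * vertex-buffer path below already uses.
+ */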
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct evergreen_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+ unsigned start_reg, end_reg, reg;
+ int r;
+ u32 idx_value;
+
+ track = (struct evergreen_cs_track *)p->track;
+ ib = p->ib->ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (pkt->opcode) {
+ case PACKET3_CONTEXT_CONTROL:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad CONTEXT_CONTROL\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_CLEAR_STATE:
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_BASE:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_2:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_AUTO:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_IMMD:
+ if (pkt->count < 2) {
+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_WAIT_REG_MEM:
+ if (pkt->count != 5) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ /* bit 4 is reg (0) or mem (1) */
+ if (idx_value & 0x10) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_SURFACE_SYNC:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ /* size 0xffffffff with base 0x0 means flush all caches; no reloc follows */
+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+ radeon_get_ib_value(p, idx + 2) != 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
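+ /* the packet's base-address dword is in 256-byte units, hence >> 8 */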
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_EVENT_WRITE:
+ if (pkt->count != 2 && pkt->count != 0) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_EVENT_WRITE_EOP:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_EVENT_WRITE_EOS:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_SET_CONFIG_REG:
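+ /*
+  * Payload dword 0 is the register offset in dwords from
+  * PACKET3_SET_CONFIG_REG_START; the following pkt->count dwords are
+  * consecutive register values, each checked individually below.
+  */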
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_CONTEXT_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_RESOURCE:
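+ /* each resource constant is a block of 8 dwords; dword 7 encodes
+  * the constant type (texture, buffer or invalid) */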
+ if (pkt->count % 8) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < (pkt->count / 8); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
+ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+ case SQ_TEX_VTX_VALID_TEXTURE:
+ /* tex base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ texture = reloc->robj;
+ /* tex mip base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = evergreen_check_texture_resource(p, idx+1+(i*8),
+ texture, mipmap);
+ if (r)
+ return r;
+ break;
+ case SQ_TEX_VTX_VALID_BUFFER:
+ /* vtx base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
+ }
+ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case SQ_TEX_VTX_INVALID_TEXTURE:
+ case SQ_TEX_VTX_INVALID_BUFFER:
+ default:
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_ALU_CONST:
+ /* XXX fix me: ALU const buffers only, no reg file */
+ break;
+ case PACKET3_SET_BOOL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+ DRM_ERROR("bad SET_BOOL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_LOOP_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+ DRM_ERROR("bad SET_LOOP_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CTL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+ DRM_ERROR("bad SET_CTL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_SAMPLER:
+ if (pkt->count % 3) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct evergreen_cs_track *track;
+ int r;
+
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ evergreen_cs_track_init(track);
+ track->npipes = p->rdev->config.evergreen.tiling_npipes;
+ track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
+ track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ p->track = track;
+ }
+ do {
+ r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
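+ /* a PM4 packet is one header dword plus (count + 1) payload dwords */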
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ r = evergreen_cs_parse_packet0(p, &pkt);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = evergreen_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
+ return -EINVAL;
+ }
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib->length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ mdelay(1);
+ }
+#endif
+ kfree(p->track);
+ p->track = NULL;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 000000000000..e028c1cd9d9b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
+#define EVERGREEN_D3VGA_CONTROL 0x3e0
+#define EVERGREEN_D4VGA_CONTROL 0x3e4
+#define EVERGREEN_D5VGA_CONTROL 0x3e8
+#define EVERGREEN_D6VGA_CONTROL 0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL 0x414
+#define EVERGREEN_P2PLL_SS_CNTL 0x454
+# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE 0x6800
+#define EVERGREEN_GRPH_CONTROL 0x6804
+# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_DEPTH_8BPP 0
+# define EVERGREEN_GRPH_DEPTH_16BPP 1
+# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+# define EVERGREEN_GRPH_FORMAT_ARGB565 1
+# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+# define EVERGREEN_GRPH_FORMAT_AI88 3
+# define EVERGREEN_GRPH_FORMAT_MONO16 4
+# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+# define EVERGREEN_GRPH_FORMAT_RGB111110 6
+# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
+# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_RED_SEL_R 0
+# define EVERGREEN_GRPH_RED_SEL_G 1
+# define EVERGREEN_GRPH_RED_SEL_B 2
+# define EVERGREEN_GRPH_RED_SEL_A 3
+# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
+# define EVERGREEN_GRPH_GREEN_SEL_G 0
+# define EVERGREEN_GRPH_GREEN_SEL_B 1
+# define EVERGREEN_GRPH_GREEN_SEL_A 2
+# define EVERGREEN_GRPH_GREEN_SEL_R 3
+# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
+# define EVERGREEN_GRPH_BLUE_SEL_B 0
+# define EVERGREEN_GRPH_BLUE_SEL_A 1
+# define EVERGREEN_GRPH_BLUE_SEL_R 2
+# define EVERGREEN_GRPH_BLUE_SEL_G 3
+# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
+# define EVERGREEN_GRPH_ALPHA_SEL_A 0
+# define EVERGREEN_GRPH_ALPHA_SEL_R 1
+# define EVERGREEN_GRPH_ALPHA_SEL_G 2
+# define EVERGREEN_GRPH_ALPHA_SEL_B 3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
+# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
+# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+#define EVERGREEN_GRPH_PITCH 0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
+#define EVERGREEN_GRPH_X_START 0x682c
+#define EVERGREEN_GRPH_Y_START 0x6830
+#define EVERGREEN_GRPH_X_END 0x6834
+#define EVERGREEN_GRPH_Y_END 0x6838
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x6998
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
+#define EVERGREEN_CUR_POSITION 0x69a8
+#define EVERGREEN_CUR_HOT_SPOT 0x69ac
+#define EVERGREEN_CUR_COLOR1 0x69b0
+#define EVERGREEN_CUR_COLOR2 0x69b4
+#define EVERGREEN_CUR_UPDATE 0x69b8
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
+#define EVERGREEN_DC_LUT_CONTROL 0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
+
+#define EVERGREEN_DATA_FORMAT 0x6b00
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+#define EVERGREEN_VLINE_START_END 0x6b08
+#define EVERGREEN_VLINE_STATUS 0x6bb8
+# define EVERGREEN_VLINE_STAT (1 << 12)
+
+#define EVERGREEN_VIEWPORT_START 0x6d70
+#define EVERGREEN_VIEWPORT_SIZE 0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
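+/*
+ * An informative example: a per-CRTC register is reached by adding one of
+ * these offsets to its CRTC0 address, e.g. EVERGREEN_CRTC_CONTROL +
+ * EVERGREEN_CRTC2_REGISTER_OFFSET addresses the third CRTC's control
+ * register.
+ */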
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
+#define EVERGREEN_CRTC_CONTROL 0x6e70
+# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS 0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+
+#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 000000000000..a1cd621780e2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,1023 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define MSAA_NUM_SAMPLES_SHIFT 0
+#define MSAA_NUM_SAMPLES_MASK 0x3
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+/*
+ * PM4
+ */
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
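+
+/*
+ * Example (informative): PACKET3(PACKET3_NOP, 0) builds the header
+ * 0xC0001000 -- type 3 in bits 31:30, count 0 in bits 29:16, opcode 0x10
+ * in bits 15:8 -- so CP_PACKET_GET_TYPE() returns 3, CP_PACKET_GET_COUNT()
+ * returns 0 and CP_PACKET3_GET_OPCODE() returns 0x10.
+ */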
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_VC_ACTION_ENA (1 << 24)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SMX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+#define PACKET3_RB_OFFSET 0x4B
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
+#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
+#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
+#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
+#define SQ_TEX_VTX_VALID_BUFFER 0x3
+
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define VGT_TF_RING_SIZE 0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE 0x28900
+#define SQ_GSVS_RING_ITEMSIZE 0x28904
+#define SQ_ESTMP_RING_ITEMSIZE 0x28908
+#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE 0x28910
+#define SQ_PSTMP_RING_ITEMSIZE 0x28914
+#define SQ_LSTMP_RING_ITEMSIZE 0x28830
+#define SQ_HSTMP_RING_ITEMSIZE 0x28834
+
+#define SQ_GS_VERT_ITEMSIZE 0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1 0x28920
+#define SQ_GS_VERT_ITEMSIZE_2 0x28924
+#define SQ_GS_VERT_ITEMSIZE_3 0x28928
+#define SQ_GSVS_RING_OFFSET_1 0x2892c
+#define SQ_GSVS_RING_OFFSET_2 0x28930
+#define SQ_GSVS_RING_OFFSET_3 0x28934
+
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80
+
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+
+#define DB_DEPTH_CONTROL 0x28800
+#define DB_DEPTH_VIEW 0x28008
+#define DB_HTILE_DATA_BASE 0x28014
+#define DB_Z_INFO 0x28040
+# define Z_ARRAY_MODE(x) ((x) << 4)
+#define DB_STENCIL_INFO 0x28044
+#define DB_Z_READ_BASE 0x28048
+#define DB_STENCIL_READ_BASE 0x2804c
+#define DB_Z_WRITE_BASE 0x28050
+#define DB_STENCIL_WRITE_BASE 0x28054
+#define DB_DEPTH_SIZE 0x28058
+
+#define SQ_PGM_START_PS 0x28840
+#define SQ_PGM_START_VS 0x2885c
+#define SQ_PGM_START_GS 0x28874
+#define SQ_PGM_START_ES 0x2888c
+#define SQ_PGM_START_FS 0x288a4
+#define SQ_PGM_START_HS 0x288b8
+#define SQ_PGM_START_LS 0x288d0
+
+#define VGT_STRMOUT_CONFIG 0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
+
+#define CB_TARGET_MASK 0x28238
+#define CB_SHADER_MASK 0x2823c
+
+#define GDS_ADDR_BASE 0x28720
+
+#define CB_IMMED0_BASE 0x28b9c
+#define CB_IMMED1_BASE 0x28ba0
+#define CB_IMMED2_BASE 0x28ba4
+#define CB_IMMED3_BASE 0x28ba8
+#define CB_IMMED4_BASE 0x28bac
+#define CB_IMMED5_BASE 0x28bb0
+#define CB_IMMED6_BASE 0x28bb4
+#define CB_IMMED7_BASE 0x28bb8
+#define CB_IMMED8_BASE 0x28bbc
+#define CB_IMMED9_BASE 0x28bc0
+#define CB_IMMED10_BASE 0x28bc4
+#define CB_IMMED11_BASE 0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define CB_COLOR0_BASE 0x28c60
+#define CB_COLOR0_PITCH 0x28c64
+#define CB_COLOR0_SLICE 0x28c68
+#define CB_COLOR0_VIEW 0x28c6c
+#define CB_COLOR0_INFO 0x28c70
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+#define CB_COLOR0_ATTRIB 0x28c74
+#define CB_COLOR0_DIM 0x28c78
+/* only CB0-7 blocks have these regs */
+#define CB_COLOR0_CMASK 0x28c7c
+#define CB_COLOR0_CMASK_SLICE 0x28c80
+#define CB_COLOR0_FMASK 0x28c84
+#define CB_COLOR0_FMASK_SLICE 0x28c88
+#define CB_COLOR0_CLEAR_WORD0 0x28c8c
+#define CB_COLOR0_CLEAR_WORD1 0x28c90
+#define CB_COLOR0_CLEAR_WORD2 0x28c94
+#define CB_COLOR0_CLEAR_WORD3 0x28c98
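+
+/* CB1-7 repeat this layout every 0x3c bytes; CB8-11, which drop the
+ * CMASK/FMASK/CLEAR_WORD registers, repeat every 0x1c bytes -- the two
+ * strides the CS checker above relies on.
+ */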
+
+#define CB_COLOR1_BASE 0x28c9c
+#define CB_COLOR2_BASE 0x28cd8
+#define CB_COLOR3_BASE 0x28d14
+#define CB_COLOR4_BASE 0x28d50
+#define CB_COLOR5_BASE 0x28d8c
+#define CB_COLOR6_BASE 0x28dc8
+#define CB_COLOR7_BASE 0x28e04
+#define CB_COLOR8_BASE 0x28e40
+#define CB_COLOR9_BASE 0x28e5c
+#define CB_COLOR10_BASE 0x28e78
+#define CB_COLOR11_BASE 0x28e94
+
+#define CB_COLOR1_PITCH 0x28ca0
+#define CB_COLOR2_PITCH 0x28cdc
+#define CB_COLOR3_PITCH 0x28d18
+#define CB_COLOR4_PITCH 0x28d54
+#define CB_COLOR5_PITCH 0x28d90
+#define CB_COLOR6_PITCH 0x28dcc
+#define CB_COLOR7_PITCH 0x28e08
+#define CB_COLOR8_PITCH 0x28e44
+#define CB_COLOR9_PITCH 0x28e60
+#define CB_COLOR10_PITCH 0x28e7c
+#define CB_COLOR11_PITCH 0x28e98
+
+#define CB_COLOR1_SLICE 0x28ca4
+#define CB_COLOR2_SLICE 0x28ce0
+#define CB_COLOR3_SLICE 0x28d1c
+#define CB_COLOR4_SLICE 0x28d58
+#define CB_COLOR5_SLICE 0x28d94
+#define CB_COLOR6_SLICE 0x28dd0
+#define CB_COLOR7_SLICE 0x28e0c
+#define CB_COLOR8_SLICE 0x28e48
+#define CB_COLOR9_SLICE 0x28e64
+#define CB_COLOR10_SLICE 0x28e80
+#define CB_COLOR11_SLICE 0x28e9c
+
+#define CB_COLOR1_VIEW 0x28ca8
+#define CB_COLOR2_VIEW 0x28ce4
+#define CB_COLOR3_VIEW 0x28d20
+#define CB_COLOR4_VIEW 0x28d5c
+#define CB_COLOR5_VIEW 0x28d98
+#define CB_COLOR6_VIEW 0x28dd4
+#define CB_COLOR7_VIEW 0x28e10
+#define CB_COLOR8_VIEW 0x28e4c
+#define CB_COLOR9_VIEW 0x28e68
+#define CB_COLOR10_VIEW 0x28e84
+#define CB_COLOR11_VIEW 0x28ea0
+
+#define CB_COLOR1_INFO 0x28cac
+#define CB_COLOR2_INFO 0x28ce8
+#define CB_COLOR3_INFO 0x28d24
+#define CB_COLOR4_INFO 0x28d60
+#define CB_COLOR5_INFO 0x28d9c
+#define CB_COLOR6_INFO 0x28dd8
+#define CB_COLOR7_INFO 0x28e14
+#define CB_COLOR8_INFO 0x28e50
+#define CB_COLOR9_INFO 0x28e6c
+#define CB_COLOR10_INFO 0x28e88
+#define CB_COLOR11_INFO 0x28ea4
+
+#define CB_COLOR1_ATTRIB 0x28cb0
+#define CB_COLOR2_ATTRIB 0x28cec
+#define CB_COLOR3_ATTRIB 0x28d28
+#define CB_COLOR4_ATTRIB 0x28d64
+#define CB_COLOR5_ATTRIB 0x28da0
+#define CB_COLOR6_ATTRIB 0x28ddc
+#define CB_COLOR7_ATTRIB 0x28e18
+#define CB_COLOR8_ATTRIB 0x28e54
+#define CB_COLOR9_ATTRIB 0x28e70
+#define CB_COLOR10_ATTRIB 0x28e8c
+#define CB_COLOR11_ATTRIB 0x28ea8
+
+#define CB_COLOR1_DIM 0x28cb4
+#define CB_COLOR2_DIM 0x28cf0
+#define CB_COLOR3_DIM 0x28d2c
+#define CB_COLOR4_DIM 0x28d68
+#define CB_COLOR5_DIM 0x28da4
+#define CB_COLOR6_DIM 0x28de0
+#define CB_COLOR7_DIM 0x28e1c
+#define CB_COLOR8_DIM 0x28e58
+#define CB_COLOR9_DIM 0x28e74
+#define CB_COLOR10_DIM 0x28e90
+#define CB_COLOR11_DIM 0x28eac
+
+#define CB_COLOR1_CMASK 0x28cb8
+#define CB_COLOR2_CMASK 0x28cf4
+#define CB_COLOR3_CMASK 0x28d30
+#define CB_COLOR4_CMASK 0x28d6c
+#define CB_COLOR5_CMASK 0x28da8
+#define CB_COLOR6_CMASK 0x28de4
+#define CB_COLOR7_CMASK 0x28e20
+
+#define CB_COLOR1_CMASK_SLICE 0x28cbc
+#define CB_COLOR2_CMASK_SLICE 0x28cf8
+#define CB_COLOR3_CMASK_SLICE 0x28d34
+#define CB_COLOR4_CMASK_SLICE 0x28d70
+#define CB_COLOR5_CMASK_SLICE 0x28dac
+#define CB_COLOR6_CMASK_SLICE 0x28de8
+#define CB_COLOR7_CMASK_SLICE 0x28e24
+
+#define CB_COLOR1_FMASK 0x28cc0
+#define CB_COLOR2_FMASK 0x28cfc
+#define CB_COLOR3_FMASK 0x28d38
+#define CB_COLOR4_FMASK 0x28d74
+#define CB_COLOR5_FMASK 0x28db0
+#define CB_COLOR6_FMASK 0x28dec
+#define CB_COLOR7_FMASK 0x28e28
+
+#define CB_COLOR1_FMASK_SLICE 0x28cc4
+#define CB_COLOR2_FMASK_SLICE 0x28d00
+#define CB_COLOR3_FMASK_SLICE 0x28d3c
+#define CB_COLOR4_FMASK_SLICE 0x28d78
+#define CB_COLOR5_FMASK_SLICE 0x28db4
+#define CB_COLOR6_FMASK_SLICE 0x28df0
+#define CB_COLOR7_FMASK_SLICE 0x28e2c
+
+#define CB_COLOR1_CLEAR_WORD0 0x28cc8
+#define CB_COLOR2_CLEAR_WORD0 0x28d04
+#define CB_COLOR3_CLEAR_WORD0 0x28d40
+#define CB_COLOR4_CLEAR_WORD0 0x28d7c
+#define CB_COLOR5_CLEAR_WORD0 0x28db8
+#define CB_COLOR6_CLEAR_WORD0 0x28df4
+#define CB_COLOR7_CLEAR_WORD0 0x28e30
+
+#define CB_COLOR1_CLEAR_WORD1 0x28ccc
+#define CB_COLOR2_CLEAR_WORD1 0x28d08
+#define CB_COLOR3_CLEAR_WORD1 0x28d44
+#define CB_COLOR4_CLEAR_WORD1 0x28d80
+#define CB_COLOR5_CLEAR_WORD1 0x28dbc
+#define CB_COLOR6_CLEAR_WORD1 0x28df8
+#define CB_COLOR7_CLEAR_WORD1 0x28e34
+
+#define CB_COLOR1_CLEAR_WORD2 0x28cd0
+#define CB_COLOR2_CLEAR_WORD2 0x28d0c
+#define CB_COLOR3_CLEAR_WORD2 0x28d48
+#define CB_COLOR4_CLEAR_WORD2 0x28d84
+#define CB_COLOR5_CLEAR_WORD2 0x28dc0
+#define CB_COLOR6_CLEAR_WORD2 0x28dfc
+#define CB_COLOR7_CLEAR_WORD2 0x28e38
+
+#define CB_COLOR1_CLEAR_WORD3 0x28cd4
+#define CB_COLOR2_CLEAR_WORD3 0x28d10
+#define CB_COLOR3_CLEAR_WORD3 0x28d4c
+#define CB_COLOR4_CLEAR_WORD3 0x28d88
+#define CB_COLOR5_CLEAR_WORD3 0x28dc4
+#define CB_COLOR6_CLEAR_WORD3 0x28e00
+#define CB_COLOR7_CLEAR_WORD3 0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0 0x30000
+#define SQ_TEX_RESOURCE_WORD1_0 0x30004
+# define TEX_ARRAY_MODE(x) ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0 0x30008
+#define SQ_TEX_RESOURCE_WORD3_0 0x3000c
+#define SQ_TEX_RESOURCE_WORD4_0 0x30010
+#define SQ_TEX_RESOURCE_WORD5_0 0x30014
+#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+
+
+#endif
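
Aside: the CB_COLORn offsets above follow a regular stride worth spelling out once. The helper below is purely illustrative (not part of the driver) and only restates the layout visible in the defines: blocks 0-7 carry the full register set at a 0x3c stride, while blocks 8-11 drop the CMASK/FMASK/CLEAR_WORD registers and sit only 0x1c apart.

/* Illustrative only -- cb_color_base() is hypothetical, not driver API.
 * It reproduces the stride implied by the CB_COLORn_BASE defines above.
 */
static unsigned int cb_color_base(unsigned int n)
{
	if (n < 8)				/* full blocks, 0x3c apart */
		return 0x28c60 + n * 0x3c;	/* CB_COLOR0_BASE .. CB_COLOR7_BASE */
	return 0x28e40 + (n - 8) * 0x1c;	/* CB_COLOR8_BASE .. CB_COLOR11_BASE */
}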
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650cdb79..a89a15ab524d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,15 +26,18 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
+#include "atom.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -65,6 +68,274 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == 0) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ /* only one clock mode per power state */
+ rdev->pm.requested_clock_mode_index = 0;
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ sclk_cntl = RREG32_PLL(SCLK_CNTL);
+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+ else
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+ } else
+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+ switch (voltage->delay) {
+ case 33:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+ break;
+ case 66:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+ break;
+ case 99:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+ break;
+ case 132:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+ break;
+ }
+ } else
+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ sclk_cntl &= ~FORCE_HDP;
+ else
+ sclk_cntl |= FORCE_HDP;
+
+ WREG32_PLL(SCLK_CNTL, sclk_cntl);
+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
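
The GPIO voltage handling above is a classic read-modify-write with pin polarity; the standalone sketch below (hypothetical names, a plain pointer standing in for the driver's RREG32()/WREG32() accessors) shows the pattern in isolation.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch of the r100_pm_misc() GPIO pattern: assert a pin
 * while honouring its polarity.
 */
static void gpio_assert(volatile uint32_t *reg, uint32_t mask, bool active_high)
{
	uint32_t tmp = *reg;		/* read */

	if (active_high)
		tmp |= mask;		/* asserted level is high */
	else
		tmp &= ~mask;		/* asserted level is low */
	*reg = tmp;			/* write back */
}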
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+ return false;
+ else
+ return true;
+}
+
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -197,13 +468,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
/* set address range for PCI address translate */
- WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- WREG32(RADEON_AIC_HI_ADDR, tmp);
+ WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+ WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -235,9 +506,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
void r100_pci_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
r100_pci_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int r100_irq_set(struct radeon_device *rdev)
@@ -252,6 +523,9 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
+ if (rdev->irq.gui_idle) {
+ tmp |= RADEON_GUI_IDLE_MASK;
+ }
if (rdev->irq.crtc_vblank_int[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
@@ -286,6 +560,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= RADEON_GUI_IDLE_STAT;
+ }
+
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@@ -297,6 +577,9 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@@ -309,12 +592,22 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
}
+ /* gui idle interrupt */
+ if (status & RADEON_GUI_IDLE_STAT) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -326,6 +619,8 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -366,8 +661,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -657,26 +952,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
- /* Reset CP */
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
- }
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@@ -739,6 +1014,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+ /* protect against crazy HW on resume */
+ rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -779,39 +1056,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-int r100_cp_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 16))) {
- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
-}
-
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -986,7 +1230,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = CP_PACKET0_GET_REG(header);
- mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1020,7 +1263,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
out:
- mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r;
}
@@ -1384,6 +1626,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_RGB332:
case RADEON_TXFORMAT_Y8:
track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_AI88:
case RADEON_TXFORMAT_ARGB1555:
@@ -1395,12 +1638,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
@@ -1701,7 +1946,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 31))) {
+ if (!(tmp & RADEON_RBBM_ACTIVE)) {
return 0;
}
DRM_UDELAY(1);
@@ -1716,8 +1961,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 2)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -1725,81 +1970,172 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- /* TODO: anythings to do here ? pipes ? */
- r100_hdp_reset(rdev);
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * We don't need to initialize the lockup tracking information here: either
+ * the CP rptr will differ from the recorded value, or jiffies will wrap
+ * around, and both cases force a (re)initialization of the tracking
+ * information.
+ *
+ * A possible false positive is a call made after a long delay with
+ * last_cp_rptr equal to the current CP rptr; unlikely, but it can happen.
+ * To avoid it, if the time elapsed since the last call exceeds the rearm
+ * threshold below, we return false and update the tracking information.
+ * Because of this, the caller must call r100_gpu_cp_is_lockup several
+ * times within that window for a lockup to be reported; the fencing code
+ * should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so
+ * we don't get a false positive when the CP simply has nothing to do.
+ */
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+ unsigned long cjiffies, elapsed;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, lockup->last_jiffies)) {
+ /* likely a wrap around */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (cp->rptr != lockup->last_cp_rptr) {
+ /* CP is still working no lockup */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+ if (elapsed >= 3000) {
+ /* Very likely the improbable case where the current
+ * rptr equals one recorded a long while ago; this is
+ * more likely a false positive, so update the tracking
+ * information, which forces us to be called again at a
+ * later point.
+ */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (elapsed >= 1000) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
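
For clarity, the same heuristic reduced to its core (a hypothetical, self-contained C sketch; milliseconds stand in for jiffies and its wrap handling): the CP is reported hung only when its read pointer has sat still for at least a second, and samples older than the rearm threshold are discarded as stale.

#include <stdbool.h>
#include <stdint.h>

struct lockup_state {
	uint32_t last_rptr;
	uint64_t last_ms;	/* stand-in for lockup->last_jiffies */
};

/* Sketch of r100_gpu_cp_is_lockup(): not driver code, same decision tree. */
static bool cp_is_lockup(struct lockup_state *s, uint32_t rptr, uint64_t now_ms)
{
	if (rptr != s->last_rptr || now_ms - s->last_ms >= 3000) {
		/* CP made progress, or the sample is stale: rearm the tracker */
		s->last_rptr = rptr;
		s->last_ms = now_ms;
		return false;
	}
	/* rptr unchanged: report a hang if stalled for at least one second */
	return now_ms - s->last_ms >= 1000;
}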
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 rbbm_status;
+ int r;
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
+ u32 tmp;
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 26))) {
- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
}
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
+ struct r100_mc_save save;
+ u32 status, tmp;
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
- /* TODO: reset 3D engine */
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
void r100_set_common_regs(struct radeon_device *rdev)
{
+ struct drm_device *dev = rdev->ddev;
+ bool force_dac2 = false;
+ u32 tmp;
+
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +2144,74 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+ /* always set up dac2 on rn50 and some rv100 as lots
+ * of servers seem to wire it up to a VGA port but
+ * don't report it in the bios connector
+ * table.
+ */
+ switch (dev->pdev->device) {
+ /* RN50 */
+ case 0x515e:
+ case 0x5969:
+ force_dac2 = true;
+ break;
+ /* RV100*/
+ case 0x5159:
+ case 0x515a:
+ /* DELL triple head servers */
+ if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((dev->pdev->subsystem_device == 0x016c) ||
+ (dev->pdev->subsystem_device == 0x016d) ||
+ (dev->pdev->subsystem_device == 0x016e) ||
+ (dev->pdev->subsystem_device == 0x016f) ||
+ (dev->pdev->subsystem_device == 0x0170) ||
+ (dev->pdev->subsystem_device == 0x017d) ||
+ (dev->pdev->subsystem_device == 0x017e) ||
+ (dev->pdev->subsystem_device == 0x0183) ||
+ (dev->pdev->subsystem_device == 0x018a) ||
+ (dev->pdev->subsystem_device == 0x019a)))
+ force_dac2 = true;
+ break;
+ }
+
+ if (force_dac2) {
+ u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+ /* For CRT on DAC2, don't turn it on if the BIOS didn't
+ * enable it, even if it's detected.
+ */
+
+ /* force it to crtc0 */
+ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+ /* set up the TV DAC */
+ tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+ RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK);
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2 |
+ (0x58 << 16));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+
+ /* switch PM block to ACPI mode */
+ tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+ tmp &= ~RADEON_PM_MODE_SEL;
+ WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+
}
/*
@@ -1889,17 +2293,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
void r100_vram_init_sizes(struct radeon_device *rdev)
{
u64 config_aper_size;
- u32 accessible;
+ /* work out accessible VRAM */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+ /* FIXME we don't use the second aperture yet, though we could use it */
+ if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
-
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
/* read NB_TOM to get the amount of ram stolen for the GPU */
tom = RREG32(RADEON_NB_TOM);
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
- /* for IGPs we need to keep VRAM where it was put by the BIOS */
- rdev->mc.vram_location = (tom & 0xffff) << 16;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
} else {
@@ -1911,30 +2318,14 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* let driver place VRAM */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
- * Novell bug 204882 + along with lots of ubuntu ones */
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ * Novell bug 204882 + along with lots of ubuntu ones
+ */
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
-
- /* work out accessible VRAM */
- accessible = r100_get_accessible_vram(rdev);
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (accessible > rdev->mc.aper_size)
- accessible = rdev->mc.aper_size;
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -1951,11 +2342,20 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
WREG32(RADEON_CONFIG_CNTL, temp);
}
-void r100_vram_info(struct radeon_device *rdev)
+void r100_mc_init(struct radeon_device *rdev)
{
- r100_vram_get_type(rdev);
+ u64 base;
+ r100_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
@@ -2206,12 +2606,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
int surf_index = reg * 16;
int flags = 0;
- /* r100/r200 divide by 16 */
- if (rdev->family < CHIP_R300)
- flags = pitch / 16;
- else
- flags = pitch / 8;
-
if (rdev->family <= CHIP_RS200) {
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
@@ -2235,6 +2629,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
if (tiling_flags & RADEON_TILING_SWAP_32BIT)
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
+ /* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
+ if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
+ if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
+ if (ASIC_IS_RN50(rdev))
+ pitch /= 16;
+ }
+
+ /* r100/r200 divide by 16 */
+ if (rdev->family < CHIP_R300)
+ flags |= pitch / 16;
+ else
+ flags |= pitch / 8;
+
+
DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
@@ -2255,53 +2663,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init_half(3),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
- fixed_init(1),
- fixed_init_half(1),
- fixed_init(2),
- fixed_init_half(2),
- fixed_init(3),
- fixed_init_half(3),
- fixed_init(4),
- fixed_init_half(4)
+ dfixed_init(1),
+ dfixed_init_half(1),
+ dfixed_init(2),
+ dfixed_init_half(2),
+ dfixed_init(3),
+ dfixed_init_half(3),
+ dfixed_init(4),
+ dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
- fixed_init(8),
- fixed_init(9),
- fixed_init(10),
- fixed_init(11)
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ dfixed_init(8),
+ dfixed_init(9),
+ dfixed_init(10),
+ dfixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2319,6 +2727,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@@ -2330,7 +2740,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
- min_mem_eff.full = rfixed_const_8(0);
+ min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2347,35 +2757,32 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
* determine if there is enough bw for the current mode
*/
- mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
- temp_ff.full = rfixed_const(100);
- mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
- sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
- sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+ sclk_ff = rdev->pm.sclk;
+ mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
- temp_ff.full = rfixed_const(temp);
- mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
- temp_ff.full = rfixed_const(1000);
- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
- pix_clk.full = rfixed_div(pix_clk, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes1);
- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = dfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes1);
+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
- temp_ff.full = rfixed_const(1000);
- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
- pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes2);
- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes2);
+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2417,9 +2824,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
- trcd_ff.full = rfixed_const(mem_trcd);
- trp_ff.full = rfixed_const(mem_trp);
- tras_ff.full = rfixed_const(mem_tras);
+ trcd_ff.full = dfixed_const(mem_trcd);
+ trp_ff.full = dfixed_const(mem_trp);
+ tras_ff.full = dfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2437,7 +2844,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
- tcas_ff.full += rfixed_const(data);
+ tcas_ff.full += dfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2474,72 +2881,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
- agpmode_ff.full = rfixed_const(radeon_agpmode);
- temp_ff.full = rfixed_const_666(16);
- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+ agpmode_ff.full = dfixed_const(radeon_agpmode);
+ temp_ff.full = dfixed_const_666(16);
+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
- sclk_delay_ff.full = rfixed_const(250);
+ sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
else
- sclk_delay_ff.full = rfixed_const(33);
+ sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
- sclk_delay_ff.full = rfixed_const(57);
+ sclk_delay_ff.full = dfixed_const(57);
else
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
}
}
- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
} else {
- k1.full = rfixed_const(20);
+ k1.full = dfixed_const(20);
c = 1;
}
} else {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
}
- temp_ff.full = rfixed_const(2);
- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
- temp_ff.full = rfixed_const(c);
- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
- temp_ff.full = rfixed_const(4);
- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+ temp_ff.full = dfixed_const(2);
+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = dfixed_const(c);
+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = dfixed_const(4);
+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
- temp_ff.full = rfixed_const(cur_size);
- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+ temp_ff.full = dfixed_const(cur_size);
+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(8);
- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+ disp_latency_overhead.full = dfixed_const(8);
+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2567,16 +2974,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes1));
- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point = rfixed_trunc(crit_point_ff);
+ critical_point = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
@@ -2647,8 +3054,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes2));
- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2669,8 +3076,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
- temp_ff.full = rfixed_const(temp);
- temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
@@ -2678,15 +3085,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point2 = rfixed_trunc(crit_point_ff);
+ critical_point2 = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
@@ -2750,33 +3157,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("compress format %d\n", t->compress_format);
}
-static int r100_cs_track_cube(struct radeon_device *rdev,
- struct r100_cs_track *track, unsigned idx)
-{
- unsigned face, w, h;
- struct radeon_bo *cube_robj;
- unsigned long size;
-
- for (face = 0; face < 5; face++) {
- cube_robj = track->textures[idx].cube_info[face].robj;
- w = track->textures[idx].cube_info[face].width;
- h = track->textures[idx].cube_info[face].height;
-
- size = w * h;
- size *= track->textures[idx].cpp;
-
- size += track->textures[idx].cube_info[face].offset;
-
- if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
- r100_cs_track_texture_print(&track->textures[idx]);
- return -1;
- }
- }
- return 0;
-}
-
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
@@ -2807,12 +3187,43 @@ static int r100_track_compress_size(int compress_format, int w, int h)
return sz;
}
+static int r100_cs_track_cube(struct radeon_device *rdev,
+ struct r100_cs_track *track, unsigned idx)
+{
+ unsigned face, w, h;
+ struct radeon_bo *cube_robj;
+ unsigned long size;
+ unsigned compress_format = track->textures[idx].compress_format;
+
+ for (face = 0; face < 5; face++) {
+ cube_robj = track->textures[idx].cube_info[face].robj;
+ w = track->textures[idx].cube_info[face].width;
+ h = track->textures[idx].cube_info[face].height;
+
+ if (compress_format) {
+ size = r100_track_compress_size(compress_format, w, h);
+ } else
+ size = w * h;
+ size *= track->textures[idx].cpp;
+
+ size += track->textures[idx].cube_info[face].offset;
+
+ if (size > radeon_bo_size(cube_robj)) {
+ DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
+ r100_cs_track_texture_print(&track->textures[idx]);
+ return -1;
+ }
+ }
+ return 0;
+}
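
Since the compressed path now feeds the cube check too, a worked size example may help; the figures below assume the standard DXT1 layout (4x4 texel blocks, 8 bytes per block) and mirror what r100_track_compress_size() computes, without being the driver code.

/* Hypothetical DXT1 size helper; standard block layout assumed. */
static int dxt1_size(int w, int h)
{
	int bw = (w + 3) / 4;	/* block columns, rounded up */
	int bh = (h + 3) / 4;	/* block rows, rounded up */

	return bw * bh * 8;	/* e.g. 256x256 -> 64 * 64 * 8 = 32768 bytes */
}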
+
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
struct radeon_bo *robj;
unsigned long size;
- unsigned u, i, w, h;
+ unsigned u, i, w, h, d;
int ret;
for (u = 0; u < track->num_texture; u++) {
@@ -2844,20 +3255,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
+ if (track->textures[u].tex_coord_type == 1) {
+ d = (1 << track->textures[u].txdepth) / (1 << i);
+ if (!d)
+ d = 1;
+ } else {
+ d = 1;
+ }
if (track->textures[u].compress_format) {
- size += r100_track_compress_size(track->textures[u].compress_format, w, h);
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
- size += w * h;
+ size += w * h * d;
}
size *= track->textures[u].cpp;
switch (track->textures[u].tex_coord_type) {
case 0:
- break;
case 1:
- size *= (1 << track->textures[u].txdepth);
break;
case 2:
if (track->separate_cube) {
@@ -2891,7 +3307,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_cb; i++) {
if (track->cb[i].robj == NULL) {
- if (!(track->fastfill || track->color_channel_mask ||
+ if (!(track->zb_cb_clear || track->color_channel_mask ||
track->blend_read_enable)) {
continue;
}
@@ -2928,7 +3344,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
}
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
- nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ if (track->vap_vf_cntl & (1 << 14)) {
+ nverts = track->vap_alt_nverts;
+ } else {
+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ }
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
@@ -3226,10 +3646,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Update base address for crtc */
- WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
+ WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
- WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
- rdev->mc.vram_location);
+ WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
}
/* Restore CRTC registers */
WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3312,7 +3731,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
- r100_gpu_init(rdev);
+// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3349,7 +3768,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -3390,32 +3809,6 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-int r100_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_IGP) {
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
- rdev->mc.vram_location = tmp << 16;
- }
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r)
- return r;
- return 0;
-}
-
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3443,7 +3836,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -3456,14 +3849,15 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- r100_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r100_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize VRAM */
+ r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
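
The rfixed_* to dfixed_* rename in the bandwidth code above is mechanical, but the underlying arithmetic deserves a note: fixed20_12 is 20.12 fixed point, i.e. .full holds the value shifted left by 12 bits. A minimal sketch of the assumed semantics (hypothetical names, not the driver's implementation):

#include <stdint.h>

/* Minimal 20.12 fixed-point sketch matching the assumed semantics of the
 * dfixed_* helpers: .full stores the value shifted left by 12 bits.
 */
typedef struct { uint32_t full; } fx20_12;

static fx20_12 fx_const(uint32_t v)
{
	return (fx20_12){ v << 12 };
}

static fx20_12 fx_mul(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
}

static fx20_12 fx_div(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

static uint32_t fx_trunc(fx20_12 a)
{
	return a.full >> 12;
}

/* e.g. fx_trunc(fx_div(fx_const(300), fx_const(8))) == 37, since 300/8 = 37.5 */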
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index b27a6999d219..f47cdca1c004 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -64,6 +64,7 @@ struct r100_cs_track {
unsigned maxy;
unsigned vtx_size;
unsigned vap_vf_cntl;
+ unsigned vap_alt_nverts;
unsigned immd_dwords;
unsigned num_arrays;
unsigned max_indx;
@@ -74,7 +75,7 @@ struct r100_cs_track {
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
- bool fastfill;
+ bool zb_cb_clear;
bool blend_read_enable;
};
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c466..d016b16fa116 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
+/* PLL regs */
+#define SCLK_CNTL 0xd
+#define FORCE_HDP (1 << 17)
+#define CLK_PWRMGT_CNTL 0x14
+#define GLOBAL_PMAN_EN (1 << 10)
+#define DISP_PM (1 << 20)
+#define PLL_PWRMGT_CNTL 0x15
+#define MPLL_TURNOFF (1 << 0)
+#define SPLL_TURNOFF (1 << 1)
+#define PPLL_TURNOFF (1 << 2)
+#define P2PLL_TURNOFF (1 << 3)
+#define TVPLL_TURNOFF (1 << 4)
+#define MOBILE_SU (1 << 16)
+#define SU_SCLK_USE_BCLK (1 << 17)
+#define SCLK_CNTL2 0x1e
+#define REDUCED_SPEED_SCLK_MODE (1 << 16)
+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
+#define MCLK_MISC 0x1f
+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
+#define SCLK_MORE_CNTL 0x35
+#define REDUCED_SPEED_SCLK_EN (1 << 16)
+#define IO_CG_VOLTAGE_DROP (1 << 17)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
+#define VOLTAGE_DROP_SYNC (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN 0xd08
+#define DISP_D3_GRPH_RST (1 << 18)
+#define DISP_D3_SUBPIC_RST (1 << 19)
+#define DISP_D3_OV0_RST (1 << 20)
+#define DISP_D1D2_GRPH_RST (1 << 21)
+#define DISP_D1D2_SUBPIC_RST (1 << 22)
+#define DISP_D1D2_OV0_RST (1 << 23)
+#define DISP_DVO_ENABLE_RST (1 << 24)
+#define TV_ENABLE_RST (1 << 25)
+#define AUTO_PWRUP_EN (1 << 26)
#endif
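Note the two namespaces above: the PLL regs (SCLK_CNTL through SCLK_MORE_CNTL) are small indices into the clock block reached through the indirect PLL accessors, while DISP_PWR_MAN under the mmreg comment is an ordinary MMIO offset. A hedged sketch of the split, assuming the driver's existing RREG32_PLL/WREG32_PLL and RREG32/WREG32 helpers:

/* Illustration only: force the HDP clock on via the PLL block, then
 * enable display auto power-up through plain MMIO. */
u32 tmp = RREG32_PLL(SCLK_CNTL);
WREG32_PLL(SCLK_CNTL, tmp | FORCE_HDP);

tmp = RREG32(DISP_PWR_MAN);
WREG32(DISP_PWR_MAN, tmp | AUTO_PWRUP_EN);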
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd608bf..0266d72e0a4c 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -30,7 +30,9 @@
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
+#include "r100d.h"
#include "r200_reg_safe.h"
#include "r100_track.h"
@@ -79,6 +81,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
+int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence)
+{
+ uint32_t size;
+ uint32_t cur_size;
+ int i, num_loops;
+ int r = 0;
+
+ /* radeon pitch is /64 */
+ size = num_pages << PAGE_SHIFT;
+ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+ /* Must wait for 2D idle & clean before DMA or hangs might happen */
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (1 << 16));
+ for (i = 0; i < num_loops; i++) {
+ cur_size = size;
+ if (cur_size > 0x1FFFFF) {
+ cur_size = 0x1FFFFF;
+ }
+ size -= cur_size;
+ radeon_ring_write(rdev, PACKET0(0x720, 2));
+ radeon_ring_write(rdev, src_offset);
+ radeon_ring_write(rdev, dst_offset);
+ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ src_offset += cur_size;
+ dst_offset += cur_size;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence);
+ }
+ radeon_ring_unlock_commit(rdev);
+ return r;
+}
+
+
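The per-packet byte count is capped at 0x1FFFFF because bits 30 and 31 of that dword carry flags, so a single DMA packet moves just under 2 MiB and larger copies are split across num_loops packets. A quick check of the arithmetic, assuming 4 KiB pages:

/* A 4 MiB move (1024 pages) needs three chunks, not two, because
 * 0x1FFFFF is one byte short of 2 MiB. */
unsigned num_pages = 1024;
u32 size = num_pages << PAGE_SHIFT;		/* 0x400000 */
int num_loops = DIV_ROUND_UP(size, 0x1FFFFF);	/* = 3 */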
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
@@ -368,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
+ case 3:
+ case 4:
case 5:
case 6:
case 7:
@@ -403,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
@@ -414,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_DVDU88:
case R200_TXFORMAT_AVYU4444:
track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_ARGB8888:
case R200_TXFORMAT_RGBA8888:
@@ -421,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..19a7ef7ee344 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,10 +26,13 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
-#include "drmP.h"
-#include "drm.h"
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
@@ -117,18 +120,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
- WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+ tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
table_addr = rdev->gart.table_addr;
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
/* FIXME: setup default page */
- WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
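The END register takes the address of the last GPU page in the aperture, which is why gtt_end is masked down to a page boundary instead of being programmed raw. A worked example, assuming gtt_end is inclusive, 4 KiB GPU pages, and RADEON_GPU_PAGE_MASK == RADEON_GPU_PAGE_SIZE - 1:

/* 32 MiB GTT starting at 0: gtt_end = 0x01FFFFFF */
u32 end = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;	/* 0x01FFF000 */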
@@ -148,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -163,9 +171,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rv370_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -174,18 +182,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
- radeon_ring_write(rdev, (2 << 0));
- radeon_ring_write(rdev, PACKET0(0x4F18, 0));
- radeon_ring_write(rdev, (1 << 0));
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +208,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- uint32_t size;
- uint32_t cur_size;
- int i, num_loops;
- int r = 0;
-
- /* radeon pitch is /64 */
- size = num_pages << PAGE_SHIFT;
- num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
- /* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
- radeon_ring_write(rdev, (1 << 16));
- for (i = 0; i < num_loops; i++) {
- cur_size = size;
- if (cur_size > 0x1FFFFF) {
- cur_size = 0x1FFFFF;
- }
- size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
- src_offset += cur_size;
- dst_offset += cur_size;
- }
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
- if (fence) {
- r = radeon_fence_emit(rdev, fence);
- }
- radeon_ring_unlock_commit(rdev);
- return r;
-}
-
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
@@ -281,8 +247,8 @@ void r300_ring_start(struct radeon_device *rdev)
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(0x170C, 0));
- radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +315,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 4)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & R300_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -362,13 +328,12 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
- r100_hdp_reset(rdev);
- /* FIXME: rv380 one pipes ? */
- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+ (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
- /* rv350,rv370,rv380 */
+ /* rv350,rv370,rv380,r300 AD, r350 AH */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
@@ -395,8 +360,8 @@ void r300_gpu_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
R300_DC_AUTOFLUSH_ENABLE |
@@ -414,99 +379,95 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
- bool reinit_cp;
- int i;
+ u32 rbbm_status;
+ int r;
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
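The two 0x80000000 writes are type-2 CP packets: bits [31:30] of a header dword select the packet type, and type 2 is a no-op filler, so emitting a pair forces the CP to advance through ring space without side effects. A sketch of the encoding check (is_cp_packet2 is a hypothetical helper; the layout follows from the constant used here):

static bool is_cp_packet2(u32 header)
{
	/* bits [31:30]: 0 = packet0, 1 = packet1, 2 = packet2 (NOP) */
	return (header >> 30) == 0x2;
}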
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- r300_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
+ struct r100_mc_save save;
+ u32 status, tmp;
+
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* Resetting the CP seems to be problematic: sometimes it ends up
+ * hard locking the computer, but it's necessary for a successful
+ * reset. More testing is needed on R3XX/R4XX to find a reliable
+ * solution (if one exists).
+ */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
-
/*
* r300,r350,rv350,rv380 VRAM info
*/
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u64 base;
+ u32 tmp;
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
-
tmp = RREG32(RADEON_MEM_CNTL);
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
@@ -515,8 +476,15 @@ void r300_vram_info(struct radeon_device *rdev)
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}
-
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +546,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
}
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ if (rdev->family < CHIP_R600)
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ else
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
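A hedged usage sketch pairing the new getter with the existing rv370_set_pcie_lanes(): save the negotiated width, drop to x1 for a low-power interval, then restore. Error handling and the idle wait flagged by the FIXME above are omitted:

int saved = rv370_get_pcie_lanes(rdev);

rv370_set_pcie_lanes(rdev, 1);		/* enter low-power link width */
/* ... idle period ... */
rv370_set_pcie_lanes(rdev, saved);	/* restore the original width */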
+
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
@@ -707,6 +709,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
@@ -726,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
+ case 0x2088:
+ /* VAP_ALT_NUM_VERTICES - only valid on r500 */
+ if (p->rdev->family < CHIP_RV515)
+ goto fail;
+ track->vap_alt_nverts = idx_value & 0xFFFFFF;
+ break;
case 0x43E4:
/* SC_SCISSOR1 */
track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -757,11 +767,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
-
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -828,7 +839,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;;
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -869,6 +882,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Y4X4:
case R300_TX_FORMAT_Z3Y3X2:
track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
case R300_TX_FORMAT_Y8X8:
@@ -880,6 +894,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
case R300_TX_FORMAT_Z11Y11X10:
@@ -890,14 +905,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_I32:
case 0x1e:
track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
case R300_TX_FORMAT_FL_R16G16B16A16:
case R300_TX_FORMAT_FL_I32A32:
track->textures[i].cpp = 8;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_DXT1:
track->textures[i].cpp = 1;
@@ -1032,7 +1050,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4d1c:
/* ZB_BW_CNTL */
- track->fastfill = !!(idx_value & (1 << 2));
+ track->zb_cb_clear = !!(idx_value & (1 << 5));
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
@@ -1044,11 +1062,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
/* fallthrough do not move */
default:
- printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
- reg, idx);
- return -EINVAL;
+ goto fail;
}
return 0;
+fail:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
}
static int r300_packet3_check(struct radeon_cs_parser *p,
@@ -1157,6 +1177,8 @@ int r300_cs_parse(struct radeon_cs_parser *p)
int r;
track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
@@ -1302,7 +1324,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -1372,7 +1394,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -1385,14 +1407,15 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b73..c5c2742e4140 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
int reg;
int sz;
int i;
- int values[64];
+ u32 *value;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if ((sz > 64) || (sz < 0)) {
- DRM_ERROR
- ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
- reg, sz);
+ DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
+
for (i = 0; i < sz; i++) {
- values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
- if (!radeon_check_offset(dev_priv, (u32) values[i])) {
- DRM_ERROR
- ("Offset failed range check (reg=%04x sz=%d)\n",
- reg, sz);
+ value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *value)) {
+ DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
- reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+ reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return -EINVAL;
}
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE(values, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
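This hunk is representative of the whole conversion: instead of copying up to 64 dwords from cmdbuf->buf into a stack array, the checker now peeks at dwords in place through the drm_buffer helpers, and OUT_RING_DRM_BUFFER emits them straight from the buffer, so the manual buf/bufsz bookkeeping disappears. The access pattern, with semantics inferred from the call sites in this patch:

/* Sketch only; drm_buffer keeps an internal iterator. */
if (drm_buffer_unprocessed(cmdbuf->buffer) < sz * 4)
	return -EINVAL;			/* not enough unconsumed bytes */
for (i = 0; i < sz; i++) {
	u32 *value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
	/* validate *value in place; nothing has been consumed yet */
}
OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);	/* emits sz dwords and advances the iterator */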
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 16 > cmdbuf->bufsz)
+ if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
OUT_RING(0);
ADVANCE_RING();
- cmdbuf->buf += sz * 16;
- cmdbuf->bufsz -= sz * 16;
-
return 0;
}
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
{
RING_LOCALS;
- if (8 * 4 > cmdbuf->bufsz)
+ if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
- OUT_RING_TABLE((int *)cmdbuf->buf, 8);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
ADVANCE_RING();
BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
- cmdbuf->buf += 8 * 4;
- cmdbuf->bufsz -= 8 * 4;
-
return 0;
}
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
{
int count, i, k;
#define MAX_ARRAY_PACKET 64
- u32 payload[MAX_ARRAY_PACKET];
+ u32 *data;
u32 narrays;
RING_LOCALS;
- count = (header >> 16) & 0x3fff;
+ count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return -EINVAL;
}
- memset(payload, 0, MAX_ARRAY_PACKET * 4);
- memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
-
/* carefully check packet contents */
- narrays = payload[0];
+ /* We have already read the header so advance the buffer. */
+ drm_buffer_advance(cmdbuf->buffer, 4);
+
+ narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if (k == narrays)
break;
/* have one more to process, they come in pairs */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
BEGIN_RING(count + 2);
OUT_RING(header);
- OUT_RING_TABLE(payload, count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
int count, ret;
RING_LOCALS;
- count=(cmd[0]>>16) & 0x3fff;
- if (cmd[0] & 0x8000) {
- u32 offset;
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if (*cmd & 0x8000) {
+ u32 offset;
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
int count;
int expected_count;
RING_LOCALS;
- cmd = (u32 *) cmdbuf->buf;
- count = (cmd[0]>>16) & 0x3fff;
- expected_count = cmd[1] >> 16;
- if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ expected_count = *cmd1 >> 16;
+ if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
if (!count) {
- drm_r300_cmd_header_t header;
+ drm_r300_cmd_header_t stack_header, *header;
+ u32 *cmd1, *cmd2, *cmd3;
- if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer)
+ < 4*4 + sizeof(stack_header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
- header.u = *(unsigned int *)cmdbuf->buf;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
- cmd = (u32 *) cmdbuf->buf;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- if (header.header.cmd_type != R300_CMD_PACKET3 ||
- header.packet3.packet != R300_CMD_PACKET3_RAW ||
- cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ if (header->header.cmd_type != R300_CMD_PACKET3 ||
+ header->packet3.packet != R300_CMD_PACKET3_RAW ||
+ *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+ *cmd1);
return -EINVAL;
}
- if (!radeon_check_offset(dev_priv, cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ if (!radeon_check_offset(dev_priv, *cmd2)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+ *cmd2);
return -EINVAL;
}
- if (cmd[3] != expected_count) {
+ if (*cmd3 != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
- cmd[3], expected_count);
+ *cmd3, expected_count);
return -EINVAL;
}
BEGIN_RING(4);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
ADVANCE_RING();
-
- cmdbuf->buf += 4*4;
- cmdbuf->bufsz -= 4*4;
}
return 0;
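drm_buffer_read_object() is the one helper here that consumes data as it reads: it returns a pointer to the object (falling back to a copy into the caller-supplied stack object when the data straddles an internal buffer page) and advances the iterator past it, which is why headers are now read through a stack_header. The idiom in isolation, assuming those semantics:

drm_r300_cmd_header_t stack_header, *header;

header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);
/* the iterator now points at the packet body; header may alias either
 * the buffer or stack_header, so treat it as read-only */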
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 header;
+ u32 *header;
int count;
RING_LOCALS;
- if (4 > cmdbuf->bufsz)
+ if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
- header = *(u32 *) cmdbuf->buf;
+ header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
/* Is it packet 3 ? */
- if ((header >> 30) != 0x3) {
- DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+ if ((*header >> 30) != 0x3) {
+ DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
- count = (header >> 16) & 0x3fff;
+ count = (*header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
- if ((count + 2) * 4 > cmdbuf->bufsz) {
+ if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
- (count + 2) * 4, cmdbuf->bufsz);
+ (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
return -EINVAL;
}
/* Is it a packet type we know about ? */
- switch (header & 0xff00) {
+ switch (*header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
- return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* these packets are safe */
break;
default:
- DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+ DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
BEGIN_RING(count + 2);
- OUT_RING(header);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
{
int n;
int ret;
- char *orig_buf = cmdbuf->buf;
- int orig_bufsz = cmdbuf->bufsz;
+ int orig_iter = cmdbuf->buffer->iterator;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
if (ret)
return ret;
- cmdbuf->buf = orig_buf;
- cmdbuf->bufsz = orig_bufsz;
+ cmdbuf->buffer->iterator = orig_iter;
}
switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
break;
default:
- DRM_ERROR("bad packet3 type %i at %p\n",
+ DRM_ERROR("bad packet3 type %i at byte %d\n",
header.packet3.packet,
- cmdbuf->buf - sizeof(header));
+ cmdbuf->buffer->iterator - (int)sizeof(header));
return -EINVAL;
}
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
- u32 i, buf_idx, h_pending;
- u64 ptr_addr;
+ u32 i, *buf_idx, h_pending;
+ u64 *ptr_addr;
+ u64 stack_ptr_addr;
RING_LOCALS;
- if (cmdbuf->bufsz <
- (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer) <
+ (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
return -EINVAL;
}
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
dev_priv->scratch_ages[header.scratch.reg]++;
- ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
- ref_age_base = (u32 *)(unsigned long)ptr_addr;
-
- cmdbuf->buf += sizeof(u64);
- cmdbuf->bufsz -= sizeof(u64);
+ ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_ptr_addr), &stack_ptr_addr);
+ ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
for (i=0; i < header.scratch.n_bufs; i++) {
- buf_idx = *(u32 *)cmdbuf->buf;
- buf_idx *= 2; /* 8 bytes per buf */
+ buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ *buf_idx *= 2; /* 8 bytes per buf */
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ &dev_priv->scratch_ages[header.scratch.reg],
+ sizeof(u32)))
return -EINVAL;
- }
- if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+ if (DRM_COPY_FROM_USER(&h_pending,
+ ref_age_base + *buf_idx + 1,
+ sizeof(u32)))
return -EINVAL;
- }
- if (h_pending == 0) {
+ if (h_pending == 0)
return -EINVAL;
- }
h_pending--;
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ &h_pending,
+ sizeof(u32)))
return -EINVAL;
- }
- cmdbuf->buf += sizeof(buf_idx);
- cmdbuf->bufsz -= sizeof(buf_idx);
+ drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
}
BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
- if (sz * stride * 4 > cmdbuf->bufsz)
+ if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
ADVANCE_RING();
- cmdbuf->buf += sz * stride * 4;
- cmdbuf->bufsz -= sz * stride * 4;
-
return 0;
}
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
- while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer)
+ >= sizeof(drm_r300_cmd_header_t)) {
int idx;
- drm_r300_cmd_header_t header;
-
- header.u = *(unsigned int *)cmdbuf->buf;
+ drm_r300_cmd_header_t *header, stack_header;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
- ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
- ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+ ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
- ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
int i;
RING_LOCALS;
- BEGIN_RING(header.delay.count);
- for (i = 0; i < header.delay.count; i++)
+ BEGIN_RING(header->delay.count);
+ for (i = 0; i < header->delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_WAIT:
DRM_DEBUG("R300_CMD_WAIT\n");
- r300_cmd_wait(dev_priv, header);
+ r300_cmd_wait(dev_priv, *header);
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
- ret = r300_scratch(dev_priv, cmdbuf, header);
+ ret = r300_scratch(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
- ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+ ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
- DRM_ERROR("bad cmd_type %i at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %i at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator - (int)sizeof(*header));
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b69580..1a0d5362cd79 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
+# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de9..968a33317fbf 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d9373246c97f..e6c89142bb4d 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,40 +26,59 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"
-static void r420_set_reg_safe(struct radeon_device *rdev)
+void r420_pm_init_profile(struct radeon_device *rdev)
{
- rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
- rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
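Each profile entry names a power-state index and a clock-mode index for both the DPMS-on and DPMS-off cases; index 0 is presumably the lowest state, with the default index reused where the chip offers nothing better. A hypothetical lookup sketch (field names as above; the actual selection logic lives elsewhere in the power-management code, not in this hunk):

struct radeon_pm_profile *p = &rdev->pm.profiles[PM_PROFILE_MID_SH_IDX];
int ps_idx = dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
int cm_idx = dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;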
-int r420_mc_init(struct radeon_device *rdev)
+static void r420_set_reg_safe(struct radeon_device *rdev)
{
- int r;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
- return 0;
+ rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
void r420_pipes_init(struct radeon_device *rdev)
@@ -69,7 +88,8 @@ void r420_pipes_init(struct radeon_device *rdev)
unsigned num_pipes;
/* GA_ENHANCE workaround TCL deadlock issue */
- WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+ WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+ (1 << 2) | (1 << 3));
/* add idle wait as per freedesktop.org bug 24041 */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -78,6 +98,12 @@ void r420_pipes_init(struct radeon_device *rdev)
/* get max number of pipes */
gb_pipe_select = RREG32(0x402C);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+ /* SE chips have 1 pipe */
+ if ((rdev->pdev->device == 0x5e4c) ||
+ (rdev->pdev->device == 0x5e4f))
+ num_pipes = 1;
+
rdev->num_gb_pipes = num_pipes;
tmp = 0;
switch (num_pipes) {
@@ -97,17 +123,17 @@ void r420_pipes_init(struct radeon_device *rdev)
tmp = (7 << 1);
break;
}
- WREG32(0x42C8, (1 << num_pipes) - 1);
+ WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
- tmp |= (1 << 4) | (1 << 0);
- WREG32(0x4018, tmp);
+ tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+ WREG32(R300_GB_TILE_CONFIG, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -254,7 +280,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -334,7 +360,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -346,15 +372,15 @@ int r420_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r) {
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
}
+ /* initialize memory controller */
+ r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89bdf2b5..93c9a2bbccf8 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@@ -488,6 +490,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
@@ -717,54 +720,62 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
#define AVIVO_DC_GPIO_HPD_A 0x7e94
-
-#define AVIVO_GPIO_0 0x7e30
-#define AVIVO_GPIO_1 0x7e40
-#define AVIVO_GPIO_2 0x7e50
-#define AVIVO_GPIO_3 0x7e60
-
#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
-#define AVIVO_I2C_STATUS 0x7d30
-# define AVIVO_I2C_STATUS_DONE (1 << 0)
-# define AVIVO_I2C_STATUS_NACK (1 << 1)
-# define AVIVO_I2C_STATUS_HALT (1 << 2)
-# define AVIVO_I2C_STATUS_GO (1 << 3)
-# define AVIVO_I2C_STATUS_MASK 0x7
-/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
- * DONE? */
-# define AVIVO_I2C_STATUS_CMD_RESET 0x7
-# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
-#define AVIVO_I2C_STOP 0x7d34
-#define AVIVO_I2C_START_CNTL 0x7d38
-# define AVIVO_I2C_START (1 << 8)
-# define AVIVO_I2C_CONNECTOR0 (0 << 16)
-# define AVIVO_I2C_CONNECTOR1 (1 << 16)
-#define R520_I2C_START (1<<0)
-#define R520_I2C_STOP (1<<1)
-#define R520_I2C_RX (1<<2)
-#define R520_I2C_EN (1<<8)
-#define R520_I2C_DDC1 (0<<16)
-#define R520_I2C_DDC2 (1<<16)
-#define R520_I2C_DDC3 (2<<16)
-#define R520_I2C_DDC_MASK (3<<16)
-#define AVIVO_I2C_CONTROL2 0x7d3c
-# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
-# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
-#define AVIVO_I2C_CONTROL3 0x7d40
-/* Reading is done 4 bytes at a time: read the bottom 8 bits from
- * 7d44, four times in a row.
- * Writing is a little more complex. First write DATA with
- * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
- * magic number, zz is, I think, the slave address, and yy is the byte
- * you want to write. */
-#define AVIVO_I2C_DATA 0x7d44
-#define R520_I2C_ADDR_COUNT_MASK (0x7)
-#define R520_I2C_DATA_COUNT_SHIFT (8)
-#define R520_I2C_DATA_COUNT_MASK (0xF00)
-#define AVIVO_I2C_CNTL 0x7d50
-# define AVIVO_I2C_EN (1 << 0)
-# define AVIVO_I2C_RESET (1 << 8)
+#define AVIVO_DC_I2C_STATUS1 0x7d30
+# define AVIVO_DC_I2C_DONE (1 << 0)
+# define AVIVO_DC_I2C_NACK (1 << 1)
+# define AVIVO_DC_I2C_HALT (1 << 2)
+# define AVIVO_DC_I2C_GO (1 << 3)
+#define AVIVO_DC_I2C_RESET 0x7d34
+# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
+# define AVIVO_DC_I2C_ABORT (1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 0x7d38
+# define AVIVO_DC_I2C_START (1 << 0)
+# define AVIVO_DC_I2C_STOP (1 << 1)
+# define AVIVO_DC_I2C_RECEIVE (1 << 2)
+# define AVIVO_DC_I2C_EN (1 << 8)
+# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
+# define AVIVO_SEL_DDC1 0
+# define AVIVO_SEL_DDC2 1
+# define AVIVO_SEL_DDC3 2
+#define AVIVO_DC_I2C_CONTROL2 0x7d3c
+# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
+# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 0x7d40
+# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
+# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
+# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
+# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
+# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
+# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
+#define AVIVO_DC_I2C_DATA 0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
+# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
+# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
+# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 0x7d50
+# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
+# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
+# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
+# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
+# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
+# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
+
+#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
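
The renamed DC_I2C registers above describe the display-block hardware I2C engine. As a rough orientation only -- an illustrative sketch, not a validated programming sequence (the real user is the radeon i2c code) -- a single software-driven read could look like this, assuming WREG32/RREG32 and udelay are available as elsewhere in the driver:

/* Illustrative sketch only -- not a validated programming sequence. */
static int avivo_dc_i2c_sketch_read_byte(struct radeon_device *rdev, u8 addr)
{
	u32 status = 0;
	unsigned i;

	/* ask the arbiter for the software slot */
	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
	/* one address byte, one data byte */
	WREG32(AVIVO_DC_I2C_CONTROL2,
	       AVIVO_DC_I2C_ADDR_COUNT(1) | AVIVO_DC_I2C_DATA_COUNT(1));
	WREG32(AVIVO_DC_I2C_DATA, addr | 1);	/* read address */
	WREG32(AVIVO_DC_I2C_CONTROL1,
	       AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1) | AVIVO_DC_I2C_EN |
	       AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_RECEIVE);
	WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
	for (i = 0; i < 1000; i++) {		/* arbitrary timeout */
		status = RREG32(AVIVO_DC_I2C_STATUS1);
		if (status & AVIVO_DC_I2C_DONE)
			break;
		udelay(10);
	}
	/* hand the bus back */
	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
	if (status & AVIVO_DC_I2C_NACK)
		return -EIO;
	return RREG32(AVIVO_DC_I2C_DATA) & 0xff;
}
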
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731eba0d..694af7cc23ac 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -27,6 +27,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
@@ -52,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@@ -119,19 +119,16 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}
-void r520_vram_info(struct radeon_device *rdev)
+void r520_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
r520_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void r520_mc_program(struct radeon_device *rdev)
@@ -212,7 +209,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -249,7 +246,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -265,14 +262,15 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- r520_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..e100f69faeec 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -25,12 +25,14 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
@@ -42,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -66,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -73,6 +90,499 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ /* power state array is low to high, default is first */
+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+ int min_power_state_index = 0;
+
+ if (rdev->pm.num_power_states > 2)
+ min_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = min_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == min_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else {
+ if (rdev->pm.current_power_state_index == 0)
+ rdev->pm.requested_power_state_index =
+ rdev->pm.num_power_states - 1;
+ else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ } else {
+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+ /* for now just select the first power state and switch between clock modes */
+ /* power state array is low to high, default is first (0) */
+ if (rdev->pm.active_crtc_count > 1) {
+ rdev->pm.requested_power_state_index = -1;
+ /* start at 1 as we don't want the default mode */
+ for (i = 1; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ /* if nothing selected, grab the default state. */
+ if (rdev->pm.requested_power_state_index == -1)
+ rdev->pm.requested_power_state_index = 0;
+ } else
+ rdev->pm.requested_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index == 0) {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index - 1;
+ } else {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_clock_mode_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index ==
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index + 1;
+ } else {
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+ rdev->pm.dynpm_can_upclock = false;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ }
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
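To make the state walk above concrete (hypothetical numbers): with num_power_states = 3, a single active crtc and current index 2 on an IGP, DYNPM_ACTION_DOWNCLOCK requests index 1; a second DOWNCLOCK finds the current index already at min_power_state_index, keeps it and clears dynpm_can_downclock, so the worker stops trying to go lower.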
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states == 2) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else if (rdev->pm.num_power_states == 3) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->family == CHIP_R600) {
+ /* XXX */
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ if (rdev->pm.num_power_states < 4) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ }
+ }
+}
+
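Each profile entry above is a four-tuple: the power-state index and clock-mode index to use while displays are off (dpms_off_*) and while they are on (dpms_on_*). For instance, in the four-or-more-state branch, the high-sh profile on a desktop part resolves both state indices through r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0) and selects clock mode 2 when the display is on.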
+void r600_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+ return false;
+ else
+ return true;
+}
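
r600_gui_idle() is a one-shot check; the driver itself sleeps on the GUI_IDLE interrupt (see GUI_IDLE_INT_ENABLE and IH source 233 below). A hypothetical bounded polling wrapper, for illustration only:

/* Hypothetical helper, not part of this patch. */
static bool r600_wait_gui_idle_poll(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < 1000; i++) {	/* arbitrary ~10ms budget */
		if (r600_gui_idle(rdev))
			return true;
		udelay(10);
	}
	return false;
}
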
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -353,23 +863,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
* R600 PCIE GART
*/
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
- u64 pte;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages)
- return -EINVAL;
- pte = 0;
- writeq(pte, ((void __iomem *)ptr) + (i * 8));
- return 0;
-}
-
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
+ /* flush hdp cache so updates hit vram */
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +917,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -499,9 +1001,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
@@ -600,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
- WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
@@ -619,9 +1121,71 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address as in the CPU (PCI)
+ * address space, as some GPUs seem to have issues when it is reprogrammed to
+ * a different address space.
+ *
+ * If there is not enough space to fit the CPU-invisible VRAM after the
+ * aperture then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP then we place VRAM adjacent to the AGP aperture, as we
+ * need them to form one region from the GPU's point of view so that we can
+ * program the GPU to catch accesses outside them (weird GPU policy, see ??).
+ *
+ * This function never fails; the worst case is limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ u64 base = 0;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
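A hypothetical AGP layout to make the arithmetic concrete: with the GTT aperture at 0xD0000000-0xDFFFFFFF, size_bf = 0xD0000000 (about 3.25GB below) and size_af is roughly 0x20000000 (512MB above), so size_bf > size_af and VRAM goes directly below the aperture; 512MB of VRAM then lands at vram_start = 0xB0000000 with vram_end = 0xCFFFFFFF, giving the MC one contiguous VRAM+AGP window.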
int r600_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u32 tmp;
int chansize, numchan;
@@ -658,75 +1222,14 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r600_vram_gtt_location(rdev, &rdev->mc);
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
- rdev->mc.gtt_location = 0;
- } else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
- }
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
- if (rdev->flags & RADEON_IS_IGP)
+ if (rdev->flags & RADEON_IS_IGP) {
+ rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
+ }
+ radeon_update_bandwidth_info(rdev);
return 0;
}
@@ -753,7 +1256,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@@ -768,7 +1270,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -787,72 +1289,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- /* Reset others GPU block if necessary */
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_IH(1);
- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
- udelay(50);
+ mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+ int r;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
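
A hypothetical caller sketch tying the two hooks together (the real policy goes through the per-asic function table in the fence wait path): detect a lockup, then reset.

/* Hypothetical sketch; actual callers use the radeon_asic_* wrappers. */
static void example_handle_stuck_fence(struct radeon_device *rdev)
{
	if (r600_gpu_is_lockup(rdev)) {
		dev_warn(rdev->dev, "GPU lockup, resetting\n");
		r600_asic_reset(rdev);
	}
}
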
@@ -981,6 +1467,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
+ u32 backend_map;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
@@ -1090,8 +1579,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
+ rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1101,24 +1593,34 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
- tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- rdev->config.r600.max_backends,
- (0xff << rdev->config.r600.max_backends) & 0xff);
- tiling_config |= BACKEND_MAP(tmp);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
+ backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+
+ tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
- WREG32(CC_RB_BACKEND_DISABLE, tmp);
-
/* Setup pipes */
- tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+ tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1454,10 +1956,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1571,12 +2094,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@@ -1783,12 +2309,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
@@ -2033,8 +2564,6 @@ int r600_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2271,10 +2800,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770) {
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2311,7 +2841,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2341,7 +2876,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2378,19 +2913,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD4_INT_CONTROL, tmp);
if (ASIC_IS_DCE32(rdev)) {
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, 0);
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, 0);
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
@@ -2456,7 +2991,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
- r600_disable_interrupt_state(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -2466,7 +3004,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
- r600_disable_interrupts(rdev);
+ r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@@ -2481,6 +3019,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2494,7 +3034,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
+ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
+ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2504,6 +3046,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
+ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2545,10 +3088,25 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.hdmi[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 1\n");
+ hdmi1 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.hdmi[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 2\n");
+ hdmi2 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2558,6 +3116,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2641,6 +3200,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ } else {
+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ }
}
void r600_irq_disable(struct radeon_device *rdev)
@@ -2694,6 +3265,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@@ -2745,6 +3318,8 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -2765,6 +3340,8 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -2812,14 +3389,14 @@ restart_ih:
break;
case 10:
if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont &= ~DC_HPD5_INTERRUPT;
+ disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont &= ~DC_HPD6_INTERRUPT;
+ disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -2829,6 +3406,10 @@ restart_ih:
break;
}
break;
+ case 21: /* HDMI */
+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+ r600_audio_schedule_polling(rdev);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -2838,6 +3419,11 @@ restart_ih:
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4ff..2b26553c352c 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
/*
* current number of channels
*/
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
/*
* current bits per sample
*/
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
/*
* current sampling rate in HZ
*/
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
/*
* iec 60958 status bits
*/
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
/*
* iec 60958 category code
*/
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
- int changes = 0;
+ int changes = 0, still_going = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
@@ -135,15 +144,22 @@ static void r600_audio_update_hdmi(unsigned long param)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
+ r600_hdmi_update_audio_settings(encoder);
}
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ if (still_going)
+ r600_audio_schedule_polling(rdev);
+}
+
+/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+ DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
}
/*
@@ -151,11 +167,10 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
int r600_audio_init(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+ r600_audio_engine_enable(rdev, true);
rdev->audio_channels = -1;
rdev->audio_rate = -1;
@@ -168,44 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_update_hdmi,
(unsigned long)rdev);
- mod_timer(&rdev->audio_timer, jiffies + 1);
-
return 0;
}
/*
- * determin how the encoders and audio interface is wired together
+ * enable the polling timer to check for status changes
*/
-int r600_audio_tmds_index(struct drm_encoder *encoder)
+void r600_audio_enable_polling(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *other;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- /* special case check if an TMDS1 is present */
- list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
- if (to_radeon_encoder(other)->encoder_id ==
- ENCODER_OBJECT_ID_INTERNAL_TMDS1)
- return 1;
- }
- return 0;
+ DRM_DEBUG("r600_audio_enable_polling: %d\n", radeon_encoder->audio_polling_active);
+ if (radeon_encoder->audio_polling_active)
+ return;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- return 1;
+ radeon_encoder->audio_polling_active = 1;
+ mod_timer(&rdev->audio_timer, jiffies + 1);
+}
- default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return -1;
- }
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("r600_audio_disable_polling: %d\n", radeon_encoder->audio_polling_active);
+ radeon_encoder->audio_polling_active = 0;
}
/*
@@ -216,6 +221,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@@ -223,32 +229,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
break;
-
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
-
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
- switch (r600_audio_tmds_index(encoder)) {
+ switch (dig->dig_encoder) {
case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
break;
case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
+ default:
+ dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
}
}
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return;
del_timer(&rdev->audio_timer);
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+
+ r600_audio_engine_enable(rdev, false);
}
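
The audio changes above convert the update timer from free-running to demand-driven: r600_audio_enable_polling() arms it, the callback re-arms it only while at least one encoder still has audio_polling_active set, and r600_audio_disable_polling() lets it lapse. A condensed sketch of that self-rearming pattern with the old timer API this kernel uses; the interval constant is illustrative (the driver's AUDIO_TIMER_INTERVALL is defined elsewhere):

#define POLL_INTERVAL_MS 100			/* illustrative, not the driver's value */

static struct timer_list poll_timer;
static int still_polling;			/* stands in for audio_polling_active */

static void poll_cb(unsigned long data)
{
	/* ... sample hardware state and push any changes ... */
	if (still_polling)
		mod_timer(&poll_timer,
			  jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
	/* otherwise the timer is simply not re-armed and goes quiet */
}
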
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea432347589..ca5c29f70779 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
RING_LOCALS;
DRM_DEBUG("\n");
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
@@ -538,9 +538,12 @@ int
r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ int ret;
DRM_DEBUG("\n");
- r600_nomm_get_vb(dev);
+ ret = r600_nomm_get_vb(dev);
+ if (ret)
+ return ret;
dev_priv->blit_vb->file_priv = file_priv;
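
The ALIGN() substitutions in this file are behavior-preserving: for a power-of-two boundary a, ALIGN(x, a) expands to (x + a - 1) & ~(a - 1), so ALIGN(h, 8) is exactly the old (h + 7) & ~7. For reference, a local sketch of the macro with worked values:

/* ALIGN(13, 8) == (13 + 7) & ~7 == 16; ALIGN(16, 8) == 16 (unchanged) */
#define MY_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* sketch of the kernel macro */
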
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765ac72a..d13622ae74e9 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
u32 cb_color_info;
int pitch, slice;
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(num_es_stack_entries));
/* emit an IB pointing at default state */
- dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -449,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ /* don't reinitialize blit */
+ if (rdev->r600_blit.shader_obj)
+ return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
@@ -578,9 +579,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 7; /* fence emit for VB IB */
+ ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 7; /* fence emit for done copy */
+ ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
@@ -594,13 +595,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
- /* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e815c2e8..0271b53fa2dd 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -1,7 +1,42 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
#include <linux/types.h>
#include <linux/kernel.h>
+/*
+ * R6xx+ cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
const u32 r6xx_default_state[] =
{
0xc0002400,
@@ -9,11 +44,6 @@ const u32 r6xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
@@ -531,11 +561,6 @@ const u32 r7xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35a0931..68e6f4349309 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
u32 hdp_host_path_cntl;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 ramcfg;
/* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+ backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
num_qd_pipes =
- R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
}
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
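
The swizzle tables above only pick an ordering of pipe indices; the rest of the function (outside this hunk) walks that order and round-robins the enabled backends into a 2-bits-per-pipe map. A hedged sketch of that tail, mirroring the r600 variant of the helper — the names and the 8-backend bound are assumptions:

static u32 build_backend_map(u32 num_pipes, const u32 *swizzle_pipe,
			     u8 enabled_backends_mask)
{
	u32 backend_map = 0;
	u32 cur_backend = 0;
	u32 i;

	for (i = 0; i < num_pipes; i++) {
		/* skip backends that are fused off / disabled */
		while (!(enabled_backends_mask & (1 << cur_backend)))
			cur_backend = (cur_backend + 1) % 8;
		backend_map |= (cur_backend & 3) << (swizzle_pipe[i] * 2);
		cur_backend = (cur_backend + 1) % 8;
	}
	return backend_map;
}
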
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
drm_radeon_private_t *dev_priv)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 mc_arb_ramcfg;
u32 db_debug4;
@@ -1428,25 +1510,41 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+ dev_priv->r600_max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -1459,7 +1557,7 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1469,10 +1567,8 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
- RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
- R600_SYNC_GRADIENT |
- R600_SYNC_WALKER |
- R600_SYNC_ALIGNER));
+ ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+ RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1579,28 @@ static void r700_gfx_init(struct drm_device *dev,
smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
- RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
- R700_GS_FLUSH_CTL(4) |
- R700_ACK_FLUSH_CTL(3) |
- R700_SYNC_FLUSH_CTL));
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+ RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+ R700_GS_FLUSH_CTL(4) |
+ R700_ACK_FLUSH_CTL(3) |
+ R700_SYNC_FLUSH_CTL));
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
- RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+ db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1629,10 @@ static void r700_gfx_init(struct drm_device *dev,
R600_ALU_UPDATE_FIFO_HIWATER(0x8));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2639,12 @@ out:
mutex_unlock(&dev_priv->cs_mutex);
return r;
}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+
+ *npipes = dev_priv->r600_npipes;
+ *nbanks = dev_priv->r600_nbanks;
+ *group_size = dev_priv->r600_group_size;
+}
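
Both gfx_init paths now decode the tiling geometry straight out of gb_tiling_config, and r600_cs_legacy_get_tiling_conf() exports the three derived values to the CS checker. The decode is compact enough to restate standalone; the bit positions are taken from the masks used in the hunks above:

static void decode_tiling(u32 gb_tiling_config,
			  u32 *npipes, u32 *nbanks, u32 *group_size)
{
	*group_size = (gb_tiling_config & 0xc0) ? 512 : 256;	/* bits [7:6] */
	*npipes = 1 << ((gb_tiling_config >> 1) & 0x7);		/* bits [3:1] = log2 */
	*nbanks = (gb_tiling_config & 0x30) ? 8 : 4;		/* bits [5:4] */
}
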
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec16507..144c32d37136 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
+#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,316 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
struct r600_cs_track {
- u32 cb_color0_base_last;
+ /* configuration we mirror so that we use the same code between kms/ums */
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* values we track */
+ u32 sq_config;
+ u32 nsamples;
+ u32 cb_color_base_last[8];
+ struct radeon_bo *cb_color_bo[8];
+ u32 cb_color_bo_offset[8];
+ struct radeon_bo *cb_color_frag_bo[8];
+ struct radeon_bo *cb_color_tile_bo[8];
+ u32 cb_color_info[8];
+ u32 cb_color_size_idx[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 cb_color_size[8];
+ u32 vgt_strmout_en;
+ u32 vgt_strmout_buffer_en;
+ u32 db_depth_control;
+ u32 db_depth_info;
+ u32 db_depth_size_idx;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_offset;
+ struct radeon_bo *db_bo;
};
+static inline int r600_bpe_from_format(u32 *bpe, u32 format)
+{
+ switch (format) {
+ case V_038004_COLOR_8:
+ case V_038004_COLOR_4_4:
+ case V_038004_COLOR_3_3_2:
+ case V_038004_FMT_1:
+ *bpe = 1;
+ break;
+ case V_038004_COLOR_16:
+ case V_038004_COLOR_16_FLOAT:
+ case V_038004_COLOR_8_8:
+ case V_038004_COLOR_5_6_5:
+ case V_038004_COLOR_6_5_5:
+ case V_038004_COLOR_1_5_5_5:
+ case V_038004_COLOR_4_4_4_4:
+ case V_038004_COLOR_5_5_5_1:
+ *bpe = 2;
+ break;
+ case V_038004_FMT_8_8_8:
+ *bpe = 3;
+ break;
+ case V_038004_COLOR_32:
+ case V_038004_COLOR_32_FLOAT:
+ case V_038004_COLOR_16_16:
+ case V_038004_COLOR_16_16_FLOAT:
+ case V_038004_COLOR_8_24:
+ case V_038004_COLOR_8_24_FLOAT:
+ case V_038004_COLOR_24_8:
+ case V_038004_COLOR_24_8_FLOAT:
+ case V_038004_COLOR_10_11_11:
+ case V_038004_COLOR_10_11_11_FLOAT:
+ case V_038004_COLOR_11_11_10:
+ case V_038004_COLOR_11_11_10_FLOAT:
+ case V_038004_COLOR_2_10_10_10:
+ case V_038004_COLOR_8_8_8_8:
+ case V_038004_COLOR_10_10_10_2:
+ case V_038004_FMT_5_9_9_9_SHAREDEXP:
+ case V_038004_FMT_32_AS_8:
+ case V_038004_FMT_32_AS_8_8:
+ *bpe = 4;
+ break;
+ case V_038004_COLOR_X24_8_32_FLOAT:
+ case V_038004_COLOR_32_32:
+ case V_038004_COLOR_32_32_FLOAT:
+ case V_038004_COLOR_16_16_16_16:
+ case V_038004_COLOR_16_16_16_16_FLOAT:
+ *bpe = 8;
+ break;
+ case V_038004_FMT_16_16_16:
+ case V_038004_FMT_16_16_16_FLOAT:
+ *bpe = 6;
+ break;
+ case V_038004_FMT_32_32_32:
+ case V_038004_FMT_32_32_32_FLOAT:
+ *bpe = 12;
+ break;
+ case V_038004_COLOR_32_32_32_32:
+ case V_038004_COLOR_32_32_32_32_FLOAT:
+ *bpe = 16;
+ break;
+ case V_038004_FMT_GB_GR:
+ case V_038004_FMT_BG_RG:
+ case V_038004_COLOR_INVALID:
+ *bpe = 16;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+ int i;
+
+ /* assume DX9 mode */
+ track->sq_config = DX9_CONSTS;
+ for (i = 0; i < 8; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_size[i] = 0;
+ track->cb_color_size_idx[i] = 0;
+ track->cb_color_info[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+ track->db_bo = NULL;
+ /* assume the biggest format and that htile is enabled */
+ track->db_depth_info = 7 | (1 << 25);
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+}
+
+static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ struct r600_cs_track *track = p->track;
+ u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
+ volatile u32 *ib = p->ib->ptr;
+
+ if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
+ return -EINVAL;
+ }
+ size = radeon_bo_size(track->cb_color_bo[i]);
+ if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ i, track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
+ slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+ if (!pitch) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
+ __func__, __LINE__, pitch, i, track->cb_color_size[i]);
+ return -EINVAL;
+ }
+ height = size / (pitch * bpe);
+ if (height > 8192)
+ height = 8192;
+ switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ if (pitch & 0x3f) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
+ __func__, __LINE__, pitch, bpe, pitch * bpe);
+ return -EINVAL;
+ }
+ if ((pitch * bpe) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~0x7;
+ if (!height)
+ height = 8;
+ break;
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ if (pitch & ((8 * track->nbanks) - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ tmp = pitch * 8 * bpe * track->nsamples;
+ tmp = tmp / track->nbanks;
+ if (tmp & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~((16 * track->npipes) - 1);
+ if (!height)
+ height = 16 * track->npipes;
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ /* check offset */
+ tmp = height * pitch;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
+ return -EINVAL;
+ }
+ /* limit max tile */
+ tmp = (height * pitch) >> 6;
+ if (tmp < slice_tile_max)
+ slice_tile_max = tmp;
+ tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
+ S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+ ib[track->cb_color_size_idx[i]] = tmp;
+ return 0;
+}
+
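+
+ /* Worked example of the decode performed by r600_cs_track_validate_cb()
+ * below: PITCH_TILE_MAX is in 8-pixel units minus one, SLICE_TILE_MAX
+ * counts 8x8 tiles minus one, and the usable height is whatever fits in
+ * the bo. This is a standalone userspace rerun for one plausible target;
+ * the field layout (bits [9:0] and [29:10]) is an assumption matching
+ * the G_028060_* accessors.
+ *
+ * #include <stdio.h>
+ * #include <stdint.h>
+ *
+ * int main(void)
+ * {
+ *	uint32_t cb_color_size = (127u << 0) | (16383u << 10);
+ *	uint32_t bpe = 4;			// 32bpp target
+ *	uint64_t bo_size = 4u << 20;		// 4 MiB buffer
+ *	uint32_t pitch = ((cb_color_size & 0x3ff) + 1) << 3;
+ *	uint32_t slice_tile_max = ((cb_color_size >> 10) & 0xfffff) + 1;
+ *	uint64_t height = bo_size / ((uint64_t)pitch * bpe);
+ *
+ *	if (height > 8192)
+ *		height = 8192;
+ *	// prints: pitch=1024 px, slice_tile_max=16384, height=1024 lines
+ *	printf("pitch=%u px, slice_tile_max=%u, height=%llu lines\n",
+ *	       pitch, slice_tile_max, (unsigned long long)height);
+ *	return 0;
+ * }
+ */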
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 tmp;
+ int r, i;
+ volatile u32 *ib = p->ib->ptr;
+
+ /* on legacy kernels we don't perform the advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ /* we don't support the SMX output buffer yet */
+ if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+ /* check that we have a cb for each enabled target, we don't check
+ * shader_mask because it seems mesa isn't always setting it :(
+ */
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
+ }
+ }
+ /* Check depth buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ u32 nviews, bpe, ntiles;
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ printk_once(KERN_WARNING "You have old & broken userspace, please consider updating mesa\n");
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -279,7 +585,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = CP_PACKET0_GET_REG(header);
- mutex_lock(&p->rdev->ddev->mode_config.mutex);
+
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -314,7 +620,6 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
}
out:
- mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r;
}
@@ -359,6 +664,385 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this
+ * function will test it against a list of registers needing special handling.
+ */
+static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the out buffer
+ * which will need us to better understand how it works to perform
+ * security checks on it (Jerome)
+ */
+ case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+ case R_008C44_SQ_ESGS_RING_SIZE:
+ case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+ case R_008C54_SQ_ESTMP_RING_SIZE:
+ case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+ case R_008C74_SQ_FBUF_RING_SIZE:
+ case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+ case R_008C5C_SQ_GSTMP_RING_SIZE:
+ case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+ case R_008C4C_SQ_GSVS_RING_SIZE:
+ case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+ case R_008C6C_SQ_PSTMP_RING_SIZE:
+ case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+ case R_008C7C_SQ_REDUC_RING_SIZE:
+ case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+ case R_008C64_SQ_VSTMP_RING_SIZE:
+ case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+ /* get the value to populate the IB; don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case SQ_CONFIG:
+ track->sq_config = radeon_get_ib_value(p, idx);
+ break;
+ case R_028800_DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case R_028010_DB_DEPTH_INFO:
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ break;
+ case R_028004_DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case R_028000_DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case R_028AB0_VGT_STRMOUT_EN:
+ track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028B20_VGT_STRMOUT_BUFFER_EN:
+ track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028238_CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_02823C_CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_028C04_PA_SC_AA_CONFIG:
+ tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ track->nsamples = 1 << tmp;
+ break;
+ case R_0280A0_CB_COLOR0_INFO:
+ case R_0280A4_CB_COLOR1_INFO:
+ case R_0280A8_CB_COLOR2_INFO:
+ case R_0280AC_CB_COLOR3_INFO:
+ case R_0280B0_CB_COLOR4_INFO:
+ case R_0280B4_CB_COLOR5_INFO:
+ case R_0280B8_CB_COLOR6_INFO:
+ case R_0280BC_CB_COLOR7_INFO:
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case R_028060_CB_COLOR0_SIZE:
+ case R_028064_CB_COLOR1_SIZE:
+ case R_028068_CB_COLOR2_SIZE:
+ case R_02806C_CB_COLOR3_SIZE:
+ case R_028070_CB_COLOR4_SIZE:
+ case R_028074_CB_COLOR5_SIZE:
+ case R_028078_CB_COLOR6_SIZE:
+ case R_02807C_CB_COLOR7_SIZE:
+ tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+ track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_size_idx[tmp] = idx;
+ break;
+ /* These registers were added late; there is userspace
+ * which does provide relocations for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the last
+ * CB_COLOR0_BASE. Note that if userspace doesn't set
+ * CB_COLOR0_BASE before these registers we will report an
+ * error. Old userspace always sets CB_COLOR0_BASE
+ * before any of these.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_frag_bo[tmp] = reloc->robj;
+ }
+ break;
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_tile_bo[tmp] = reloc->robj;
+ }
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case DB_DEPTH_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_bo = reloc->robj;
+ break;
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
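+
+ /* The bitmap test at the top of r600_cs_check_reg() is worth spelling
+ * out: each u32 in r600_reg_safe_bm covers 32 consecutive dword
+ * registers, i.e. 128 bytes of register space, so reg >> 7 selects the
+ * word and (reg >> 2) & 31 the bit; a set bit means the register needs
+ * the special handling in the switch, a clear bit lets it straight
+ * through. A standalone restatement of that indexing (sketch only):
+ *
+ * #include <stdint.h>
+ * #include <stddef.h>
+ *
+ * static int reg_needs_special_handling(const uint32_t *safe_bm,
+ *					 size_t bm_words, uint32_t reg)
+ * {
+ *	uint32_t word = reg >> 7;	// 128 bytes of register space per word
+ *	uint32_t bit = (reg >> 2) & 31;	// dword slot within that word
+ *
+ *	if (word >= bm_words)
+ *		return -1;		// out of range: forbidden
+ *	return (safe_bm[word] >> bit) & 1;	// 1 -> special handling
+ * }
+ */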
+static inline unsigned minify(unsigned size, unsigned levels)
+{
+ size = size >> levels;
+ if (size < 1)
+ size = 1;
+ return size;
+}
+
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
+ unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
+ unsigned *l0_size, unsigned *mipmap_size)
+{
+ unsigned offset, i, level, face;
+ unsigned width, height, depth, rowstride, size;
+
+ w0 = minify(w0, 0);
+ h0 = minify(h0, 0);
+ d0 = minify(d0, 0);
+ for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ width = minify(w0, i);
+ height = minify(h0, i);
+ depth = minify(d0, i);
+ for (face = 0; face < nfaces; face++) {
+ rowstride = ((width * bpe) + 255) & ~255;
+ size = height * rowstride * depth;
+ offset += size;
+ offset = (offset + 0x1f) & ~0x1f;
+ }
+ }
+ *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
+ *mipmap_size = offset;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
+ if (!nlevels)
+ *mipmap_size = *l0_size;
+}
+
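+
+ /* r600_texture_size() above walks the mip chain with minify(), padding
+ * each row to 256 bytes and each face to a 32-byte boundary. A userspace
+ * rerun of the same walk for a 2D 256x256 RGBA8 texture (nfaces = 1,
+ * blevel = 0, 9 levels) shows the numbers the checker compares against
+ * the bo sizes:
+ *
+ * #include <stdio.h>
+ *
+ * static unsigned minify(unsigned size, unsigned levels)
+ * {
+ *	size >>= levels;
+ *	return size ? size : 1;
+ * }
+ *
+ * int main(void)
+ * {
+ *	unsigned w0 = 256, h0 = 256, bpe = 4, nlevels = 9, offset = 0;
+ *	unsigned l0 = (((w0 * bpe) + 255) & ~255u) * h0;
+ *	unsigned i;
+ *
+ *	for (i = 0; i < nlevels; i++) {
+ *		unsigned w = minify(w0, i), h = minify(h0, i);
+ *		unsigned rowstride = ((w * bpe) + 255) & ~255u;
+ *
+ *		offset += h * rowstride;
+ *		offset = (offset + 0x1f) & ~0x1fu;	// per-face alignment
+ *	}
+ *	// prints: l0 = 262144 bytes, mip chain = 98048 bytes
+ *	printf("l0 = %u bytes, mip chain = %u bytes\n", l0, offset - l0);
+ *	return 0;
+ * }
+ */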
+/**
+ * r600_check_texture_resource() - check if a texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
+ u32 word0, word1, l0_size, mipmap_size;
+
+ /* on legacy kernels we don't perform the advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ word0 = radeon_get_ib_value(p, idx + 0);
+ word1 = radeon_get_ib_value(p, idx + 1);
+ w0 = G_038000_TEX_WIDTH(word0) + 1;
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ nfaces = 1;
+ switch (G_038000_DIM(word0)) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
+ case V_038000_SQ_TEX_DIM_3D:
+ break;
+ case V_038000_SQ_TEX_DIM_CUBEMAP:
+ nfaces = 6;
+ break;
+ case V_038000_SQ_TEX_DIM_1D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ default:
+ dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ return -EINVAL;
+ }
+ if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ return -EINVAL;
+ }
+ word0 = radeon_get_ib_value(p, idx + 4);
+ word1 = radeon_get_ib_value(p, idx + 5);
+ blevel = G_038010_BASE_LEVEL(word0);
+ nlevels = G_038014_LAST_LEVEL(word1);
+ r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
+ /* the ib value at this index gives us the offset into the texture bo */
+ word0 = radeon_get_ib_value(p, idx + 2);
+ if ((l0_size + word0) > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ return -EINVAL;
+ }
+ /* the ib value at this index gives us the offset into the mipmap bo */
+ word0 = radeon_get_ib_value(p, idx + 3);
+ if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
@@ -408,12 +1092,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1115,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
@@ -493,30 +1192,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- case SQ_ESGS_RING_BASE:
- case SQ_GSVS_RING_BASE:
- case SQ_ESTMP_RING_BASE:
- case SQ_GSTMP_RING_BASE:
- case SQ_VSTMP_RING_BASE:
- case SQ_PSTMP_RING_BASE:
- case SQ_FBUF_RING_BASE:
- case SQ_REDUC_RING_BASE:
- case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case CP_COHER_BASE:
- /* use PACKET3_SURFACE_SYNC */
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1208,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- /* This register were added late, there is userspace
- * which does provide relocation for those but set
- * 0 offset. In order to avoid breaking old userspace
- * we detect this and set address to point to last
- * CB_COLOR0_BASE, note that if userspace doesn't set
- * CB_COLOR0_BASE before this register we will report
- * error. Old userspace always set CB_COLOR0_BASE
- * before any of this.
- */
- case R_0280E0_CB_COLOR0_FRAG:
- case R_0280E4_CB_COLOR1_FRAG:
- case R_0280E8_CB_COLOR2_FRAG:
- case R_0280EC_CB_COLOR3_FRAG:
- case R_0280F0_CB_COLOR4_FRAG:
- case R_0280F4_CB_COLOR5_FRAG:
- case R_0280F8_CB_COLOR6_FRAG:
- case R_0280FC_CB_COLOR7_FRAG:
- case R_0280C0_CB_COLOR0_TILE:
- case R_0280C4_CB_COLOR1_TILE:
- case R_0280C8_CB_COLOR2_TILE:
- case R_0280CC_CB_COLOR3_TILE:
- case R_0280D0_CB_COLOR4_TILE:
- case R_0280D4_CB_COLOR5_TILE:
- case R_0280D8_CB_COLOR6_TILE:
- case R_0280DC_CB_COLOR7_TILE:
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
- if (!track->cb_color0_base_last) {
- dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] = track->cb_color0_base_last;
- printk_once(KERN_WARNING "radeon: You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
- } else {
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- }
- break;
- case DB_DEPTH_BASE:
- case DB_HTILE_DATA_BASE:
- case CB_COLOR0_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->cb_color0_base_last = ib[idx+1+i];
- break;
- case CB_COLOR1_BASE:
- case CB_COLOR2_BASE:
- case CB_COLOR3_BASE:
- case CB_COLOR4_BASE:
- case CB_COLOR5_BASE:
- case CB_COLOR6_BASE:
- case CB_COLOR7_BASE:
- case SQ_PGM_START_FS:
- case SQ_PGM_START_ES:
- case SQ_PGM_START_VS:
- case SQ_PGM_START_GS:
- case SQ_PGM_START_PS:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case VGT_DMA_BASE:
- case VGT_DMA_BASE_HI:
- /* These should be handled by DRAW_INDEX packet 3 */
- case VGT_STRMOUT_BASE_OFFSET_0:
- case VGT_STRMOUT_BASE_OFFSET_1:
- case VGT_STRMOUT_BASE_OFFSET_2:
- case VGT_STRMOUT_BASE_OFFSET_3:
- case VGT_STRMOUT_BASE_OFFSET_HI_0:
- case VGT_STRMOUT_BASE_OFFSET_HI_1:
- case VGT_STRMOUT_BASE_OFFSET_HI_2:
- case VGT_STRMOUT_BASE_OFFSET_HI_3:
- case VGT_STRMOUT_BUFFER_BASE_0:
- case VGT_STRMOUT_BUFFER_BASE_1:
- case VGT_STRMOUT_BUFFER_BASE_2:
- case VGT_STRMOUT_BUFFER_BASE_3:
- case VGT_STRMOUT_BUFFER_OFFSET_0:
- case VGT_STRMOUT_BUFFER_OFFSET_1:
- case VGT_STRMOUT_BUFFER_OFFSET_2:
- case VGT_STRMOUT_BUFFER_OFFSET_3:
- /* These should be handled by STRMOUT_BUFFER packet 3 */
- DRM_ERROR("bad context reg: 0x%08x\n", reg);
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_RESOURCE:
@@ -646,6 +1227,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
@@ -655,6 +1239,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -662,6 +1247,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = r600_check_texture_resource(p, idx+(i*7)+1,
+ texture, mipmap);
+ if (r)
+ return r;
break;
case SQ_TEX_VTX_VALID_BUFFER:
/* vtx base */
@@ -670,6 +1260,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
+ offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
+ }
ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
@@ -682,13 +1279,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_SET_ALU_CONST:
- start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
- end_reg = 4 * pkt->count + start_reg - 4;
- if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
- (start_reg >= PACKET3_SET_ALU_CONST_END) ||
- (end_reg >= PACKET3_SET_ALU_CONST_END)) {
- DRM_ERROR("bad SET_ALU_CONST\n");
- return -EINVAL;
+ if (track->sq_config & DX9_CONSTS) {
+ start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+ (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+ DRM_ERROR("bad SET_ALU_CONST\n");
+ return -EINVAL;
+ }
}
break;
case PACKET3_SET_BOOL_CONST:
@@ -760,11 +1359,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
struct r600_cs_track *track;
int r;
- track = kzalloc(sizeof(*track), GFP_KERNEL);
- p->track = track;
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ if (p->rdev->family < CHIP_RV770) {
+ track->npipes = p->rdev->config.r600.tiling_npipes;
+ track->nbanks = p->rdev->config.r600.tiling_nbanks;
+ track->group_size = p->rdev->config.r600.tiling_group_size;
+ } else if (p->rdev->family <= CHIP_RV740) {
+ track->npipes = p->rdev->config.rv770.tiling_npipes;
+ track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+ track->group_size = p->rdev->config.rv770.tiling_group_size;
+ }
+ p->track = track;
+ }
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
p->idx += pkt.count + 2;
@@ -779,9 +1395,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
return -EINVAL;
}
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1411,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
mdelay(1);
}
#endif
+ kfree(p->track);
+ p->track = NULL;
return 0;
}
@@ -833,9 +1455,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
+ struct radeon_ib fake_ib;
+ struct r600_cs_track *track;
int r;
+ /* initialize tracker */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -843,6 +1472,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
+ parser.track = track;
fake_ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index fcc949df0e5d..26b4bc9d89a5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format {
*/
enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_DIG_ENABLE = 0x01,
- AUDIO_STATUS_V = 0x02,
- AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
AUDIO_STATUS_EMPHASIS = 0x08,
AUDIO_STATUS_COPYRIGHT = 0x10,
AUDIO_STATUS_NONAUDIO = 0x20,
AUDIO_STATUS_PROFESSIONAL = 0x40,
- AUDIO_STATUS_LEVEL = 0x80
+ AUDIO_STATUS_LEVEL = 0x80
};
struct {
@@ -85,7 +85,7 @@ struct {
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
- *CTS = clock*N/(128*freq)*1000;
+ *CTS = clock * N / (128 * freq) * 1000;
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
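
The hunk above computes HDMI Audio Clock Regeneration timing, where the spec relation is CTS = f_TMDS * N / (128 * f_s). A standalone check of the driver's integer expression, assuming clock is the pixel clock in kHz (DRM mode clocks are in kHz) and freq is the sample rate in Hz; note that the division truncates before the final scaling.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clock = 74250;			/* 74.25 MHz pixel clock, in kHz */
	uint32_t n = 6144, freq = 48000;	/* recommended N for 48 kHz audio */
	/* the driver's expression: truncates at the division step */
	uint32_t cts = clock * n / (128 * freq) * 1000;

	printf("CTS = %u\n", cts);		/* prints 74000; the exact value is 74250 */
	return 0;
}
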
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType,
uint8_t length,
uint8_t *frame)
{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
}
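
The reindented checksum above implements the standard HDMI infoframe rule: the checksum byte makes the sum of header and payload bytes zero modulo 256. A standalone verification, assuming an AVI infoframe header (type 0x82, version 2, length 13):

#include <stdio.h>
#include <stdint.h>

/* Mirrors r600_hdmi_infoframe_checksum(): frame[0] ends up as the
 * value that brings the byte sum of header + payload to 0 mod 256. */
static void infoframe_checksum(uint8_t type, uint8_t ver, uint8_t len,
			       uint8_t *frame)
{
	int i;

	frame[0] = type + ver + len;
	for (i = 1; i <= len; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0];
}

int main(void)
{
	uint8_t frame[14] = { 0 };	/* frame[1..13]: AVI infoframe payload */
	uint8_t sum, type = 0x82, ver = 2, len = 13;
	int i;

	infoframe_checksum(type, ver, len, frame);
	sum = type + ver + len;
	for (i = 0; i <= len; i++)
		sum += frame[i];
	printf("byte sum mod 256 = %u\n", sum);	/* prints 0 */
	return 0;
}
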
/*
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
if (!offset)
return;
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
- /* disable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+ if (!radeon_encoder->hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder)) {
- } else if (radeon_encoder->hdmi_audio_workaround) {
- /* enable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+ /* disable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else {
- /* disable audio workaround and stop delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ /* enable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
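
The workaround writes above rely on the driver's WREG32_P() read-modify-write macro, whose definition is only partly visible in the radeon.h hunk near the end of this patch. A standalone re-creation of the idiom as commonly defined in radeon (an assumption here): bits kept by mask survive, bits outside the mask are replaced from val.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[16];			/* fake MMIO space for the demo */
#define RREG32(reg)	(regs[(reg) >> 2])
#define WREG32(reg, v)	(regs[(reg) >> 2] = (v))

/* Read-modify-write as radeon defines it (assumed here). */
#define WREG32_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)

int main(void)
{
	enum { R600_HDMI_CNTL = 0x08 };

	WREG32(R600_HDMI_CNTL, 0xdeadb003);
	/* the "disable audio workaround" write from the hunk above */
	WREG32_P(R600_HDMI_CNTL, 0x00000001, ~0x00001001);
	/* prints 0xdeada003: bit 12 cleared, bit 0 forced on */
	printf("0x%08x\n", RREG32(R600_HDMI_CNTL));
	return 0;
}
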
@@ -314,6 +312,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (!offset)
return;
@@ -342,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
/*
* update settings with current parameters from audio engine
*/
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
uint32_t iec;
if (!offset)
@@ -412,95 +411,173 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
+}
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
+static int r600_hdmi_find_free_block(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ bool free_blocks[3] = { true, true, true };
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->hdmi_offset) {
+ case R600_HDMI_BLOCK1:
+ free_blocks[0] = false;
+ break;
+ case R600_HDMI_BLOCK2:
+ free_blocks[1] = false;
+ break;
+ case R600_HDMI_BLOCK3:
+ free_blocks[2] = false;
+ break;
+ }
+ }
+
+ if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
+ return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
+ } else if (rdev->family >= CHIP_R600) {
+ if (free_blocks[0])
+ return R600_HDMI_BLOCK1;
+ else if (free_blocks[1])
+ return R600_HDMI_BLOCK2;
+ }
+ return 0;
}
-/*
- * enable/disable the HDMI engine
- */
-void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+static void r600_hdmi_assign_block(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (!offset)
+ if (!dig) {
+ dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
+ }
- DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
-
- /* some version of atombios ignore the enable HDMI flag
- * so enabling/disabling HDMI was moved here for TMDS1+2 */
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* This part is doubtfull in my opinion */
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
- break;
-
- default:
- DRM_ERROR("unknown HDMI output type\n");
- break;
+ if (ASIC_IS_DCE4(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE3(rdev)) {
+ radeon_encoder->hdmi_offset = dig->dig_encoder ?
+ R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
+ if (ASIC_IS_DCE32(rdev))
+ radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
+ R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
+ } else if (rdev->family >= CHIP_R600) {
+ radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
/*
- * determin at which register offset the HDMI encoder is
+ * enable the HDMI engine
*/
-void r600_hdmi_init(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- switch (r600_audio_tmds_index(encoder)) {
- case 0:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
+ if (!radeon_encoder->hdmi_offset) {
+ r600_hdmi_assign_block(encoder);
+ if (!radeon_encoder->hdmi_offset) {
+ dev_warn(rdev->dev, "Could not find HDMI block for "
+ "0x%x encoder\n", radeon_encoder->encoder_id);
+ return;
+ }
+ }
+
+ offset = radeon_encoder->hdmi_offset;
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+ } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
- case 1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
- radeon_encoder->hdmi_offset = 0;
+ dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
- break;
+ }
+
+ if (rdev->irq.installed
+ && rdev->family != CHIP_RS600
+ && rdev->family != CHIP_RS690
+ && rdev->family != CHIP_RS740) {
+
+ /* if an IRQ is available, use it */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ radeon_irq_set(rdev);
+
+ r600_audio_disable_polling(encoder);
+ } else {
+ /* if not, fall back to polling */
+ r600_audio_enable_polling(encoder);
+ }
+
+ DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+}
+
+/*
+ * disable the HDMI engine
+ */
+void r600_hdmi_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- radeon_encoder->hdmi_offset = R600_HDMI_DIG;
- break;
+ if (ASIC_IS_DCE4(rdev))
+ return;
- default:
- radeon_encoder->hdmi_offset = 0;
- break;
+ offset = radeon_encoder->hdmi_offset;
+ if (!offset) {
+ dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ return;
}
- DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ radeon_irq_set(rdev);
+
+ /* disable polling */
+ r600_audio_disable_polling(encoder);
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+ } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown HDMI output type\n");
+ break;
+ }
+ }
- /* TODO: make this configureable */
- radeon_encoder->hdmi_audio_workaround = 0;
+ radeon_encoder->hdmi_offset = 0;
+ radeon_encoder->hdmi_config_offset = 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ffdeda9..d84612ae47e0 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -152,37 +152,44 @@
#define R600_AUDIO_STATUS_BITS 0x73d8
/* HDMI base register addresses */
-#define R600_HDMI_TMDS1 0x7400
-#define R600_HDMI_TMDS2 0x7700
-#define R600_HDMI_DIG 0x7800
+#define R600_HDMI_BLOCK1 0x7400
+#define R600_HDMI_BLOCK2 0x7700
+#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+# define R600_HDMI_INT_PENDING (1 << 29)
+#define R600_HDMI_CNTL 0x08
+# define R600_HDMI_INT_EN (1 << 28)
+# define R600_HDMI_INT_ACK (1 << 29)
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
+
+/* HDMI additional config base register addresses */
+#define R600_HDMI_CONFIG1 0x7600
+#define R600_HDMI_CONFIG2 0x7a00
#endif
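
The new R600_HDMI_INT_PENDING/INT_EN/INT_ACK bits pair the status and control registers in the usual way. A standalone, purely hypothetical poll-and-ack sketch against a fake register file; the real r600 interrupt path is not part of this hunk and may differ.

#include <stdio.h>
#include <stdint.h>

#define R600_HDMI_STATUS	0x04
#define R600_HDMI_INT_PENDING	(1 << 29)
#define R600_HDMI_CNTL		0x08
#define R600_HDMI_INT_ACK	(1 << 29)

static uint32_t regs[16];		/* fake register space for the demo */
#define RREG32(reg)	(regs[(reg) >> 2])
#define WREG32(reg, v)	(regs[(reg) >> 2] = (v))

/* Hypothetical poll-and-ack of one HDMI block using the new bits. */
static int hdmi_irq_sketch(uint32_t offset)
{
	if (!(RREG32(offset + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING))
		return 0;
	/* ... would refresh audio settings for this block here ... */
	WREG32(offset + R600_HDMI_CNTL,
	       RREG32(offset + R600_HDMI_CNTL) | R600_HDMI_INT_ACK);
	return 1;
}

int main(void)
{
	uint32_t block = 0x0;		/* demo offset into the fake space */

	WREG32(block + R600_HDMI_STATUS, R600_HDMI_INT_PENDING);
	printf("handled: %d\n", hdmi_irq_sketch(block));	/* prints 1 */
	return 0;
}
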
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 30480881aed1..59c1f8793e60 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -77,6 +77,55 @@
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
#define CP_STAT 0x8680
@@ -883,6 +932,16 @@
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define R_028C04_PA_SC_AA_CONFIG 0x028C04
+#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
+#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
+#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
+#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
+#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
+#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
+#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
+#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
+#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +964,461 @@
#define R_0280D4_CB_COLOR5_TILE 0x0280D4
#define R_0280D8_CB_COLOR6_TILE 0x0280D8
#define R_0280DC_CB_COLOR7_TILE 0x0280DC
-
+#define R_0280A0_CB_COLOR0_INFO 0x0280A0
+#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_0280A0_ENDIAN 0xFFFFFFFC
+#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_0280A0_FORMAT 0xFFFFFF03
+#define V_0280A0_COLOR_INVALID 0x00000000
+#define V_0280A0_COLOR_8 0x00000001
+#define V_0280A0_COLOR_4_4 0x00000002
+#define V_0280A0_COLOR_3_3_2 0x00000003
+#define V_0280A0_COLOR_16 0x00000005
+#define V_0280A0_COLOR_16_FLOAT 0x00000006
+#define V_0280A0_COLOR_8_8 0x00000007
+#define V_0280A0_COLOR_5_6_5 0x00000008
+#define V_0280A0_COLOR_6_5_5 0x00000009
+#define V_0280A0_COLOR_1_5_5_5 0x0000000A
+#define V_0280A0_COLOR_4_4_4_4 0x0000000B
+#define V_0280A0_COLOR_5_5_5_1 0x0000000C
+#define V_0280A0_COLOR_32 0x0000000D
+#define V_0280A0_COLOR_32_FLOAT 0x0000000E
+#define V_0280A0_COLOR_16_16 0x0000000F
+#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
+#define V_0280A0_COLOR_8_24 0x00000011
+#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
+#define V_0280A0_COLOR_24_8 0x00000013
+#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
+#define V_0280A0_COLOR_10_11_11 0x00000015
+#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
+#define V_0280A0_COLOR_11_11_10 0x00000017
+#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
+#define V_0280A0_COLOR_2_10_10_10 0x00000019
+#define V_0280A0_COLOR_8_8_8_8 0x0000001A
+#define V_0280A0_COLOR_10_10_10_2 0x0000001B
+#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_0280A0_COLOR_32_32 0x0000001D
+#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
+#define V_0280A0_COLOR_16_16_16_16 0x0000001F
+#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_0280A0_COLOR_32_32_32_32 0x00000022
+#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
+#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
+#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
+#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
+#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
+#define C_0280A0_READ_SIZE 0xFFFF7FFF
+#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
+#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
+#define C_0280A0_COMP_SWAP 0xFFFCFFFF
+#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
+#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
+#define C_0280A0_TILE_MODE 0xFFF3FFFF
+#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
+#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
+#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
+#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
+#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
+#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
+#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
+#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
+#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
+#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
+#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
+#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
+#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
+#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
+#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
+#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
+#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
+#define C_0280A0_ROUND_MODE 0xFDFFFFFF
+#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
+#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
+#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
+#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO 0x0280A4
+#define R_0280A8_CB_COLOR2_INFO 0x0280A8
+#define R_0280AC_CB_COLOR3_INFO 0x0280AC
+#define R_0280B0_CB_COLOR4_INFO 0x0280B0
+#define R_0280B4_CB_COLOR5_INFO 0x0280B4
+#define R_0280B8_CB_COLOR6_INFO 0x0280B8
+#define R_0280BC_CB_COLOR7_INFO 0x0280BC
+#define R_028060_CB_COLOR0_SIZE 0x028060
+#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028060_SLICE_TILE_MAX 0xC00003FF
+#define R_028064_CB_COLOR1_SIZE 0x028064
+#define R_028068_CB_COLOR2_SIZE 0x028068
+#define R_02806C_CB_COLOR3_SIZE 0x02806C
+#define R_028070_CB_COLOR4_SIZE 0x028070
+#define R_028074_CB_COLOR5_SIZE 0x028074
+#define R_028078_CB_COLOR6_SIZE 0x028078
+#define R_02807C_CB_COLOR7_SIZE 0x02807C
+#define R_028238_CB_TARGET_MASK 0x028238
+#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
+#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
+#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
+#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
+#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
+#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
+#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
+#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK 0x02823C
+#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
+#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
+#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
+#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
+#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
+#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
+#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
+#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
+#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
+#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
+#define C_028AB0_STREAMOUT 0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
+#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
+#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
+#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
+#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
+#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
+#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
+#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
+#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
+#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
+#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
+#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
+#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
+#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_028B20_SIZE 0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
+#define S_038000_DIM(x) (((x) & 0x7) << 0)
+#define G_038000_DIM(x) (((x) >> 0) & 0x7)
+#define C_038000_DIM 0xFFFFFFF8
+#define V_038000_SQ_TEX_DIM_1D 0x00000000
+#define V_038000_SQ_TEX_DIM_2D 0x00000001
+#define V_038000_SQ_TEX_DIM_3D 0x00000002
+#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
+#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
+#define C_038000_TILE_MODE 0xFFFFFF87
+#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
+#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
+#define C_038000_TILE_TYPE 0xFFFFFF7F
+#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
+#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
+#define C_038000_PITCH 0xFFF800FF
+#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
+#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
+#define C_038000_TEX_WIDTH 0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
+#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
+#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
+#define C_038004_TEX_HEIGHT 0xFFFFE000
+#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
+#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
+#define C_038004_TEX_DEPTH 0xFC001FFF
+#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
+#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
+#define C_038004_DATA_FORMAT 0x03FFFFFF
+#define V_038004_COLOR_INVALID 0x00000000
+#define V_038004_COLOR_8 0x00000001
+#define V_038004_COLOR_4_4 0x00000002
+#define V_038004_COLOR_3_3_2 0x00000003
+#define V_038004_COLOR_16 0x00000005
+#define V_038004_COLOR_16_FLOAT 0x00000006
+#define V_038004_COLOR_8_8 0x00000007
+#define V_038004_COLOR_5_6_5 0x00000008
+#define V_038004_COLOR_6_5_5 0x00000009
+#define V_038004_COLOR_1_5_5_5 0x0000000A
+#define V_038004_COLOR_4_4_4_4 0x0000000B
+#define V_038004_COLOR_5_5_5_1 0x0000000C
+#define V_038004_COLOR_32 0x0000000D
+#define V_038004_COLOR_32_FLOAT 0x0000000E
+#define V_038004_COLOR_16_16 0x0000000F
+#define V_038004_COLOR_16_16_FLOAT 0x00000010
+#define V_038004_COLOR_8_24 0x00000011
+#define V_038004_COLOR_8_24_FLOAT 0x00000012
+#define V_038004_COLOR_24_8 0x00000013
+#define V_038004_COLOR_24_8_FLOAT 0x00000014
+#define V_038004_COLOR_10_11_11 0x00000015
+#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
+#define V_038004_COLOR_11_11_10 0x00000017
+#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
+#define V_038004_COLOR_2_10_10_10 0x00000019
+#define V_038004_COLOR_8_8_8_8 0x0000001A
+#define V_038004_COLOR_10_10_10_2 0x0000001B
+#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_038004_COLOR_32_32 0x0000001D
+#define V_038004_COLOR_32_32_FLOAT 0x0000001E
+#define V_038004_COLOR_16_16_16_16 0x0000001F
+#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_038004_COLOR_32_32_32_32 0x00000022
+#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_038004_FMT_1 0x00000025
+#define V_038004_FMT_GB_GR 0x00000027
+#define V_038004_FMT_BG_RG 0x00000028
+#define V_038004_FMT_32_AS_8 0x00000029
+#define V_038004_FMT_32_AS_8_8 0x0000002A
+#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
+#define V_038004_FMT_8_8_8 0x0000002C
+#define V_038004_FMT_16_16_16 0x0000002D
+#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
+#define V_038004_FMT_32_32_32 0x0000002F
+#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
+#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
+#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
+#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
+#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
+#define C_038010_REQUEST_SIZE 0xFFFF3FFF
+#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_038010_DST_SEL_X 0xFFF8FFFF
+#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_038010_DST_SEL_Y 0xFFC7FFFF
+#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_038010_DST_SEL_Z 0xFE3FFFFF
+#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_038010_DST_SEL_W 0xF1FFFFFF
+#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_038010_BASE_LEVEL 0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
+#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_038014_LAST_LEVEL 0xFFFFFFF0
+#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_038014_BASE_ARRAY 0xFFFE000F
+#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_038014_LAST_ARRAY 0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
+#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288A8_ITEMSIZE 0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
+#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C44_MEM_SIZE 0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
+#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B0_ITEMSIZE 0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
+#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C54_MEM_SIZE 0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
+#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C0_ITEMSIZE 0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
+#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C74_MEM_SIZE 0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
+#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B4_ITEMSIZE 0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
+#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C5C_MEM_SIZE 0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
+#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288AC_ITEMSIZE 0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
+#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C4C_MEM_SIZE 0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
+#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288BC_ITEMSIZE 0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
+#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C6C_MEM_SIZE 0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
+#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C4_ITEMSIZE 0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
+#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C7C_MEM_SIZE 0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
+#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B8_ITEMSIZE 0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
+#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C64_MEM_SIZE 0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
+#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C8_ITEMSIZE 0xFFFF8000
+#define R_028010_DB_DEPTH_INFO 0x028010
+#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
+#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
+#define C_028010_FORMAT 0xFFFFFFF8
+#define V_028010_DEPTH_INVALID 0x00000000
+#define V_028010_DEPTH_16 0x00000001
+#define V_028010_DEPTH_X8_24 0x00000002
+#define V_028010_DEPTH_8_24 0x00000003
+#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
+#define V_028010_DEPTH_8_24_FLOAT 0x00000005
+#define V_028010_DEPTH_32_FLOAT 0x00000006
+#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
+#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
+#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
+#define C_028010_READ_SIZE 0xFFFFFFF7
+#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
+#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
+#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
+#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
+#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
+#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_028010_TILE_COMPACT 0xFBFFFFFF
+#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE 0x028000
+#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028000_SLICE_TILE_MAX 0xC00003FF
+#define R_028004_DB_DEPTH_VIEW 0x028004
+#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028004_SLICE_START 0xFFFFF800
+#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028004_SLICE_MAX 0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
#endif
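
Every register added above follows the same triplet convention: S_* shifts a field value into place, G_* extracts it, and C_* is the AND mask that clears it. A standalone demonstration using three macros copied verbatim from the DB_DEPTH_CONTROL block above:

#include <stdio.h>
#include <stdint.h>

#define S_028800_Z_ENABLE(x)	(((x) & 0x1) << 1)
#define G_028800_Z_ENABLE(x)	(((x) >> 1) & 0x1)
#define C_028800_Z_ENABLE	0xFFFFFFFD

int main(void)
{
	uint32_t db_depth_control = 0xFFFFFFFF;

	/* clear the field with C_*, then set a new value with S_* */
	db_depth_control &= C_028800_Z_ENABLE;
	db_depth_control |= S_028800_Z_ENABLE(1);
	/* read it back with G_*: prints 1 */
	printf("Z_ENABLE = %u\n", G_028800_Z_ENABLE(db_depth_control));
	return 0;
}
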
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb193e5..2f94dc66c183 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -90,12 +90,15 @@ extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -118,6 +121,21 @@ struct radeon_device;
/*
* BIOS.
*/
+#define ATRM_BIOS_PAGE 4096
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atrm_supported(struct pci_dev *pdev);
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+#else
+static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return -EINVAL;
+}
+#endif
bool radeon_get_bios(struct radeon_device *rdev);
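
The radeon_atrm_get_bios_chunk() declaration above, together with ATRM_BIOS_PAGE, suggests page-sized reads of the BIOS image. A standalone sketch of a caller loop with a stubbed-out method; the real caller lives in radeon_bios.c and its details are an assumption here.

#include <stdint.h>
#include <string.h>

#define ATRM_BIOS_PAGE 4096

/* Stub standing in for the real ACPI method wrapper declared above;
 * here it just reports a 2-page image filled with a pattern. */
static int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	if (offset >= 2 * ATRM_BIOS_PAGE)
		return 0;
	memset(bios, 0x55, len);
	return len;
}

/* Hypothetical caller: fetch the BIOS one page at a time until the
 * method returns no more data. */
static int atrm_read_bios_sketch(uint8_t *bios, int size)
{
	int offset;

	for (offset = 0; offset < size; offset += ATRM_BIOS_PAGE)
		if (radeon_atrm_get_bios_chunk(bios + offset, offset,
					       ATRM_BIOS_PAGE) <= 0)
			break;
	return offset;
}

int main(void)
{
	static uint8_t bios[4 * ATRM_BIOS_PAGE];
	return atrm_read_bios_sketch(bios, sizeof(bios)) > 0 ? 0 : 1;
}
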
@@ -138,17 +156,28 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
struct radeon_clock {
struct radeon_pll p1pll;
struct radeon_pll p2pll;
+ struct radeon_pll dcpll;
struct radeon_pll spll;
struct radeon_pll mpll;
/* 10 Khz units */
uint32_t default_mclk;
uint32_t default_sclk;
+ uint32_t default_dispclk;
+ uint32_t dp_extclk;
};
/*
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void rs690_pm_info(struct radeon_device *rdev);
/*
* Fences.
@@ -157,7 +186,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
- unsigned long count_timeout;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@@ -172,7 +202,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- unsigned long timeout;
bool emited;
bool signaled;
};
@@ -234,6 +263,7 @@ struct radeon_bo_list {
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
+ bool reserved;
};
/*
@@ -275,6 +305,7 @@ union radeon_gart_table {
};
#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
struct radeon_gart {
dma_addr_t table_addr;
@@ -309,21 +340,20 @@ struct radeon_mc {
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
- u64 gtt_location;
+ u64 visible_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
- u64 vram_location;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
- bool igp_sideport_enabled;
+ bool igp_sideport_enabled;
+ u64 gtt_base_align;
};
-int radeon_mc_setup(struct radeon_device *rdev);
bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);
@@ -347,9 +377,15 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[2];
+ bool crtc_vblank_int[6];
+ wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
};
@@ -379,6 +415,7 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
+ struct list_head bogus_ib;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
@@ -433,9 +470,12 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
+extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
@@ -539,6 +579,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
@@ -570,7 +611,123 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
+
+enum radeon_pm_method {
+ PM_METHOD_PROFILE,
+ PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+ DYNPM_STATE_DISABLED,
+ DYNPM_STATE_MINIMUM,
+ DYNPM_STATE_PAUSED,
+ DYNPM_STATE_ACTIVE,
+ DYNPM_STATE_SUSPENDED,
+};
+enum radeon_dynpm_action {
+ DYNPM_ACTION_NONE,
+ DYNPM_ACTION_MINIMUM,
+ DYNPM_ACTION_DOWNCLOCK,
+ DYNPM_ACTION_UPCLOCK,
+ DYNPM_ACTION_DEFAULT
+};
+
+enum radeon_voltage_type {
+ VOLTAGE_NONE = 0,
+ VOLTAGE_GPIO,
+ VOLTAGE_VDDC,
+ VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+ POWER_STATE_TYPE_DEFAULT,
+ POWER_STATE_TYPE_POWERSAVE,
+ POWER_STATE_TYPE_BATTERY,
+ POWER_STATE_TYPE_BALANCED,
+ POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_profile_type {
+ PM_PROFILE_DEFAULT,
+ PM_PROFILE_AUTO,
+ PM_PROFILE_LOW,
+ PM_PROFILE_MID,
+ PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX 1
+#define PM_PROFILE_MID_SH_IDX 2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX 4
+#define PM_PROFILE_MID_MH_IDX 5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX 7
+
+struct radeon_pm_profile {
+ int dpms_off_ps_idx;
+ int dpms_on_ps_idx;
+ int dpms_off_cm_idx;
+ int dpms_on_cm_idx;
+};
+
+struct radeon_voltage {
+ enum radeon_voltage_type type;
+ /* gpio voltage */
+ struct radeon_gpio_rec gpio;
+ u32 delay; /* delay in usec from voltage drop to sclk change */
+ bool active_high; /* voltage drop is active when bit is high */
+ /* VDDC voltage */
+ u8 vddc_id; /* index into vddc voltage table */
+ u8 vddci_id; /* index into vddci voltage table */
+ bool vddci_enabled;
+ /* r6xx+ sw */
+ u32 voltage;
+};
+
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
+
+struct radeon_pm_clock_info {
+ /* memory clock */
+ u32 mclk;
+ /* engine clock */
+ u32 sclk;
+ /* voltage info */
+ struct radeon_voltage voltage;
+ /* standardized clock flags */
+ u32 flags;
+};
+
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
+struct radeon_power_state {
+ enum radeon_pm_state_type type;
+ /* XXX: use a define for num clock modes */
+ struct radeon_pm_clock_info clock_info[8];
+ /* number of valid clock modes in this power state */
+ int num_clock_modes;
+ struct radeon_pm_clock_info *default_clock_mode;
+ /* standardized state flags */
+ u32 flags;
+ u32 misc; /* vbios specific flags */
+ u32 misc2; /* vbios specific flags */
+ int pcie_lanes; /* pcie lanes */
+};
+
+/*
+ * Some modes are overclocked by a very small margin; accept them
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
struct radeon_pm {
+ struct mutex mutex;
+ u32 active_crtcs;
+ int active_crtc_count;
+ int req_vblank;
+ bool vblank_sync;
+ bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -581,7 +738,34 @@ struct radeon_pm {
fixed20_12 ht_bandwidth;
fixed20_12 core_bandwidth;
fixed20_12 sclk;
+ fixed20_12 mclk;
fixed20_12 needed_bandwidth;
+ /* XXX: use a define for num power modes */
+ struct radeon_power_state power_state[8];
+ /* number of valid power states */
+ int num_power_states;
+ int current_power_state_index;
+ int current_clock_mode_index;
+ int requested_power_state_index;
+ int requested_clock_mode_index;
+ int default_power_state_index;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 current_vddc;
+ struct radeon_i2c_chan *i2c_bus;
+ /* selected pm method */
+ enum radeon_pm_method pm_method;
+ /* dynpm power management */
+ struct delayed_work dynpm_idle_work;
+ enum radeon_dynpm_state dynpm_state;
+ enum radeon_dynpm_action dynpm_planned_action;
+ unsigned long dynpm_action_timeout;
+ bool dynpm_can_upclock;
+ bool dynpm_can_downclock;
+ /* profile-based power management */
+ enum radeon_pm_profile_type profile;
+ int profile_index;
+ struct radeon_pm_profile profiles[PM_PROFILE_MAX];
};
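
The PM_PROFILE_*_IDX defines above index pm.profiles[]; the _SH/_MH suffixes presumably distinguish single-head from multi-head configurations. A standalone, illustrative selection sketch; the policy shown is an assumption, not the driver's code.

#include <stdio.h>

#define PM_PROFILE_HIGH_SH_IDX	3
#define PM_PROFILE_HIGH_MH_IDX	6
#define PM_PROFILE_MAX		7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

/* Illustrative only: pick the high profile for the current head count
 * and return its DPMS-on power state index. */
static int pick_dpms_on_state(const struct radeon_pm_profile *profiles,
			      int single_head)
{
	int idx = single_head ? PM_PROFILE_HIGH_SH_IDX : PM_PROFILE_HIGH_MH_IDX;

	return profiles[idx].dpms_on_ps_idx;
}

int main(void)
{
	struct radeon_pm_profile profiles[PM_PROFILE_MAX] = {
		[PM_PROFILE_HIGH_SH_IDX] = { .dpms_on_ps_idx = 2 },
		[PM_PROFILE_HIGH_MH_IDX] = { .dpms_on_ps_idx = 5 },
	};

	printf("%d\n", pick_dpms_on_state(profiles, 1));	/* prints 2 */
	return 0;
}
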
@@ -604,8 +788,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
-int r100_debugfs_rbbm_init(struct radeon_device *rdev);
-int r100_debugfs_cp_init(struct radeon_device *rdev);
/*
@@ -617,7 +799,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*gpu_reset)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -651,12 +834,13 @@ struct radeon_asic {
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
- int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+ void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
@@ -669,41 +853,84 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ bool (*gui_idle)(struct radeon_device *rdev);
+ /* power management */
+ void (*pm_misc)(struct radeon_device *rdev);
+ void (*pm_prepare)(struct radeon_device *rdev);
+ void (*pm_finish)(struct radeon_device *rdev);
+ void (*pm_init_profile)(struct radeon_device *rdev);
+ void (*pm_get_dynpm_state)(struct radeon_device *rdev);
};
/*
* Asic structures
*/
+struct r100_gpu_lockup {
+ unsigned long last_jiffies;
+ u32 last_cp_rptr;
+};
+
struct r100_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r300_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 resync_scratch;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r600_asic {
- unsigned max_pipes;
- unsigned max_tile_pipes;
- unsigned max_simds;
- unsigned max_backends;
- unsigned max_gprs;
- unsigned max_threads;
- unsigned max_stack_entries;
- unsigned max_hw_contexts;
- unsigned max_gs_threads;
- unsigned sx_max_export_size;
- unsigned sx_max_export_pos_size;
- unsigned sx_max_export_smx_size;
- unsigned sq_num_cf_insts;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
};
struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
@@ -720,7 +947,10 @@ struct rv770_asic {
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_fize;
+ unsigned sc_earlyz_tile_fifo_size;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
};
union radeon_asic_config {
@@ -728,8 +958,15 @@ union radeon_asic_config {
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
};
+/*
+ * asic initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
/*
* IOCTL.
@@ -785,9 +1022,6 @@ struct radeon_device {
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
- struct fb_info *fbdev_info;
- struct radeon_bo *fbdev_rbo;
- struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -830,6 +1064,9 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
+ int num_crtc; /* number of crtcs */
+ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+ struct mutex vram_mutex;
/* audio stuff */
struct timer_list audio_timer;
@@ -838,6 +1075,9 @@ struct radeon_device {
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
+
+ bool powered_down;
+ struct notifier_block acpi_nb;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1135,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -956,7 +1198,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
-
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
/*
* BIOS helpers.
@@ -997,7 +1239,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1015,6 +1258,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@@ -1024,14 +1268,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
/* Common functions */
/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
@@ -1042,53 +1296,14 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-struct r100_mc_save {
- u32 GENMO_WT;
- u32 CRTC_EXT_CNTL;
- u32 CRTC_GEN_CNTL;
- u32 CRTC2_GEN_CNTL;
- u32 CUR_OFFSET;
- u32 CUR2_OFFSET;
-};
-extern void r100_cp_disable(struct radeon_device *rdev);
-extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-extern void r100_cp_fini(struct radeon_device *rdev);
-extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-extern int r100_pci_gart_init(struct radeon_device *rdev);
-extern void r100_pci_gart_fini(struct radeon_device *rdev);
-extern int r100_pci_gart_enable(struct radeon_device *rdev);
-extern void r100_pci_gart_disable(struct radeon_device *rdev);
-extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
-extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
-extern void r100_ib_fini(struct radeon_device *rdev);
-extern int r100_ib_init(struct radeon_device *rdev);
-extern void r100_irq_disable(struct radeon_device *rdev);
-extern int r100_irq_set(struct radeon_device *rdev);
-extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_vram_init_sizes(struct radeon_device *rdev);
-extern void r100_wb_disable(struct radeon_device *rdev);
-extern void r100_wb_fini(struct radeon_device *rdev);
-extern int r100_wb_init(struct radeon_device *rdev);
-extern void r100_hdp_reset(struct radeon_device *rdev);
-extern int r100_rb2d_reset(struct radeon_device *rdev);
-extern int r100_cp_reset(struct radeon_device *rdev);
-extern void r100_vga_render_disable(struct radeon_device *rdev);
-extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- struct radeon_bo *robj);
-extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- const unsigned *auth, unsigned n,
- radeon_packet0_check_t check);
-extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx);
-extern void r100_enable_bm(struct radeon_device *rdev);
-extern void r100_set_common_regs(struct radeon_device *rdev);
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1096,7 +1311,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_vram_info(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1320,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/* r420,r423,rv410 */
-extern int r420_mc_init(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1361,14 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode2);
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
+extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1166,28 +1381,47 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
-extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_enable(struct drm_encoder *encoder);
+extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
+
+/* evergreen */
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
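The six-entry vga_control[] and crtc_control[] arrays match the six CRTCs that radeon_asic_init() below enables on DCE4 parts. A hedged sketch of the expected save/restore pairing, mirroring the existing r100_mc_save pattern; the evergreen_mc_stop()/evergreen_mc_resume() names are assumptions, not part of this hunk:

static void example_mc_reprogram(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;

	evergreen_mc_stop(rdev, &save);   /* quiesce the six DCE4 CRTCs */
	/* ... reprogram the memory controller here ... */
	evergreen_mc_resume(rdev, &save); /* restore VGA/CRTC state */
}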
#include "radeon_object.h"
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a5556dc..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -134,12 +134,10 @@ int radeon_agp_init(struct radeon_device *rdev)
int ret;
/* Acquire AGP. */
- if (!rdev->ddev->agp->acquired) {
- ret = drm_agp_acquire(rdev->ddev);
- if (ret) {
- DRM_ERROR("Unable to acquire AGP: %d\n", ret);
- return ret;
- }
+ ret = drm_agp_acquire(rdev->ddev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
}
ret = drm_agp_info(rdev->ddev, &info);
@@ -237,6 +235,10 @@ int radeon_agp_init(struct radeon_device *rdev)
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+ rdev->mc.gtt_start = rdev->mc.agp_base;
+ rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
/* workaround some hw issues */
if (rdev->family < CHIP_R200) {
@@ -268,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
}
#endif
}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 000000000000..646f96f97c77
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ BUG_ON(1);
+ return 0;
+}
+
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+ reg, v);
+ BUG_ON(1);
+}
+
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+ rdev->mc_rreg = &radeon_invalid_rreg;
+ rdev->mc_wreg = &radeon_invalid_wreg;
+ rdev->pll_rreg = &radeon_invalid_rreg;
+ rdev->pll_wreg = &radeon_invalid_wreg;
+ rdev->pciep_rreg = &radeon_invalid_rreg;
+ rdev->pciep_wreg = &radeon_invalid_wreg;
+
+ /* Don't change order as we are overriding accessors. */
+ if (rdev->family < CHIP_RV515) {
+ rdev->pcie_reg_mask = 0xff;
+ } else {
+ rdev->pcie_reg_mask = 0x7ff;
+ }
+ /* FIXME: not sure here */
+ if (rdev->family <= CHIP_R580) {
+ rdev->pll_rreg = &r100_pll_rreg;
+ rdev->pll_wreg = &r100_pll_wreg;
+ }
+ if (rdev->family >= CHIP_R420) {
+ rdev->mc_rreg = &r420_mc_rreg;
+ rdev->mc_wreg = &r420_mc_wreg;
+ }
+ if (rdev->family >= CHIP_RV515) {
+ rdev->mc_rreg = &rv515_mc_rreg;
+ rdev->mc_wreg = &rv515_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+ rdev->mc_rreg = &rs400_mc_rreg;
+ rdev->mc_wreg = &rs400_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ rdev->mc_rreg = &rs690_mc_rreg;
+ rdev->mc_wreg = &rs690_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS600) {
+ rdev->mc_rreg = &rs600_mc_rreg;
+ rdev->mc_wreg = &rs600_mc_wreg;
+ }
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+ rdev->pciep_rreg = &r600_pciep_rreg;
+ rdev->pciep_wreg = &r600_pciep_wreg;
+ }
+}
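The function above installs BUG-ing fallbacks first and then overrides them per family, so a read or write through an accessor a chip does not implement fails loudly rather than silently. A hedged sketch of how a read dispatches (the helper name is hypothetical):

static inline uint32_t example_mc_read(struct radeon_device *rdev,
				       uint32_t reg)
{
	/* Resolves to rv515_mc_rreg, rs690_mc_rreg, ... per family,
	 * or radeon_invalid_rreg if nothing was installed. */
	return rdev->mc_rreg(rdev, reg);
}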
+
+
+/* helper to disable agp */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+ rdev->flags &= ~RADEON_IS_AGP;
+ if (rdev->family >= CHIP_R600) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ } else if (rdev->family >= CHIP_RV515 ||
+ rdev->family == CHIP_RV380 ||
+ rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
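Because the GART callbacks are swapped in place, unchanged call sites transparently reach the PCI(E) path afterwards. A hedged sketch of the effect (illustrative caller, not part of this patch):

static void example_agp_fallback(struct radeon_device *rdev)
{
	radeon_agp_disable(rdev);
	/* Same dispatch macro, new target: now rv370_pcie_gart_tlb_flush
	 * or r100_pci_gart_tlb_flush, depending on the family checks above. */
	radeon_gart_tlb_flush(rdev);
}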
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = NULL,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r200_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r300_asic = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r300_asic_pcie = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r420_asic = {
+ .init = &r420_init,
+ .fini = &r420_fini,
+ .suspend = &r420_suspend,
+ .resume = &r420_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rs400_asic = {
+ .init = &rs400_init,
+ .fini = &rs400_fini,
+ .suspend = &rs400_suspend,
+ .resume = &rs400_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rs600_asic = {
+ .init = &rs600_init,
+ .fini = &rs600_fini,
+ .suspend = &rs600_suspend,
+ .resume = &rs600_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rs600_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs600_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rs690_asic = {
+ .init = &rs690_init,
+ .fini = &rs690_fini,
+ .suspend = &rs690_suspend,
+ .resume = &rs690_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r200_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rv515_asic = {
+ .init = &rv515_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &rv515_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r520_asic = {
+ .init = &r520_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &r520_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+};
+
+static struct radeon_asic r600_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
+ .vga_set_state = &r600_vga_set_state,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .asic_reset = &r600_asic_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rs780_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .vga_set_state = &r600_vga_set_state,
+ .asic_reset = &r600_asic_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic rv770_asic = {
+ .init = &rv770_init,
+ .fini = &rv770_fini,
+ .suspend = &rv770_suspend,
+ .resume = &rv770_resume,
+ .cp_commit = &r600_cp_commit,
+ .asic_reset = &r600_asic_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &rv770_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic evergreen_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+int radeon_asic_init(struct radeon_device *rdev)
+{
+ radeon_register_accessor_init(rdev);
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ rdev->asic = &r100_asic;
+ break;
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ rdev->asic = &r200_asic;
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ if (rdev->flags & RADEON_IS_PCIE)
+ rdev->asic = &r300_asic_pcie;
+ else
+ rdev->asic = &r300_asic;
+ break;
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ rdev->asic = &r420_asic;
+ /* handle macs */
+ if (rdev->bios == NULL) {
+ rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
+ rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
+ rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
+ rdev->asic->set_memory_clock = NULL;
+ }
+ break;
+ case CHIP_RS400:
+ case CHIP_RS480:
+ rdev->asic = &rs400_asic;
+ break;
+ case CHIP_RS600:
+ rdev->asic = &rs600_asic;
+ break;
+ case CHIP_RS690:
+ case CHIP_RS740:
+ rdev->asic = &rs690_asic;
+ break;
+ case CHIP_RV515:
+ rdev->asic = &rv515_asic;
+ break;
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ rdev->asic = &r520_asic;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RV670:
+ rdev->asic = &r600_asic;
+ break;
+ case CHIP_RS780:
+ case CHIP_RS880:
+ rdev->asic = &rs780_asic;
+ break;
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ rdev->asic = &rv770_asic;
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->asic = &evergreen_asic;
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
+ }
+
+ /* set the number of crtcs */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ rdev->num_crtc = 1;
+ else {
+ if (ASIC_IS_DCE4(rdev))
+ rdev->num_crtc = 6;
+ else
+ rdev->num_crtc = 2;
+ }
+
+ return 0;
+}
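A hedged sketch of the intended init flow (the caller name is hypothetical): radeon_asic_init() must bind the per-family table before any radeon_* dispatch macro is used.

static int example_early_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_asic_init(rdev); /* selects r100_asic .. evergreen_asic */
	if (r)
		dev_err(rdev->dev, "unsupported ASIC family %d\n",
			rdev->family);
	return r;
}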
+
+/*
+ * Wrapper around modesetting bits. Move to radeon_clocks.c?
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_static_clocks_init(rdev->ddev);
+ if (r) {
+ return r;
+ }
+ DRM_INFO("Clocks initialized !\n");
+ return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1aeac3fd..c0bbaa64157a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,16 +43,25 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * r100,rv100,rs100,rv200,rs200
*/
-extern int r100_init(struct radeon_device *rdev);
-extern void r100_fini(struct radeon_device *rdev);
-extern int r100_suspend(struct radeon_device *rdev);
-extern int r100_resume(struct radeon_device *rdev);
+struct r100_mc_save {
+ u32 GENMO_WT;
+ u32 CRTC_EXT_CNTL;
+ u32 CRTC_GEN_CNTL;
+ u32 CRTC2_GEN_CNTL;
+ u32 CUR_OFFSET;
+ u32 CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -73,7 +82,7 @@ int r100_copy_blit(struct radeon_device *rdev,
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
-int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
@@ -82,44 +91,56 @@ void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_ib_fini(struct radeon_device *rdev);
+int r100_ib_init(struct radeon_device *rdev);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+void r100_wb_disable(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ const unsigned *auth, unsigned n,
+ radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
-static struct radeon_asic r100_asic = {
- .init = &r100_init,
- .fini = &r100_fini,
- .suspend = &r100_suspend,
- .resume = &r100_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
- .cs_parse = &r100_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = NULL,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@@ -128,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -138,47 +160,7 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
-extern int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
-static struct radeon_asic r300_asic = {
- .init = &r300_init,
- .fini = &r300_fini,
- .suspend = &r300_suspend,
- .resume = &r300_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -187,43 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
-static struct radeon_asic r420_asic = {
- .init = &r420_init,
- .fini = &r420_fini,
- .suspend = &r420_suspend,
- .resume = &r420_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
+extern void r420_pm_init_profile(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -236,47 +182,11 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-static struct radeon_asic rs400_asic = {
- .init = &rs400_init,
- .fini = &rs400_fini,
- .suspend = &rs400_suspend,
- .resume = &rs400_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* rs600.
*/
+extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -294,42 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-
-static struct radeon_asic rs600_asic = {
- .init = &rs600_init,
- .fini = &rs600_fini,
- .suspend = &rs600_suspend,
- .resume = &rs600_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs600_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .bandwidth_update = &rs600_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
/*
* rs690,rs740
@@ -341,50 +218,12 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
-static struct radeon_asic rs690_asic = {
- .init = &rs690_init,
- .fini = &rs690_fini,
- .suspend = &rs690_suspend,
- .resume = &rs690_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r300_copy_dma,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rs690_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* rv515
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@@ -393,85 +232,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
-static struct radeon_asic rv515_asic = {
- .init = &rv515_init,
- .fini = &rv515_fini,
- .suspend = &rv515_suspend,
- .resume = &rv515_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
-static struct radeon_asic r520_asic = {
- .init = &r520_init,
- .fini = &rv515_fini,
- .suspend = &rv515_suspend,
- .resume = &r520_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -497,11 +263,12 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
-int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -513,42 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
-
-static struct radeon_asic r600_asic = {
- .init = &r600_init,
- .fini = &r600_fini,
- .suspend = &r600_suspend,
- .resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
- .vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
-};
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -557,42 +293,30 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
-static struct radeon_asic rv770_asic = {
- .init = &rv770_init,
- .fini = &rv770_fini,
- .suspend = &rv770_suspend,
- .resume = &rv770_resume,
- .cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
- .vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
-};
+/*
+ * evergreen
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
#endif
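
The prototype churn above is the visible half of a larger rework: the per-ASIC function tables move out of this header (into radeon_asic.c), and the single gpu_reset() hook is split into a cheap gpu_is_lockup() probe plus a heavier asic_reset(). A minimal sketch of the calling pattern that split enables, assuming the radeon_asic vtable gains matching gpu_is_lockup/asic_reset members (illustrative names, not the exact wrappers):

    /* confirm the engine is actually wedged before paying for a reset */
    static int radeon_recover_if_hung(struct radeon_device *rdev)
    {
            if (!rdev->asic->gpu_is_lockup(rdev))
                    return 0;       /* still making progress */
            return rdev->asic->asic_reset(rdev);
    }
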
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5f..10673ae59cfa 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -69,52 +69,54 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
- uint16_t data_offset;
- int i;
+ uint16_t data_offset, size;
+ int i, num_indices;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
- atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
-
- i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
-
-
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
-
- if (gpio->sucI2cId.ucAccess == id) {
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- i2c.valid = true;
- break;
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ i2c.valid = true;
+ break;
+ }
}
}
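
This hunk establishes the idiom the rest of the file is converted to: atom_parse_data_header() now returns whether the requested data table exists, so all parsing is guarded on its return value and the entry walk is bounded by the table's real size rather than the ATOM_MAX_SUPPORTED_DEVICE constant. The shape of the idiom, as a sketch (entry_size stands in for the per-table entry struct):

    u16 size, data_offset;
    u8 frev, crev;

    if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
            u8 *table = ctx->bios + data_offset;
            int n = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / entry_size;
            /* walk at most n entries of 'table'; nothing is touched
             * when the header lookup fails */
    }
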
@@ -135,20 +137,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
gpio.valid = false;
- atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
- gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
- num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
-
- for (i = 0; i < num_indices; i++) {
- pin = &gpio_info->asGPIO_Pin[i];
- if (id == pin->ucGPIO_ID) {
- gpio.id = pin->ucGPIO_ID;
- gpio.reg = pin->usGpioPin_AIndex * 4;
- gpio.mask = (1 << pin->ucGpioPinBitShift);
- gpio.valid = true;
- break;
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
}
}
@@ -159,8 +162,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
+ u32 reg;
+
+ if (ASIC_IS_DCE4(rdev))
+ reg = EVERGREEN_DC_GPIO_HPD_A;
+ else
+ reg = AVIVO_DC_GPIO_HPD_A;
+
hpd.gpio = *gpio;
- if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ if (gpio->reg == reg) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
@@ -257,6 +267,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
+ if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+ *line_mux = 0x90;
}
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -268,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* ASUS HD 3600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x9598) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01e4)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
/* ASUS HD 3450 board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x95C5) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -388,9 +409,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
-
- if (data_offset == 0)
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
return false;
if (crev < 2)
@@ -442,37 +461,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
GetIndexIntoMasterTable(DATA,
IntegratedSystemInfo);
- atom_parse_data_header(ctx, index, &size, &frev,
- &crev, &igp_offset);
-
- if (crev >= 2) {
- igp_obj =
- (ATOM_INTEGRATED_SYSTEM_INFO_V2
- *) (ctx->bios + igp_offset);
-
- if (igp_obj) {
- uint32_t slot_config, ct;
-
- if (con_obj_num == 1)
- slot_config =
- igp_obj->
- ulDDISlot1Config;
- else
- slot_config =
- igp_obj->
- ulDDISlot2Config;
-
- ct = (slot_config >> 16) & 0xff;
- connector_type =
- object_connector_convert
- [ct];
- connector_object_id = ct;
- igp_lane_info =
- slot_config & 0xffff;
+ if (atom_parse_data_header(ctx, index, &size, &frev,
+ &crev, &igp_offset)) {
+
+ if (crev >= 2) {
+ igp_obj =
+ (ATOM_INTEGRATED_SYSTEM_INFO_V2
+ *) (ctx->bios + igp_offset);
+
+ if (igp_obj) {
+ uint32_t slot_config, ct;
+
+ if (con_obj_num == 1)
+ slot_config =
+ igp_obj->
+ ulDDISlot1Config;
+ else
+ slot_config =
+ igp_obj->
+ ulDDISlot2Config;
+
+ ct = (slot_config >> 16) & 0xff;
+ connector_type =
+ object_connector_convert
+ [ct];
+ connector_object_id = ct;
+ igp_lane_info =
+ slot_config & 0xffff;
+ } else
+ continue;
} else
continue;
- } else
- continue;
+ } else {
+ igp_lane_info = 0;
+ connector_type =
+ object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
+ }
} else {
igp_lane_info = 0;
connector_type =
@@ -514,6 +539,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
/* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -531,7 +558,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
- hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
@@ -569,11 +595,11 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else {
- hpd.hpd = RADEON_HPD_NONE;
- ddc_bus.valid = false;
}
+ /* needed for aux chan transactions */
+ ddc_bus.hpd = hpd.hpd;
+
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
@@ -617,20 +643,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
uint8_t frev, crev;
ATOM_XTMDS_INFO *xtmds;
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
- xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+ if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+ xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
- if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
- if (connector_type == DRM_MODE_CONNECTOR_DVII)
- return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
- else
- return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
- } else {
- if (connector_type == DRM_MODE_CONNECTOR_DVII)
- return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
- else
- return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
- }
+ if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ } else {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ }
+ } else
+ return supported_devices_connector_object_id_convert
+ [connector_type];
} else {
return supported_devices_connector_object_id_convert
[connector_type];
@@ -660,9 +689,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint8_t dac;
union atom_supported_devices *supported_devices;
int i, j, max_device;
- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+ struct bios_connector *bios_connectors;
+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+
+ bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+ if (!bios_connectors)
+ return false;
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset)) {
+ kfree(bios_connectors);
+ return false;
+ }
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -830,6 +868,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_link_encoder_connector(dev);
+ kfree(bios_connectors);
return true;
}
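
bios_connectors used to be an on-stack array of ATOM_MAX_SUPPORTED_DEVICE bios_connector entries, large enough to threaten the kernel stack; the hunks above move it to the heap and free it on every exit path. Reduced to its shape:

    struct bios_connector *bios_connectors;

    bios_connectors = kzalloc(sizeof(*bios_connectors) *
                              ATOM_MAX_SUPPORTED_DEVICE, GFP_KERNEL);
    if (!bios_connectors)
            return false;
    /* ... fill and consume the array ... */
    kfree(bios_connectors);
    return true;
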
@@ -838,6 +877,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_2 info_12;
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
+ ATOM_FIRMWARE_INFO_V2_1 info_21;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,18 +889,16 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
uint8_t frev, crev;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- firmware_info =
- (union firmware_info *)(mode_info->atom_context->bios +
- data_offset);
-
- if (firmware_info) {
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
/* pixel clocks */
p1pll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
@@ -875,6 +913,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ if (crev >= 4) {
+ p1pll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_min == 0)
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_max == 0)
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ } else {
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ }
+
if (p1pll->pll_out_min == 0) {
if (ASIC_IS_AVIVO(rdev))
p1pll->pll_out_min = 64800;
@@ -951,8 +1003,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_mclk =
le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+ if (ASIC_IS_DCE4(rdev)) {
+ rdev->clock.default_dispclk =
+ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+ if (rdev->clock.default_dispclk == 0)
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ rdev->clock.dp_extclk =
+ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+ }
+ *dcpll = *p1pll;
+
return true;
}
+
return false;
}
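
The driver tracks clocks in units of 10 kHz (hence 60000 above as the 600 MHz display-clock fallback), while the v1.4 firmware table reports the LCD PLL limits in MHz; that is what the *100 scaling converts, with zero meaning "no dedicated LCD limit, inherit pll_out_min/max". A sketch of the conversion, assuming that MHz interpretation of the info_14 fields:

    /* MHz -> 10 kHz units; 0 means "fall back to the generic limit" */
    u32 lcd_min = le16_to_cpu(info_14->usLcdMinPixelClockPLL_Output) * 100;
    if (lcd_min == 0)
            lcd_min = p1pll->pll_out_min;
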
@@ -969,17 +1032,21 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
u8 frev, crev;
u16 data_offset;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
-
- if (igp_info) {
switch (crev) {
case 1:
- if (igp_info->info.ucMemoryType & 0xf0)
- return true;
+ /* AMD IGPS */
+ if ((rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740)) {
+ if (igp_info->info.ulBootUpMemoryClock)
+ return true;
+ } else {
+ if (igp_info->info.ucMemoryType & 0xf0)
+ return true;
+ }
break;
case 2:
if (igp_info->info_2.ucMemoryType & 0x0f)
@@ -1006,14 +1073,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
uint16_t maxfreq;
int i;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- tmds_info =
- (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
- data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ tmds_info =
+ (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+ data_offset);
- if (tmds_info) {
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
@@ -1062,13 +1127,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- ss_info =
- (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ ss_info =
+ (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
- if (ss_info) {
ss =
kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
@@ -1109,13 +1172,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- lvds_info =
- (union lvds_info *)(mode_info->atom_context->bios + data_offset);
-
- if (lvds_info) {
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ lvds_info =
+ (union lvds_info *)(mode_info->atom_context->bios + data_offset);
lvds =
kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
@@ -1137,7 +1197,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
@@ -1161,6 +1221,18 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ else
+ lvds->pll_algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ lvds->pll_algo = PLL_ALGO_NEW;
+ else
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
encoder->native_mode = lvds->native_mode;
}
return lvds;
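
The pll_algo field added here selects the PLL divider-search algorithm per panel, keyed to the radeon_new_pll module parameter: AVIVO parts default to the new algorithm unless radeon_new_pll=0, older parts keep the legacy one unless radeon_new_pll=1. The same decision as a single expression (a sketch equivalent to the if/else chain above):

    bool use_new = ASIC_IS_AVIVO(rdev) ? (radeon_new_pll != 0)
                                       : (radeon_new_pll == 1);
    lvds->pll_algo = use_new ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
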
@@ -1179,11 +1251,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
uint8_t bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
- dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
-
- if (dac_info) {
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
if (!p_dac)
@@ -1208,12 +1280,14 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
u8 frev, crev;
u16 data_offset, misc;
- atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
+ if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+ &frev, &crev, &data_offset))
+ return false;
switch (crev) {
case 1:
tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
- if (index > MAX_SUPPORTED_TV_TIMING)
+ if (index >= MAX_SUPPORTED_TV_TIMING)
return false;
mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
@@ -1251,7 +1325,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
break;
case 2:
tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
- if (index > MAX_SUPPORTED_TV_TIMING_V1_2)
+ if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
return false;
dtd_timings = &tv_info_v1_2->aModeTimings[index];
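
Both bounds checks are tightened from > to >=: aModeTimings[] holds exactly MAX_SUPPORTED_TV_TIMING (respectively MAX_SUPPORTED_TV_TIMING_V1_2) entries, so an index equal to the maximum was already one past the end of the array:

    /* an array of N entries admits indices 0..N-1, hence '>=' */
    if (index >= MAX_SUPPORTED_TV_TIMING)
            return false;
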
@@ -1300,47 +1374,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
struct _ATOM_ANALOG_TV_INFO *tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
- tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+ (mode_info->atom_context->bios + data_offset);
- switch (tv_info->ucTV_BootUpDefaultStandard) {
- case ATOM_TV_NTSC:
- tv_std = TV_STD_NTSC;
- DRM_INFO("Default TV standard: NTSC\n");
- break;
- case ATOM_TV_NTSCJ:
- tv_std = TV_STD_NTSC_J;
- DRM_INFO("Default TV standard: NTSC-J\n");
- break;
- case ATOM_TV_PAL:
- tv_std = TV_STD_PAL;
- DRM_INFO("Default TV standard: PAL\n");
- break;
- case ATOM_TV_PALM:
- tv_std = TV_STD_PAL_M;
- DRM_INFO("Default TV standard: PAL-M\n");
- break;
- case ATOM_TV_PALN:
- tv_std = TV_STD_PAL_N;
- DRM_INFO("Default TV standard: PAL-N\n");
- break;
- case ATOM_TV_PALCN:
- tv_std = TV_STD_PAL_CN;
- DRM_INFO("Default TV standard: PAL-CN\n");
- break;
- case ATOM_TV_PAL60:
- tv_std = TV_STD_PAL_60;
- DRM_INFO("Default TV standard: PAL-60\n");
- break;
- case ATOM_TV_SECAM:
- tv_std = TV_STD_SECAM;
- DRM_INFO("Default TV standard: SECAM\n");
- break;
- default:
- tv_std = TV_STD_NTSC;
- DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
- break;
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_INFO("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_INFO("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_INFO("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
}
return tv_std;
}
@@ -1358,11 +1435,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
uint8_t bg, dac;
struct radeon_encoder_tv_dac *tv_dac = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
- dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
- if (dac_info) {
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
if (!tv_dac)
@@ -1385,20 +1463,532 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
return tv_dac;
}
-void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+static const char *thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "asc7xxx",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "RV6xx",
+ "RV770",
+ "adt7473",
+ "External GPIO",
+ "Evergreen",
+ "adt7473 with internal",
+};
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+};
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
- DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u32 misc, misc2 = 0, sclk, mclk;
+ union power_info *power_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ struct _ATOM_PPLIB_STATE *power_state;
+ int num_modes = 0, i, j;
+ int state_index = 0, mode_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+ if (frev < 4) {
+ /* add the i2c bus for thermal/fan chip */
+ if (power_info->info.ucOverdriveThermalController > 0) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ state_index++;
+ break;
+ }
+ }
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ } else {
+ int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ uint8_t fw_frev, fw_crev;
+ uint16_t fw_data_offset, vddc = 0;
+ union firmware_info *firmware_info;
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+
+ if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
+ &fw_frev, &fw_crev, &fw_data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ fw_data_offset);
+ vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ }
- args.ucEnable = enable;
+ /* add the i2c bus for an external thermal/fan chip */
+ /* internal controllers are detected below but not yet driven */
+ if (controller->ucType > 0) {
+ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
+ } else {
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ }
+ /* first mode is usually default, followed by low to high */
+ for (i = 0; i < power_info->info_4.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (struct _ATOM_PPLIB_STATE *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usStateArrayOffset) +
+ i * power_info->info_4.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
+ (power_state->ucNonClockStateIndex *
+ power_info->info_4.ucNonClockSize));
+ for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
+ if (rdev->flags & RADEON_IS_IGP) {
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
+ sclk |= clock_info->ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ continue;
+ /* voltage works differently on IGPs */
+ mode_index++;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ /* XXX usVDDCI */
+ mode_index++;
+ } else {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ }
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ misc2 = le16_to_cpu(non_clock_info->usClassification);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
+ }
+ }
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ }
+ } else {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
+ }
+
+ rdev->pm.num_power_states = state_index;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
}
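
Throughout the PPLib branch above, each clock is stored by the BIOS as a 16-bit little-endian low word plus an 8-bit high byte; the parser reassembles the 24-bit value (again in 10 kHz units) the same way for engine and memory clocks:

    /* reassemble a 24-bit clock from the split PPLib encoding */
    u32 sclk = le16_to_cpu(clock_info->usEngineClockLow);
    sclk |= clock_info->ucEngineClockHigh << 16;
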
-void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
- ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
args.ucEnable = enable;
@@ -1448,6 +2038,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union set_voltage {
+ struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+ struct _SET_VOLTAGE_PARAMETERS v1;
+ struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev, volt_index = level;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (crev) {
+ case 1:
+ args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+ args.v1.ucVoltageIndex = volt_index;
+ break;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+ args.v2.usVoltageLevel = cpu_to_le16(level);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
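
radeon_atom_set_voltage() above dispatches on the SetVoltage command-table revision: a crev-1 table takes a voltage index, a crev-2 table a little-endian voltage level. A usage sketch, feeding it the boot-up VDDC that radeon_atombios_get_power_modes() caches:

    u16 vddc = rdev->pm.power_state[rdev->pm.default_power_state_index]
                       .clock_info[0].voltage.voltage;
    radeon_atom_set_voltage(rdev, vddc);
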
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 000000000000..ed5dfe58f29c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for Intel/ATI hybrid graphics platforms
+ */
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/pci.h>
+
+#define ATPX_VERSION 0
+#define ATPX_GPU_PWR 2
+#define ATPX_MUX_SELECT 3
+
+#define ATPX_INTEGRATED 0
+#define ATPX_DISCRETE 1
+
+#define ATPX_MUX_IGD 0
+#define ATPX_MUX_DISCRETE 1
+
+static struct radeon_atpx_priv {
+ bool atpx_detected;
+ /* ACPI handles for the GPU device and its ATPX/ATRM methods */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+ acpi_handle atrm_handle;
+} radeon_atpx_priv;
+
+/* retrieve the ROM in 4k blocks */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object atrm_arg_elements[2], *obj;
+ struct acpi_object_list atrm_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atrm_arg.count = 2;
+ atrm_arg.pointer = &atrm_arg_elements[0];
+
+ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[0].integer.value = offset;
+
+ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ /* get the discrete ROM only via ATRM */
+ if (!radeon_atpx_priv.atpx_detected)
+ return false;
+
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return false;
+ return true;
+}
+
+
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+}
+
+static int radeon_atpx_get_version(acpi_handle handle)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2], *obj;
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = ATPX_VERSION;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[1].integer.value = ATPX_VERSION;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ if (obj && (obj->type == ACPI_TYPE_BUFFER))
+ printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
+ kfree(buffer.pointer);
+ return 0;
+}
+
+static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2];
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ uint8_t buf[4] = {0};
+
+ if (!handle)
+ return -EINVAL;
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = cmd_id;
+
+ buf[2] = value & 0xff;
+ buf[3] = (value >> 8) & 0xff;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atpx_arg_elements[1].buffer.length = 4;
+ atpx_arg_elements[1].buffer.pointer = buf;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ kfree(buffer.pointer);
+
+ return 0;
+}
+
+static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
+{
+ return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
+}
+
+static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
+{
+ return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
+}
+
+
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, ATPX_MUX_IGD);
+ else
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, ATPX_MUX_DISCRETE);
+ return 0;
+}
+
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ /* on the W500, ACPI can't change the Intel GPU's power state */
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
+ return 0;
+}
+
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, atpx_handle, atrm_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+ radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+}
+
+static int radeon_atpx_init(void)
+{
+ /* set up the ATPX handle */
+
+ radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
+ return 0;
+}
+
+static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler radeon_atpx_handler = {
+ .switchto = radeon_atpx_switchto,
+ .power_state = radeon_atpx_power_state,
+ .init = radeon_atpx_init,
+ .get_client_id = radeon_atpx_get_client_id,
+};
+
+static bool radeon_atpx_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= radeon_atpx_pci_probe_handle(pdev);
+ }
+
+ if (has_atpx && vga_count == 2) {
+ acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void radeon_register_atpx_handler(void)
+{
+ bool r;
+
+ /* detect whether the system has an ATPX method and two VGA-class devices */
+ r = radeon_atpx_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&radeon_atpx_handler);
+}
+
+void radeon_unregister_atpx_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
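
The new file hooks radeon into vga_switcheroo for hybrid laptops: ATPX_MUX_SELECT routes the display mux between the integrated and discrete GPU, ATPX_GPU_PWR powers the discrete GPU up or down, and the ATRM method (probed alongside ATPX) serves the discrete card's ROM, which is not reachable at the legacy address on such machines. A sketch of switching to the discrete GPU using the constants defined at the top of the file (the 1 is the vga_switcheroo "on" state):

    radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, ATPX_MUX_DISCRETE);
    radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, 1);
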
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 906921740c60..2c9213739999 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,8 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
/*
* BIOS.
*/
@@ -46,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
+ if (!(rdev->flags & RADEON_IS_IGP))
+ if (!radeon_card_posted(rdev))
+ return false;
+
rdev->bios = NULL;
vram_base = drm_get_resource_start(rdev->ddev, 0);
bios = ioremap(vram_base, size);
@@ -62,7 +68,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
iounmap(bios);
return false;
}
- memcpy(rdev->bios, bios, size);
+ memcpy_fromio(rdev->bios, bios, size);
iounmap(bios);
return true;
}
@@ -83,16 +89,47 @@ static bool radeon_read_bios(struct radeon_device *rdev)
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmalloc(size, GFP_KERNEL);
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- memcpy(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
+/* ATRM is used to get the BIOS on the discrete card in
+ * dual-GPU systems.
+ */
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ int ret;
+ int size = 64 * 1024;
+ int i;
+
+ if (!radeon_atrm_supported(rdev->pdev))
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+ DRM_ERROR("Unable to allocate bios\n");
+ return false;
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+ ret = radeon_atrm_get_bios_chunk(rdev->bios,
+ (i * ATRM_BIOS_PAGE),
+ ATRM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+
+ if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ kfree(rdev->bios);
+ return false;
+ }
+ return true;
+}
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -388,16 +425,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
+
bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
uint16_t tmp;
- if (rdev->flags & RADEON_IS_IGP) {
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
- r = radeon_read_bios(rdev);
- } else
+ if (r == false)
r = radeon_read_bios(rdev);
if (r == false) {
r = radeon_read_disabled_bios(rdev);
@@ -408,6 +445,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
return false;
}
if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+ goto free_bios;
+ }
+
+ tmp = RBIOS16(0x18);
+ if (RBIOS8(tmp + 0x14) != 0x0) {
+ DRM_INFO("Not an x86 BIOS ROM, not using.\n");
goto free_bios;
}
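The two checks above follow the PCI expansion ROM layout: the image must
start with the 0x55AA signature, the word at offset 0x18 points to the PCI
data structure (PCIR), and the byte at offset 0x14 inside the PCIR is the
code type, where 0x00 means x86 code. A minimal standalone sketch of the
same test (rom_is_x86 is a hypothetical helper):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool rom_is_x86(const uint8_t *bios, size_t len)
    {
            uint16_t pcir;

            if (len < 0x1a || bios[0] != 0x55 || bios[1] != 0xaa)
                    return false;              /* missing 0x55AA signature */
            pcir = bios[0x18] | (bios[0x19] << 8);
            if ((size_t)pcir + 0x15 > len)
                    return false;              /* PCIR lies outside the image */
            return bios[pcir + 0x14] == 0x00;  /* 0x00 = x86 code type */
    }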
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405bf42f..f64936cc4dd9 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
p2pll->max_frac_feedback_div = 0;
}
+ /* dcpll is DCE4 only */
+ dcpll->min_post_div = 2;
+ dcpll->max_post_div = 0x7f;
+ dcpll->min_frac_feedback_div = 0;
+ dcpll->max_frac_feedback_div = 9;
+ dcpll->min_ref_div = 2;
+ dcpll->max_ref_div = 0x3ff;
+ dcpll->min_feedback_div = 4;
+ dcpll->max_feedback_div = 0xfff;
+ dcpll->best_vco = 0;
+
p1pll->min_ref_div = 2;
p1pll->max_ref_div = 0x3ff;
p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
/* XXX make sure engine is idle */
if (radeon_dynclks != -1) {
- if (radeon_dynclks)
- radeon_set_clock_gating(rdev, 1);
+ if (radeon_dynclks) {
+ if (rdev->asic->set_clock_gating)
+ radeon_set_clock_gating(rdev, 1);
+ }
}
radeon_apply_clock_quirks(rdev);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e7b19440102e..2417d7b06fdb 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
int rev;
uint16_t offset = 0, check_offset;
+ if (!rdev->bios)
+ return 0;
+
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+ int edid_info;
+ struct edid *edid;
+ unsigned char *raw;
+ edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ if (!edid_info)
+ return false;
+
+ raw = rdev->bios + edid_info;
+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
+ if (edid == NULL)
+ return false;
+
+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
+
+ if (!drm_edid_is_valid(edid)) {
+ kfree(edid);
+ return false;
+ }
+
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ return true;
+}
+
+struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+ if (rdev->mode_info.bios_hardcoded_edid)
+ return rdev->mode_info.bios_hardcoded_edid;
+ return NULL;
+}
+
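radeon_combios_check_hardcoded_edid sizes its copy from byte 0x7e of the
base EDID block, which holds the number of 128-byte extension blocks that
follow. A standalone sketch of that computation (edid_total_size is a
hypothetical helper; the caller must guarantee at least one full base
block):

    #include <stddef.h>
    #include <stdint.h>

    #define EDID_BLOCK_LEN 128  /* EDID_LENGTH in the kernel headers */

    static size_t edid_total_size(const uint8_t *raw)
    {
            return (size_t)EDID_BLOCK_LEN * (raw[0x7e] + 1);
    }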
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
int ddc_line)
{
@@ -486,9 +522,62 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
}
- if (rdev->family < CHIP_R200)
- i2c.hw_capable = false;
- else {
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R200:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_MONID:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_CRT2_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
switch (ddc_line) {
case RADEON_GPIO_VGA_DDC:
case RADEON_GPIO_DVI_DDC:
@@ -504,9 +593,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.hw_capable = false;
break;
}
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
+ i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
i2c.valid = true;
@@ -527,9 +621,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
int8_t rev;
uint16_t sclk, mclk;
- if (rdev->bios == NULL)
- return false;
-
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
rev = RBIOS8(pll_info);
@@ -539,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
p1pll->reference_div = RBIOS16(pll_info + 0x10);
p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
if (rev > 9) {
p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
@@ -654,9 +747,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
if (!p_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
@@ -670,10 +760,11 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (p_dac->ps2_pdac_adj)
+ found = 1;
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
@@ -687,9 +778,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
- if (rdev->bios == NULL)
- return tv_std;
-
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +881,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
if (!tv_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* first check TV table */
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (dac_info) {
@@ -812,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0x10) & 0xf;
dac = RBIOS8(dac_info + 0x11) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
} else if (rev > 1) {
bg = RBIOS8(dac_info + 0xc) & 0xf;
dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -825,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0xe) & 0xf;
dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
}
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
@@ -842,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
} else {
bg = RBIOS8(dac_info + 0x4) & 0xf;
dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -850,14 +941,15 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
- found = 1;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
}
} else {
DRM_INFO("No TV DAC info found in BIOS\n");
}
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
@@ -945,11 +1037,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
int tmp, i;
struct radeon_encoder_lvds *lvds = NULL;
- if (rdev->bios == NULL) {
- lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
- goto out;
- }
-
lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
if (lcd_info) {
@@ -1026,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
break;
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- (RBIOS16(tmp + 2) ==
- lvds->native_mode.vdisplay)) {
- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
- RBIOS16(tmp + 21)) * 8;
-
- lvds->native_mode.vtotal = RBIOS16(tmp + 24);
- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
- lvds->native_mode.vsync_end =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
- (RBIOS16(tmp + 28) & 0x7ff);
+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ (RBIOS8(tmp + 23) * 8);
+
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
@@ -1050,7 +1139,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
DRM_INFO("No panel info found in BIOS\n");
lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
}
-out:
+
if (lvds)
encoder->native_mode = lvds->native_mode;
return lvds;
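The vertical sync values decoded above come packed in one 16-bit BIOS word:
the low 11 bits carry the vsync start offset and the top 5 bits the sync
width. A standalone sketch of that unpacking (the struct and helper are
hypothetical, for illustration only):

    #include <stdint.h>

    struct vsync_fields {
            uint16_t start_off;     /* offset from vdisplay, in lines */
            uint8_t width;          /* sync pulse width, in lines */
    };

    static struct vsync_fields decode_vsync_word(uint16_t w)
    {
            struct vsync_fields v;

            v.start_off = w & 0x7ff;        /* low 11 bits */
            v.width = (w & 0xf800) >> 11;   /* top 5 bits */
            return v;
    }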
@@ -1102,9 +1191,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
int i, n;
uint8_t ver;
- if (rdev->bios == NULL)
- return false;
-
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
@@ -1184,9 +1270,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
- if (rdev->bios == NULL)
- return false;
-
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1336,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_LCD: /* MM i2c */
- DRM_ERROR("MM i2c requires hw i2c engine\n");
+ i2c_bus.valid = true;
+ i2c_bus.hw_capable = true;
+ i2c_bus.mm_i2c = true;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
default:
DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1279,52 +1365,57 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
rdev->mode_info.connector_table = radeon_connector_table;
if (rdev->mode_info.connector_table == CT_NONE) {
#ifdef CONFIG_PPC_PMAC
- if (machine_is_compatible("PowerBook3,3")) {
+ if (of_machine_is_compatible("PowerBook3,3")) {
/* powerbook with VGA */
rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
- } else if (machine_is_compatible("PowerBook3,4") ||
- machine_is_compatible("PowerBook3,5")) {
+ } else if (of_machine_is_compatible("PowerBook3,4") ||
+ of_machine_is_compatible("PowerBook3,5")) {
/* powerbook with internal tmds */
rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
- } else if (machine_is_compatible("PowerBook5,1") ||
- machine_is_compatible("PowerBook5,2") ||
- machine_is_compatible("PowerBook5,3") ||
- machine_is_compatible("PowerBook5,4") ||
- machine_is_compatible("PowerBook5,5")) {
+ } else if (of_machine_is_compatible("PowerBook5,1") ||
+ of_machine_is_compatible("PowerBook5,2") ||
+ of_machine_is_compatible("PowerBook5,3") ||
+ of_machine_is_compatible("PowerBook5,4") ||
+ of_machine_is_compatible("PowerBook5,5")) {
/* powerbook with external single link tmds (sil164) */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
- } else if (machine_is_compatible("PowerBook5,6")) {
+ } else if (of_machine_is_compatible("PowerBook5,6")) {
/* powerbook with external dual or single link tmds */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
- } else if (machine_is_compatible("PowerBook5,7") ||
- machine_is_compatible("PowerBook5,8") ||
- machine_is_compatible("PowerBook5,9")) {
+ } else if (of_machine_is_compatible("PowerBook5,7") ||
+ of_machine_is_compatible("PowerBook5,8") ||
+ of_machine_is_compatible("PowerBook5,9")) {
/* PowerBook6,2 ? */
/* powerbook with external dual link tmds (sil1178?) */
rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
- } else if (machine_is_compatible("PowerBook4,1") ||
- machine_is_compatible("PowerBook4,2") ||
- machine_is_compatible("PowerBook4,3") ||
- machine_is_compatible("PowerBook6,3") ||
- machine_is_compatible("PowerBook6,5") ||
- machine_is_compatible("PowerBook6,7")) {
+ } else if (of_machine_is_compatible("PowerBook4,1") ||
+ of_machine_is_compatible("PowerBook4,2") ||
+ of_machine_is_compatible("PowerBook4,3") ||
+ of_machine_is_compatible("PowerBook6,3") ||
+ of_machine_is_compatible("PowerBook6,5") ||
+ of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
- } else if (machine_is_compatible("PowerMac4,4")) {
+ } else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
- } else if (machine_is_compatible("PowerMac10,1")) {
+ } else if (of_machine_is_compatible("PowerMac10,1")) {
/* mini with internal tmds */
rdev->mode_info.connector_table = CT_MINI_INTERNAL;
- } else if (machine_is_compatible("PowerMac10,2")) {
+ } else if (of_machine_is_compatible("PowerMac10,2")) {
/* mini with external tmds */
rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
- } else if (machine_is_compatible("PowerMac12,1")) {
+ } else if (of_machine_is_compatible("PowerMac12,1")) {
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
} else
#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+ if (ASIC_IS_RN50(rdev))
+ rdev->mode_info.connector_table = CT_RN50_POWER;
+ else
+#endif
rdev->mode_info.connector_table = CT_GENERIC;
}
@@ -1767,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_RN50_POWER:
+ DRM_INFO("Connector Table: %d (rn50-power)\n",
+ rdev->mode_info.connector_table);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -1820,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
return false;
}
- /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
- if (dev->pdev->device == 0x5159 &&
- dev->pdev->subsystem_vendor == 0x1002 &&
- dev->pdev->subsystem_device == 0x013a) {
- if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
- *legacy_connector = CONNECTOR_CRT_LEGACY;
-
- }
-
/* X300 card with extra non-existent DVI port */
if (dev->pdev->device == 0x5B60 &&
dev->pdev->subsystem_vendor == 0x17af &&
@@ -1909,9 +2018,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
struct radeon_i2c_bus_rec ddc_i2c;
struct radeon_hpd hpd;
- if (rdev->bios == NULL)
- return false;
-
conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
if (conn_info) {
for (i = 0; i < 4; i++) {
@@ -1943,6 +2049,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
+ ddc_i2c.valid = false;
break;
}
@@ -2115,7 +2222,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- hpd.hpd = RADEON_HPD_NONE;
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
@@ -2256,6 +2363,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
hpd.hpd = RADEON_HPD_NONE;
+ ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2278,6 +2386,114 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 offset, misc, misc2 = 0;
+ u8 rev, blocks, tmp;
+ int state_index = 0;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ blocks = RBIOS8(offset + 0x2);
+ /* power mode 0 tends to be the only valid one */
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ goto default_mode;
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ misc = RBIOS16(offset + 0x5 + 0x0);
+ if (rev > 4)
+ misc2 = RBIOS16(offset + 0x5 + 0xe);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ if (misc & 0x4) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+ if (misc & 0x8)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+ if (rev < 6) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(offset + 0x5 + 0xb) * 4;
+ tmp = RBIOS8(offset + 0x5 + 0xd);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else {
+ u8 entries = RBIOS8(offset + 0x5 + 0xb);
+ u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+ if (entries && voltage_table_offset) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(voltage_table_offset) * 4;
+ tmp = RBIOS8(voltage_table_offset + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+ }
+ switch ((misc2 & 0x700) >> 8) {
+ case 0:
+ default:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+ break;
+ case 1:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+ break;
+ case 4:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+ break;
+ }
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rev > 6)
+ rdev->pm.power_state[state_index].pcie_lanes =
+ RBIOS8(offset + 0x5 + 0x10);
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ state_index++;
+ } else {
+ /* XXX figure out some good default low power mode for mobility cards w/out power tables */
+ }
+ } else {
+ /* XXX figure out some good default low power mode for desktop cards */
+ }
+
+default_mode:
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+ if ((state_index > 0) &&
+ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+ rdev->pm.power_state[state_index].clock_info[0].voltage =
+ rdev->pm.power_state[0].clock_info[0].voltage;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.power_state[state_index].flags = 0;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = state_index + 1;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+}
+
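The switch above maps bits 8-10 of misc2 onto a voltage switch delay in
steps of 33, with out-of-range selectors falling back to 0. The same
decoding as a table lookup, sketched standalone (voltage_delay_from_misc2
is a hypothetical helper; the units are whatever the driver's
voltage.delay field uses):

    #include <stdint.h>

    static uint8_t voltage_delay_from_misc2(uint16_t misc2)
    {
            /* selectors 5-7 hit the switch's default case, i.e. 0 */
            static const uint8_t delay[8] = { 0, 33, 66, 99, 132, 0, 0, 0 };

            return delay[(misc2 & 0x700) >> 8];
    }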
void radeon_external_tmds_setup(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2289,23 +2505,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
switch (tmds->dvo_chip) {
case DVO_SIL164:
/* sil 164 */
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x08, 0x30);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x09, 0x00);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0a, 0x90);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0c, 0x89);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x08, 0x3b);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
break;
case DVO_SIL1178:
/* sil 1178 - untested */
@@ -2338,9 +2552,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
uint32_t reg, val, and_mask, or_mask;
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
- if (rdev->bios == NULL)
- return false;
-
if (!tmds)
return false;
@@ -2390,11 +2601,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
index++;
val = RBIOS8(index);
index++;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2656,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
reg = id & 0x1fff;
val = RBIOS8(index);
index += 1;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2835,6 +3042,22 @@ void radeon_combios_asic_init(struct drm_device *dev)
combios_write_ram_size(dev);
}
+ /* quirk for rs4xx HP nx6125 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x308b)
+ return;
+
+ /* quirk for rs4xx HP dv5000 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x30a4)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f399..adccbc2c202c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_connector *conflict;
+ struct radeon_connector *radeon_conflict;
int i;
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
if (conflict == connector)
continue;
+ radeon_conflict = to_radeon_connector(conflict);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (conflict->encoder_ids[i] == 0)
break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
if (conflict->status != connector_status_connected)
continue;
+ if (radeon_conflict->use_digital)
+ continue;
+
if (priority == true) {
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
if (property == rdev->mode_info.coherent_mode_property) {
struct radeon_encoder_atom_dig *dig;
+ bool new_coherent_mode;
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
dig = radeon_encoder->enc_priv;
- dig->coherent_mode = val ? true : false;
- radeon_property_change_mode(&radeon_encoder->base);
+ new_coherent_mode = val ? true : false;
+ if (dig->coherent_mode != new_coherent_mode) {
+ dig->coherent_mode = new_coherent_mode;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
}
if (property == rdev->mode_info.tv_std_property) {
@@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
radeon_encoder = to_radeon_encoder(encoder);
if (!radeon_encoder->enc_priv)
return 0;
- if (rdev->is_atom_bios) {
+ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
struct radeon_encoder_atom_dac *dac_int;
dac_int = radeon_encoder->enc_priv;
dac_int->tv_std = val;
@@ -479,10 +488,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -587,19 +594,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +746,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -774,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
} else
ret = connector_status_connected;
- /* multiple connectors on the same encoder with the same ddc line
- * This tends to be HDMI and DVI on the same encoder with the
- * same ddc line. If the edid says HDMI, consider the HDMI port
- * connected and the DVI port disconnected. If the edid doesn't
- * say HDMI, vice versa.
+ /* This gets complicated. We have boards with VGA + HDMI with a
+ * shared DDC line and we have boards with DVI-D + HDMI with a shared
+ * DDC line. The latter is more complex because with DVI<->HDMI adapters
+ * you don't really know what's connected to which port as both are digital.
*/
if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
if (connector == list_connector)
continue;
list_radeon_connector = to_radeon_connector(list_connector);
- if (radeon_connector->devices == list_radeon_connector->devices) {
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- ret = connector_status_disconnected;
- }
- } else {
- if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ if (list_radeon_connector->shared_ddc &&
+ (list_radeon_connector->ddc_bus->rec.i2c_id ==
+ radeon_connector->ddc_bus->rec.i2c_id)) {
+ /* cases where both connectors are digital */
+ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+ /* hpd is our only option in this case */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
@@ -988,12 +982,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
ret = connector_status_connected;
}
} else {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
return ret;
@@ -1046,7 +1038,6 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1081,9 +1072,7 @@ radeon_add_atom_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1093,12 +1082,11 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1118,9 +1106,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1146,9 +1132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
@@ -1168,9 +1152,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1196,9 +1178,7 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1216,9 +1196,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1231,6 +1209,12 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
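The polling setup above (repeated for the legacy connectors below) encodes
a simple policy: connectors with a real hotplug pin rely on the HPD
interrupt, while the rest are polled for connects only if they have a DDC
bus to probe. A standalone sketch of that decision (the enum and helper
are hypothetical stand-ins for the DRM_CONNECTOR_POLL_* flags):

    #include <stdbool.h>

    enum poll_mode { POLL_NONE, POLL_CONNECT, POLL_HPD };

    static enum poll_mode pick_poll_mode(bool has_hpd_pin, bool has_ddc)
    {
            if (has_hpd_pin)
                    return POLL_HPD;        /* hotplug interrupt available */
            return has_ddc ? POLL_CONNECT : POLL_NONE;
    }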
@@ -1255,7 +1239,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1283,9 +1266,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1295,12 +1276,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1314,13 +1294,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
+ }
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1333,9 +1313,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
* of false positive on load detect, we haven't yet
@@ -1354,9 +1332,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1369,6 +1345,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d31..2f042a3c0e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
return -EBUSY;
}
-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
+static void radeon_init_pipes(struct drm_device *dev)
{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
uint32_t gb_tile_config, gb_pipe_sel = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -434,13 +435,19 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+ /* SE cards have 1 pipe */
+ if ((dev->pdev->device == 0x5e4c) ||
+ (dev->pdev->device == 0x5e4f))
+ dev_priv->num_gb_pipes = 1;
} else {
/* R3xx */
- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+ dev->pdev->device != 0x4144) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+ dev->pdev->device != 0x4148)) {
dev_priv->num_gb_pipes = 2;
} else {
- /* R3Vxx */
+ /* RV3xx/R300 AD/R350 AH */
dev_priv->num_gb_pipes = 1;
}
}
@@ -736,7 +743,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
/* setup the raster pipes */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
- radeon_init_pipes(dev_priv);
+ radeon_init_pipes(dev);
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
@@ -1644,6 +1651,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+ dev_priv->have_z_offset = 0;
radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1f..ae0fb7356e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,11 +193,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
radeon_bo_list_unreserve(&parser->validated);
- for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj) {
- mutex_lock(&parser->rdev->ddev->struct_mutex);
- drm_gem_object_unreference(parser->relocs[i].gobj);
- mutex_unlock(&parser->rdev->ddev->struct_mutex);
+ if (parser->relocs != NULL) {
+ for (i = 0; i < parser->nrelocs; i++) {
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
}
kfree(parser->track);
@@ -221,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
- if (rdev->gpu_lockup) {
- mutex_unlock(&rdev->cs_mutex);
- return -EINVAL;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -246,7 +241,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
r = radeon_cs_parser_relocs(&parser);
if (r) {
- DRM_ERROR("Failed to parse relocation !\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a37009c..4eb67c0e0996 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
- (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ } else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,19 +186,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
unpin:
if (radeon_crtc->cursor_bo) {
radeon_gem_object_unpin(radeon_crtc->cursor_bo);
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(radeon_crtc->cursor_bo);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
return 0;
fail:
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
- return 0;
+ return ret;
}
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
yorigin = CURSOR_HEIGHT - 1;
radeon_lock_cursor(crtc, true);
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ /* cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ /* XXX: check if evergreen has the same issues as avivo chips */
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
int w = radeon_crtc->cursor_width;
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b1509fa03..dd279da90546 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,15 +26,64 @@
* Jerome Glisse
*/
#include <linux/console.h>
+#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
-#include "radeon_asic.h"
#include "atom.h"
+static const char radeon_family_name[][16] = {
+ "R100",
+ "RV100",
+ "RS100",
+ "RV200",
+ "RS200",
+ "R200",
+ "RV250",
+ "RS300",
+ "RV280",
+ "R300",
+ "R350",
+ "RV350",
+ "RV380",
+ "R420",
+ "R423",
+ "RV410",
+ "RS400",
+ "RS480",
+ "RS600",
+ "RS690",
+ "RS740",
+ "RV515",
+ "R520",
+ "RV530",
+ "RV560",
+ "RV570",
+ "R580",
+ "R600",
+ "RV610",
+ "RV630",
+ "RV670",
+ "RV620",
+ "RV635",
+ "RS780",
+ "RS880",
+ "RV770",
+ "RV730",
+ "RV710",
+ "RV740",
+ "CEDAR",
+ "REDWOOD",
+ "JUNIPER",
+ "CYPRESS",
+ "HEMLOCK",
+ "LAST",
+};
+
/*
* Clear GPU surface registers.
*/
@@ -100,80 +149,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
-/*
- * MC common functions
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * This function tries to place VRAM at the base address provided
+ * as a parameter (which is so far either the PCI aperture address
+ * or, for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us room for
+ * all the VRAM, then we restrict the VRAM to the PCI aperture size
+ * and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on AGP platforms.
+ *
+ * Note: we don't explicitly enforce VRAM start to be aligned to the VRAM
+ * size; this shouldn't be a problem as we are using the PCI aperture as a
+ * reference. Otherwise this would be needed for rv280, all r3xx, and all
+ * r4xx, but not IGP.
+ *
+ * Note: we use mc_vram_size because on some boards we need to program the MC
+ * to cover the whole aperture even if the VRAM size is smaller than the
+ * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size, because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
+ * Ubuntu ones).
+ *
+ * Note: the IGP TOM address should be the same as the aperture address; we
+ * don't explicitly check for that, though.
+ *
+ * FIXME: when reducing VRAM size, align the new size on a power of 2.
*/
-int radeon_mc_setup(struct radeon_device *rdev)
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+ mc->vram_start = base;
+ if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
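A worked example of the 32-bit overflow clamp in radeon_vram_location, with
made-up numbers: 2 GiB of VRAM placed at 0xC0000000 cannot fit below 4 GiB,
so the size is limited to the (assumed 256 MiB) PCI aperture. Standalone
sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t base = 0xC0000000ULL;
            uint64_t mc_vram_size = 2ULL << 30;     /* 2 GiB */
            uint64_t aper_size = 256ULL << 20;      /* 256 MiB aperture */

            if (mc_vram_size > (0xFFFFFFFFULL - base + 1))
                    mc_vram_size = aper_size;       /* limit VRAM to aperture */
            printf("VRAM: 0x%llx - 0x%llx\n",
                   (unsigned long long)base,
                   (unsigned long long)(base + mc_vram_size - 1));
            return 0;                       /* prints 0xc0000000 - 0xcfffffff */
    }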
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place the GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size, align the new size on a power of 2.
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
- uint32_t tmp;
+ u64 size_af, size_bf;
- /* Some chips have an "issue" with the memory controller, the
- * location must be aligned to the size. We just align it down,
- * too bad if we walk over the top of system memory, we don't
- * use DMA without a remapped anyway.
- * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
- */
- /* FGLRX seems to setup like this, VRAM a 0, then GART.
- */
- /*
- * Note: from R6xx the address space is 40bits but here we only
- * use 32bits (still have to see a card which would exhaust 4G
- * address space).
- */
- if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
- /* vram location was already setup try to put gtt after
- * if it fits */
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- rdev->mc.gtt_location = tmp;
- } else {
- if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
- printk(KERN_ERR "[drm] GTT too big to fit "
- "before or after vram location.\n");
- return -EINVAL;
- }
- rdev->mc.gtt_location = 0;
- }
- } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
- /* gtt location was already setup try to put vram before
- * if it fits */
- if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
- rdev->mc.vram_location = 0;
- } else {
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
- tmp += (rdev->mc.mc_vram_size - 1);
- tmp &= ~(rdev->mc.mc_vram_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
- rdev->mc.vram_location = tmp;
- } else {
- printk(KERN_ERR "[drm] vram too big to fit "
- "before or after GTT location.\n");
- return -EINVAL;
- }
+ size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_bf = mc->vram_start & ~mc->gtt_base_align;
+ if (size_bf > size_af) {
+ if (mc->gtt_size > size_bf) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_bf;
}
+ mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
} else {
- rdev->mc.vram_location = 0;
- tmp = rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location = tmp;
- }
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
- DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.vram_location,
- (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
- DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
- DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.gtt_location,
- (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
- return 0;
+ if (mc->gtt_size > size_af) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_af;
+ }
+ mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+ }
+ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
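A worked example of the placement arithmetic above (editor's sketch, not part of the patch; all values hypothetical, with gtt_base_align = 0, i.e. no extra alignment constraint):

	/* 256M of VRAM at 0, 512M of GTT requested, gtt_base_align = 0 */
	u64 vram_start = 0x00000000, vram_end = 0x0FFFFFFF, align = 0;
	u64 size_af = ((0xFFFFFFFF - vram_end) + align) & ~align;	/* 0xF0000000 */
	u64 size_bf = vram_start & ~align;				/* 0x00000000 */
	/* size_bf <= size_af, so the GTT goes after VRAM: */
	u64 gtt_start = (vram_end + 1 + align) & ~align;		/* 0x10000000 */
	u64 gtt_end = gtt_start + (512ULL << 20) - 1;			/* 0x2FFFFFFF */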
-
/*
* GPU helpers function.
*/
@@ -182,7 +254,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
reg = RREG32(AVIVO_D1CRTC_CONTROL) |
RREG32(AVIVO_D2CRTC_CONTROL);
if (reg & AVIVO_CRTC_EN) {
@@ -209,6 +290,36 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+ fixed20_12 a;
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = rdev->clock.default_mclk;
+
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
+ a.full = dfixed_const(16);
+ /* core_bandwidth = sclk(Mhz) * 16 */
+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+ } else {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = radeon_get_memory_clock(rdev);
+
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+ }
+}
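The divisions by 100 above convert clocks reported in 10 kHz units into MHz held as 20.12 fixed point; a minimal sketch of the same conversion, assuming the dfixed_* helper semantics used throughout this patch:

	/* Editor's sketch, not part of the patch. */
	fixed20_12 mhz, hundred;
	u32 sclk = 40000;			/* 400 MHz in 10 kHz units */
	hundred.full = dfixed_const(100);
	mhz.full = dfixed_const(sclk);
	mhz.full = dfixed_div(mhz, hundred);	/* dfixed_trunc(mhz) == 400 */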
+
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
@@ -229,6 +340,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
int radeon_dummy_page_init(struct radeon_device *rdev)
{
+ if (rdev->dummy_page.page)
+ return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
@@ -253,173 +366,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
}
-/*
- * Registers accessors functions.
- */
-uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
- BUG_ON(1);
- return 0;
-}
-
-void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
- BUG_ON(1);
-}
-
-void radeon_register_accessor_init(struct radeon_device *rdev)
-{
- rdev->mc_rreg = &radeon_invalid_rreg;
- rdev->mc_wreg = &radeon_invalid_wreg;
- rdev->pll_rreg = &radeon_invalid_rreg;
- rdev->pll_wreg = &radeon_invalid_wreg;
- rdev->pciep_rreg = &radeon_invalid_rreg;
- rdev->pciep_wreg = &radeon_invalid_wreg;
-
- /* Don't change order as we are overridding accessor. */
- if (rdev->family < CHIP_RV515) {
- rdev->pcie_reg_mask = 0xff;
- } else {
- rdev->pcie_reg_mask = 0x7ff;
- }
- /* FIXME: not sure here */
- if (rdev->family <= CHIP_R580) {
- rdev->pll_rreg = &r100_pll_rreg;
- rdev->pll_wreg = &r100_pll_wreg;
- }
- if (rdev->family >= CHIP_R420) {
- rdev->mc_rreg = &r420_mc_rreg;
- rdev->mc_wreg = &r420_mc_wreg;
- }
- if (rdev->family >= CHIP_RV515) {
- rdev->mc_rreg = &rv515_mc_rreg;
- rdev->mc_wreg = &rv515_mc_wreg;
- }
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
- rdev->mc_rreg = &rs400_mc_rreg;
- rdev->mc_wreg = &rs400_mc_wreg;
- }
- if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- rdev->mc_rreg = &rs690_mc_rreg;
- rdev->mc_wreg = &rs690_mc_wreg;
- }
- if (rdev->family == CHIP_RS600) {
- rdev->mc_rreg = &rs600_mc_rreg;
- rdev->mc_wreg = &rs600_mc_wreg;
- }
- if (rdev->family >= CHIP_R600) {
- rdev->pciep_rreg = &r600_pciep_rreg;
- rdev->pciep_wreg = &r600_pciep_wreg;
- }
-}
-
-
-/*
- * ASIC
- */
-int radeon_asic_init(struct radeon_device *rdev)
-{
- radeon_register_accessor_init(rdev);
- switch (rdev->family) {
- case CHIP_R100:
- case CHIP_RV100:
- case CHIP_RS100:
- case CHIP_RV200:
- case CHIP_RS200:
- case CHIP_R200:
- case CHIP_RV250:
- case CHIP_RS300:
- case CHIP_RV280:
- rdev->asic = &r100_asic;
- break;
- case CHIP_R300:
- case CHIP_R350:
- case CHIP_RV350:
- case CHIP_RV380:
- rdev->asic = &r300_asic;
- if (rdev->flags & RADEON_IS_PCIE) {
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- }
- break;
- case CHIP_R420:
- case CHIP_R423:
- case CHIP_RV410:
- rdev->asic = &r420_asic;
- break;
- case CHIP_RS400:
- case CHIP_RS480:
- rdev->asic = &rs400_asic;
- break;
- case CHIP_RS600:
- rdev->asic = &rs600_asic;
- break;
- case CHIP_RS690:
- case CHIP_RS740:
- rdev->asic = &rs690_asic;
- break;
- case CHIP_RV515:
- rdev->asic = &rv515_asic;
- break;
- case CHIP_R520:
- case CHIP_RV530:
- case CHIP_RV560:
- case CHIP_RV570:
- case CHIP_R580:
- rdev->asic = &r520_asic;
- break;
- case CHIP_R600:
- case CHIP_RV610:
- case CHIP_RV630:
- case CHIP_RV620:
- case CHIP_RV635:
- case CHIP_RV670:
- case CHIP_RS780:
- case CHIP_RS880:
- rdev->asic = &r600_asic;
- break;
- case CHIP_RV770:
- case CHIP_RV730:
- case CHIP_RV710:
- case CHIP_RV740:
- rdev->asic = &rv770_asic;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- if (rdev->flags & RADEON_IS_IGP) {
- rdev->asic->get_memory_clock = NULL;
- rdev->asic->set_memory_clock = NULL;
- }
-
- return 0;
-}
-
-
-/*
- * Wrapper around modesetting bits.
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_static_clocks_init(rdev->ddev);
- if (r) {
- return r;
- }
- DRM_INFO("Clocks initialized !\n");
- return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
-
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
@@ -524,29 +470,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-void radeon_agp_disable(struct radeon_device *rdev)
-{
- rdev->flags &= ~RADEON_IS_AGP;
- if (rdev->family >= CHIP_R600) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- } else if (rdev->family >= CHIP_RV515 ||
- rdev->family == CHIP_RV380 ||
- rdev->family == CHIP_RV410 ||
- rdev->family == CHIP_R423) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- } else {
- DRM_INFO("Forcing AGP to PCI mode\n");
- rdev->flags |= RADEON_IS_PCI;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
- }
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-}
-
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
@@ -613,6 +536,38 @@ void radeon_check_arguments(struct radeon_device *rdev)
}
}
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "radeon: switched on\n");
+ /* don't suspend or resume card normally */
+ rdev->powered_down = false;
+ radeon_resume_kms(dev);
+ drm_kms_helper_poll_enable(dev);
+ } else {
+ printk(KERN_INFO "radeon: switched off\n");
+ drm_kms_helper_poll_disable(dev);
+ radeon_suspend_kms(dev, pmm);
+ /* don't suspend or resume card normally */
+ rdev->powered_down = true;
+ }
+}
+
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
+
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -621,7 +576,6 @@ int radeon_device_init(struct radeon_device *rdev,
int r;
int dma_bits;
- DRM_INFO("radeon: Initializing kernel modesetting.\n");
rdev->shutdown = false;
rdev->dev = &pdev->dev;
rdev->ddev = ddev;
@@ -633,16 +587,25 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->gpu_lockup = false;
rdev->accel_working = false;
+
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+
	/* mutex initializations are all done here so we
	 * can recall functions without locking issues */
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
+ mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ init_waitqueue_head(&rdev->irq.vblank_queue);
+ init_waitqueue_head(&rdev->irq.idle_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -655,6 +618,14 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
radeon_check_arguments(rdev);
+ /* All of the newer IGP chips have an internal GART.
+ * However, some rs4xx chips report as AGP, so clear the AGP flag here.
+ */
+ if ((rdev->family >= CHIP_RS400) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ rdev->flags &= ~RADEON_IS_AGP;
+ }
+
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
@@ -692,6 +663,9 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ vga_switcheroo_register_client(rdev->pdev,
+ radeon_switcheroo_set_state,
+ radeon_switcheroo_can_switch);
r = radeon_init(rdev);
if (r)
@@ -701,7 +675,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@@ -721,8 +695,11 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
+ vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -736,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
+ struct drm_connector *connector;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -746,6 +724,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
+
+ /* turn off display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -755,9 +741,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_rbo) {
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
- if (unlikely(r == 0)) {
+ if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
@@ -770,11 +757,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_save_bios_scratch_regs(rdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
radeon_bo_evict_vram(rdev);
+ radeon_agp_suspend(rdev);
+
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@@ -782,15 +772,19 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
acquire_console_sem();
- fb_set_suspend(rdev->fbdev_info, 1);
+ radeon_fbdev_set_suspend(rdev, 1);
release_console_sem();
return 0;
}
int radeon_resume_kms(struct drm_device *dev)
{
+ struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
+
acquire_console_sem();
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
@@ -802,8 +796,15 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+ radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- fb_set_suspend(rdev->fbdev_info, 0);
+
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+
+ radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
/* reset hpd state */
@@ -813,6 +814,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_save_bios_scratch_regs(rdev);
+ radeon_suspend(rdev);
+
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ radeon_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+ return 0;
+ }
+ /* bad news; how do we tell userspace? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ return r;
+}
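A hypothetical caller sketch (the lockup test is illustrative; only radeon_gpu_reset() itself is part of this patch):

	/* Editor's sketch, not part of the patch. */
	if (lockup_detected) {			/* however the driver decides this */
		if (radeon_gpu_reset(rdev))
			dev_err(rdev->dev, "GPU wedged, giving up\n");
	}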
+
/*
* Debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a362b54b..8154cdf796e4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
+static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
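Each EVERGREEN_DC_LUT_30_COLOR write in the loop above packs three 10-bit components into one 30-bit word; a minimal illustration (variable names hypothetical):

	/* Editor's illustration, not part of the patch. */
	u16 r10 = 0x3FF, g10 = 0x200, b10 = 0x000;	/* each 0..1023 */
	u32 lut_word = ((u32)r10 << 20) | ((u32)g10 << 10) | (u32)b10;
	/* lut_word == 0x3FF80000 */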
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_crtc_load_lut(crtc);
+ else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
legacy_crtc_load_lut(crtc);
@@ -252,8 +284,7 @@ static const char *connector_names[15] = {
"eDP",
};
-static const char *hpd_names[7] = {
- "NONE",
+static const char *hpd_names[6] = {
"HPD1",
"HPD2",
"HPD3",
@@ -336,10 +367,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (rdev->bios) {
if (rdev->is_atom_bios) {
- if (rdev->family >= CHIP_R600)
+ ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+ if (ret == false)
ret = radeon_get_atom_connector_info_from_object_table(dev);
- else
- ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
if (ret == false)
@@ -361,6 +391,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
int ret = 0;
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +405,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
-
+ /* some servers provide a hardcoded EDID in ROM for KVMs */
+ if (!radeon_connector->edid)
+ radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -395,9 +427,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -414,13 +444,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -437,10 +467,19 @@ void radeon_compute_pll(struct radeon_pll *pll,
uint32_t best_error = 0xffffffff;
uint32_t best_vco_diff = 1;
uint32_t post_div;
+ u32 pll_out_min, pll_out_max;
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
+
if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
@@ -504,10 +543,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
tmp = (uint64_t)pll->reference_freq * feedback_div;
vco = radeon_div(tmp, ref_div);
- if (vco < pll->pll_out_min) {
+ if (vco < pll_out_min) {
min_feed_div = feedback_div + 1;
continue;
- } else if (vco > pll->pll_out_max) {
+ } else if (vco > pll_out_max) {
max_feed_div = feedback_div;
continue;
}
@@ -580,110 +619,220 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
-void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static bool
+calc_fb_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t ref_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac)
{
- fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
- fixed20_12 pll_out_max, pll_out_min;
- fixed20_12 pll_in_max, pll_in_min;
- fixed20_12 reference_freq;
- fixed20_12 error, ffreq, a, b;
-
- pll_out_max.full = rfixed_const(pll->pll_out_max);
- pll_out_min.full = rfixed_const(pll->pll_out_min);
- pll_in_max.full = rfixed_const(pll->pll_in_max);
- pll_in_min.full = rfixed_const(pll->pll_in_min);
- reference_freq.full = rfixed_const(pll->reference_freq);
- do_div(freq, 10);
- ffreq.full = rfixed_const(freq);
- error.full = rfixed_const(100 * 100);
+ fixed20_12 feedback_divider, a, b;
+ u32 vco_freq;
- /* max p */
- p.full = rfixed_div(pll_out_max, ffreq);
- p.full = rfixed_floor(p);
+ vco_freq = freq * post_div;
+ /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
+ a.full = dfixed_const(pll->reference_freq);
+ feedback_divider.full = dfixed_const(vco_freq);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
+ a.full = dfixed_const(ref_div);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
- /* min m */
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
+ a.full = dfixed_const(10);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
+
+ /* *fb_div = floor(feedback_divider); */
+ a.full = dfixed_floor(feedback_divider);
+ *fb_div = dfixed_trunc(a);
+ /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
+ a.full = dfixed_const(10);
+ b.full = dfixed_mul(feedback_divider, a);
+
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
+ feedback_divider.full = b.full - feedback_divider.full;
+ *fb_div_frac = dfixed_trunc(feedback_divider);
+ } else {
+ /* *fb_div = floor(feedback_divider + 0.5); */
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
- while (1) {
- n.full = rfixed_div(ffreq, reference_freq);
- n.full = rfixed_mul(n, m);
- n.full = rfixed_mul(n, p);
+ *fb_div = dfixed_trunc(feedback_divider);
+ *fb_div_frac = 0;
+ }
- f_vco.full = rfixed_div(n, m);
- f_vco.full = rfixed_mul(f_vco, reference_freq);
+ if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
+ return false;
+ else
+ return true;
+}
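A worked example of the fractional path above (editor's illustration; all numbers hypothetical and assumed to satisfy the feedback-divider limits): with pll->reference_freq = 2700 (27 MHz in 10 kHz units), ref_div = 6, post_div = 6 and a target freq of 10750 (107.5 MHz), vco_freq = 10750 * 6 = 64500 and feedback_divider = 64500 * 6 / 2700 = 143.33; the round-to-tenths steps then yield *fb_div = 143 and *fb_div_frac = 3.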
- f_pclk.full = rfixed_div(f_vco, p);
+static bool
+calc_fb_ref_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac,
+ uint32_t *ref_div)
+{
+ fixed20_12 ffreq, max_error, error, pll_out, a;
+ u32 vco;
+ u32 pll_out_min, pll_out_max;
- if (f_pclk.full > ffreq.full)
- error.full = f_pclk.full - ffreq.full;
- else
- error.full = ffreq.full - f_pclk.full;
- error.full = rfixed_div(error, f_pclk);
- a.full = rfixed_const(100 * 100);
- error.full = rfixed_mul(error, a);
-
- a.full = rfixed_mul(m, p);
- a.full = rfixed_div(n, a);
- best_freq.full = rfixed_mul(reference_freq, a);
-
- if (rfixed_trunc(error) < 25)
- break;
-
- a.full = rfixed_const(1);
- m.full = m.full + a.full;
- a.full = rfixed_div(reference_freq, m);
- if (a.full >= pll_in_min.full)
- continue;
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
- a.full= rfixed_const(1);
- p.full = p.full - a.full;
- a.full = rfixed_mul(p, ffreq);
- if (a.full >= pll_out_min.full)
- continue;
- else {
- DRM_ERROR("Unable to find pll dividers\n");
- break;
+ ffreq.full = dfixed_const(freq);
+ /* max_error = ffreq * 0.0025 (i.e. ffreq / 400, a 0.25% tolerance) */
+ a.full = dfixed_const(400);
+ max_error.full = dfixed_div(ffreq, a);
+
+ for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
+ if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
+ vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
+ vco = vco / ((*ref_div) * 10);
+
+ if ((vco < pll_out_min) || (vco > pll_out_max))
+ continue;
+
+ /* pll_out = vco / post_div; */
+ a.full = dfixed_const(post_div);
+ pll_out.full = dfixed_const(vco);
+ pll_out.full = dfixed_div(pll_out, a);
+
+ if (pll_out.full >= ffreq.full) {
+ error.full = pll_out.full - ffreq.full;
+ if (error.full <= max_error.full)
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void radeon_compute_pll_new(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
+ u32 best_freq = 0, vco_frequency;
+ u32 pll_out_min, pll_out_max;
+
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
+
+ /* freq = freq / 10; */
+ do_div(freq, 10);
+
+ if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+ post_div = pll->post_div;
+ if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
+ goto done;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
+ goto done;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ goto done;
+ }
+ } else {
+ for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
+ if ((post_div == 5) ||
+ (post_div == 7) ||
+ (post_div == 9) ||
+ (post_div == 10) ||
+ (post_div == 11))
+ continue;
+ }
+
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ continue;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
+ continue;
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ break;
+ } else {
+ if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
+ break;
+ }
}
}
- a.full = rfixed_const(10);
- b.full = rfixed_mul(n, a);
+ best_freq = pll->reference_freq * 10 * fb_div;
+ best_freq += pll->reference_freq * fb_div_frac;
+ best_freq = best_freq / (ref_div * post_div);
+
+done:
+ if (best_freq == 0)
+ DRM_ERROR("Couldn't find valid PLL dividers\n");
- frac_n.full = rfixed_floor(n);
- frac_n.full = rfixed_mul(frac_n, a);
- frac_n.full = b.full - frac_n.full;
+ *dot_clock_p = best_freq / 10;
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = fb_div_frac;
+ *ref_div_p = ref_div;
+ *post_div_p = post_div;
- *dot_clock_p = rfixed_trunc(best_freq);
- *fb_div_p = rfixed_trunc(n);
- *frac_fb_div_p = rfixed_trunc(frac_n);
- *ref_div_p = rfixed_trunc(m);
- *post_div_p = rfixed_trunc(p);
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
- DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ switch (pll->algo) {
+ case PLL_ALGO_NEW:
+ radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ case PLL_ALGO_LEGACY:
+ default:
+ radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ }
}
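A hypothetical caller sketch of the dispatcher above (the pll fields and passing the mode clock in kHz are assumptions based on how the driver's CRTC code uses this function):

	/* Editor's sketch, not part of the patch. */
	uint32_t dot_clock, fb_div, frac_fb_div, ref_div, post_div;

	pll->algo = PLL_ALGO_NEW;	/* or PLL_ALGO_LEGACY */
	radeon_compute_pll(pll, adjusted_mode->clock, &dot_clock, &fb_div,
			   &frac_fb_div, &ref_div, &post_div);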
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- struct drm_device *dev = fb->dev;
- if (fb->fbdev)
- radeonfb_remove(dev, fb);
-
- if (radeon_fb->obj) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(radeon_fb->obj);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (radeon_fb->obj)
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -702,21 +851,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct radeon_framebuffer *radeon_fb;
-
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
- radeon_fb->obj = obj;
- return &radeon_fb->base;
+ rfb->obj = obj;
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
static struct drm_framebuffer *
@@ -725,6 +868,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
@@ -732,12 +876,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
"can't create framebuffer\n", mode_cmd->handle);
return NULL;
}
- return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+ return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_fb_output_poll_changed(rdev);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .fb_changed = radeonfb_probe,
+ .output_poll_changed = radeon_output_poll_changed
};
struct drm_prop_enum_list {
@@ -817,9 +975,29 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
return 0;
}
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+ /* adjustment options for the display watermarks */
+ if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+ /* Set display priority to high for r3xx and rv515 chips;
+ * this avoids flickering due to underflow in the
+ * display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
+ */
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
+ rdev->disp_priority = 2;
+ else
+ rdev->disp_priority = 0;
+ } else
+ rdev->disp_priority = radeon_disp_priority;
+}
+
int radeon_modeset_init(struct radeon_device *rdev)
{
- int num_crtc = 2, i;
+ int i;
int ret;
drm_mode_config_init(rdev->ddev);
@@ -842,11 +1020,14 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
- if (rdev->flags & RADEON_SINGLE_CRTC)
- num_crtc = 1;
+ /* check combios for a valid hardcoded EDID - Sun servers */
+ if (!rdev->is_atom_bios) {
+ /* check for hardcoded EDID in BIOS */
+ radeon_combios_check_hardcoded_edid(rdev);
+ }
/* allocate crtcs */
- for (i = 0; i < num_crtc; i++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
}
@@ -857,13 +1038,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
}
/* initialize hpd */
radeon_hpd_init(rdev);
- drm_helper_initial_config(rdev->ddev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+
return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ radeon_fbdev_fini(rdev);
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
+
if (rdev->mode_info.mode_config_initialized) {
+ drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
@@ -913,15 +1105,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
- radeon_crtc->vsc.full = rfixed_div(a, b);
- a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
- radeon_crtc->hsc.full = rfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.vdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ radeon_crtc->vsc.full = dfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.hdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
- radeon_crtc->vsc.full = rfixed_const(1);
- radeon_crtc->hsc.full = rfixed_const(1);
+ radeon_crtc->vsc.full = dfixed_const(1);
+ radeon_crtc->hsc.full = dfixed_const(1);
}
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7994d4..e166fe4d7c30 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,15 @@
/*
* KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 0
+#define KMS_DRIVER_MINOR 5
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,8 +92,10 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_new_pll = 1;
+int radeon_new_pll = -1;
int radeon_audio = 1;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -122,12 +130,18 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -200,6 +214,7 @@ static struct drm_driver driver_old = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
#endif
@@ -288,6 +303,7 @@ static struct drm_driver kms_driver = {
.mmap = radeon_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_kms_compat_ioctl,
#endif
@@ -339,6 +355,7 @@ static int __init radeon_init(void)
driver = &kms_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
+ radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
@@ -348,6 +365,7 @@ static int __init radeon_init(void)
static void __exit radeon_exit(void)
{
drm_exit(driver);
+ radeon_unregister_atpx_handler();
}
module_init(radeon_init);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad606504d..448eba89d1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -107,9 +107,10 @@
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 32
+#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {
@@ -268,6 +269,8 @@ typedef struct drm_radeon_private {
u32 scratch_ages[5];
+ int have_z_offset;
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
resource_size_t fb_aper_offset;
@@ -295,6 +298,9 @@ typedef struct drm_radeon_private {
int r700_sc_prim_fifo_size;
int r700_sc_hiz_tile_fifo_size;
int r700_sc_earlyz_tile_fifo_fize;
+ int r600_group_size;
+ int r600_npipes;
+ int r600_nbanks;
struct mutex cs_mutex;
u32 cs_id_scnt;
@@ -310,9 +316,11 @@ typedef struct drm_radeon_buf_priv {
u32 age;
} drm_radeon_buf_priv_t;
+struct drm_buffer;
+
typedef struct drm_radeon_kcmd_buffer {
int bufsz;
- char *buf;
+ struct drm_buffer *buffer;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
@@ -455,6 +463,15 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2139,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
write &= mask; \
} while (0)
+/**
+ * Copy the given number of dwords from the drm buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do { \
+ int _size = (sz) * 4; \
+ struct drm_buffer *_buf = (buf); \
+ int _part_size; \
+ while (_size > 0) { \
+ _part_size = _size; \
+ \
+ if (write + _part_size/4 > mask) \
+ _part_size = ((mask + 1) - write)*4; \
+ \
+ if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
+ _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+ \
+ memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
+ [drm_buffer_index(_buf)], _part_size); \
+ \
+ _size -= _part_size; \
+ write = (write + _part_size/4) & mask; \
+ drm_buffer_advance(_buf, _part_size); \
+ } \
+} while (0)
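A hypothetical usage sketch: since the macro expands in terms of the same ring/write/mask locals as OUT_RING, it must sit between BEGIN_RING and ADVANCE_RING (macro names from this header; the buffer and size are illustrative):

	/* Editor's sketch, not part of the patch. */
	RING_LOCALS;
	BEGIN_RING(payload_dwords);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, payload_dwords);
	ADVANCE_RING();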
+
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457ca..e0b30b264c28 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
/* DVO requires 2x ppll clocks depending on tmds chip */
if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
return index_mask;
-
+
count = -1;
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,79 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+static struct radeon_connector_atom_dig *
+radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!rdev->is_atom_bios)
+ return NULL;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return NULL;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_connector->con_priv)
+ return NULL;
+
+ dig_connector = radeon_connector->con_priv;
+
+ return dig_connector;
+}
+
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+ adjusted_mode->clock = native_mode->clock;
+ adjusted_mode->flags = native_mode->flags;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = native_mode->hdisplay;
+ adjusted_mode->vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->htotal = native_mode->hdisplay + hblank;
+ adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+ adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+ adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+}
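As a worked example of the arithmetic above (panel timings hypothetical): for a native 1680x1050 mode with htotal 1840, hsync_start 1728 and hsync_end 1760, hblank = 160, hover = 48 and hsync_width = 32; the adjusted mode therefore gets hsync_start = 1680 + 48 = 1728, hsync_end = 1728 + 32 = 1760 and htotal = 1680 + 160 = 1840, i.e. the panel's own timings are re-imposed whatever mode userspace asked for.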
+
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -246,18 +319,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
/* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- if (!ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- }
- adjusted_mode->base.id = mode_id;
- }
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
@@ -273,7 +336,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
if (ASIC_IS_DCE3(rdev) &&
- (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
@@ -288,12 +351,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0, num = 0;
+ int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
- enum radeon_tv_std tv_std = TV_STD_NTSC;
-
- if (dac_info->tv_std)
- tv_std = dac_info->tv_std;
memset(&args, 0, sizeof(args));
@@ -301,12 +360,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
- num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
- num = 2;
break;
}
@@ -317,7 +374,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.ucDacStandard = ATOM_DAC1_CV;
else {
- switch (tv_std) {
+ switch (dac_info->tv_std) {
case TV_STD_PAL:
case TV_STD_PAL_M:
case TV_STD_SCART_PAL:
@@ -348,10 +405,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
TV_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
- enum radeon_tv_std tv_std = TV_STD_NTSC;
-
- if (dac_info->tv_std)
- tv_std = dac_info->tv_std;
memset(&args, 0, sizeof(args));
@@ -362,7 +415,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
else {
- switch (tv_std) {
+ switch (dac_info->tv_std) {
case TV_STD_NTSC:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
@@ -458,34 +511,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
+ if (!dig || !dig_connector)
return;
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
- dig_connector = radeon_connector->con_priv;
-
memset(&args, 0, sizeof(args));
switch (radeon_encoder->encoder_id) {
@@ -504,7 +543,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
switch (frev) {
case 1:
@@ -578,7 +618,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- r600_hdmi_enable(encoder, hdmi_detected);
}
int
@@ -586,7 +625,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *radeon_dig_connector;
+ struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -617,9 +656,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- radeon_dig_connector = radeon_connector->con_priv;
- if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +695,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
+ * DCE 4.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -664,88 +715,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
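The hardcoded DCE 4.0 mapping described above, summarized as a table (editor's illustration; the struct is hypothetical):

	/* Editor's illustration of the fixed DIG -> UNIPHY/link mapping. */
	static const struct { u8 uniphy; char link; } dce4_dig_map[] = {
		{ 0, 'A' },	/* DIG1 -> UNIPHY0 link A (or A+B) */
		{ 0, 'B' },	/* DIG2 -> UNIPHY0 link B */
		{ 1, 'A' },	/* DIG3 -> UNIPHY1 link A (or A+B) */
		{ 1, 'B' },	/* DIG4 -> UNIPHY1 link B */
		{ 2, 'A' },	/* DIG5 -> UNIPHY2 link A (or A+B) */
		{ 2, 'B' },	/* DIG6 -> UNIPHY2 link B */
	};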
-static void
+
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+};
+
+void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DIG_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0, num = 0;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ union dig_encoder_control args;
+ int index = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return;
+ memset(&args, 0, sizeof(args));
- dig_connector = radeon_connector->con_priv;
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
- if (!radeon_encoder->enc_priv)
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
- dig = radeon_encoder->enc_priv;
-
- memset(&args, 0, sizeof(args));
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (dig->dig_encoder)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucLaneNum = 8;
else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = dig->dig_encoder + 1;
-
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
-
- args.ucAction = action;
- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucLaneNum = 4;
- if (ASIC_IS_DCE32(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ } else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
- break;
- }
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
}
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
- args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.ucLaneNum = dig_connector->dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
- args.ucLaneNum = 8;
- else
- args.ucLaneNum = 4;
-
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -753,6 +794,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
};
void
@@ -761,37 +803,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- union dig_transmitter_control args;
- int index = 0, num = 0;
- uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
+ union dig_transmitter_control args;
+ int index = 0;
+ uint8_t frev, crev;
bool is_dp = false;
+ int pll_id = 0;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
+ connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
- return;
-
- dig_connector = radeon_connector->con_priv;
-
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev))
+ if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
else {
switch (radeon_encoder->encoder_id) {
@@ -804,7 +838,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
}
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
@@ -821,24 +856,66 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_encoder == 1)
- args.v2.acConfig.ucEncoderSel = 1;
+ if (ASIC_IS_DCE4(rdev)) {
+ if (is_dp)
+ args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig_connector->linkb) {
+ args.v3.acConfig.ucLinkSel = 1;
+ args.v3.acConfig.ucEncoderSel = 1;
+ }
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v3.acConfig.fDualLinkConnector = 1;
+ }
+ } else if (ASIC_IS_DCE32(rdev)) {
+ args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
- num = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
- num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
- num = 2;
break;
}
@@ -847,9 +924,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.acConfig.fDualLinkConnector = 1;
}
} else {
-
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
if (dig->dig_encoder)
@@ -857,31 +935,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (rdev->flags & RADEON_IS_IGP) {
- if (radeon_encoder->pixel_clock > 165000) {
- if (dig_connector->igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (dig_connector->igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- } else {
- if (dig_connector->igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (dig_connector->igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (dig_connector->igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (dig_connector->igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- }
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+ if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+ if (dig_connector->igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (dig_connector->igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (dig_connector->igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (dig_connector->igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ } else {
+ if (dig_connector->igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (dig_connector->igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
- break;
}
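
The igp_lane_info tests above encode which shared PHY lanes the IGP routed to this connector: each bit selects a group of four lanes, so DP and single-link paths claim one group while dual-link paths claim two adjacent groups. A hedged sketch of the encoding (the ffs() use is illustrative, not taken from the driver):

	/* bit n of igp_lane_info selects lanes 4n..4n+3, e.g. 0x2 -> lanes 4-7 */
	unsigned int group = ffs(dig_connector->igp_lane_info) - 1;
	unsigned int first_lane = group * 4;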
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
-
if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
@@ -892,6 +964,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
}
@@ -998,16 +1072,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
- {
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
+ }
break;
}
} else {
@@ -1024,9 +1107,10 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
-union crtc_sourc_param {
+union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
@@ -1038,14 +1122,15 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- union crtc_sourc_param args;
+ union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
switch (frev) {
case 1:
@@ -1107,10 +1192,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
- if (dig->dig_encoder)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
+ switch (dig->dig_encoder) {
+ case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1141,6 +1242,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* update scratch regs with new routing */
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
static void
@@ -1167,6 +1271,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
	/* the AtomBIOS scaler setup clears this on some chips */
+ /* XXX check DCE4 */
if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1288,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig_connector->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig_connector->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig_connector->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ }
+
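
The DCE4 mapping above is regular: each UNIPHY block exposes two links (A and B), and each link owns its own DIG encoder, giving six encoders across three PHYs. Assuming a hypothetical uniphy_index() that maps UNIPHY/UNIPHY1/UNIPHY2 to 0/1/2, the switch reduces to:

	return uniphy_index(radeon_encoder->encoder_id) * 2 +
	       (dig_connector->linkb ? 1 : 0);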
	/* on DCE32 an encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
@@ -1223,20 +1355,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- if (radeon_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
- }
radeon_encoder->pixel_clock = adjusted_mode->clock;
- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
- atombios_set_encoder_crtc_source(encoder);
-
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
@@ -1254,15 +1376,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
+
+ /* init and enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1276,13 +1409,20 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- atombios_tv_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ r600_hdmi_enable(encoder);
+ r600_hdmi_setmode(encoder, adjusted_mode);
+ }
}
static bool
@@ -1302,7 +1442,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
memset(&args, 0, sizeof(args));
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return false;
args.sDacload.ucMisc = 0;
@@ -1376,8 +1517,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ }
+
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ atombios_set_encoder_crtc_source(encoder);
}
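
Moving the DIG-encoder pick and the CRTC-source programming into the prepare hook relies on the order in which the DRM CRTC helpers run the callbacks during a mode set. A simplified sketch of that sequence as driven by drm_crtc_helper_set_mode() (details elided):

	encoder_funcs->prepare(encoder);  /* first: pick DIG block, output off */
	crtc_funcs->prepare(crtc);
	crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
	encoder_funcs->mode_set(encoder, mode, adjusted_mode);
	crtc_funcs->commit(crtc);
	encoder_funcs->commit(encoder);   /* last: output back on */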
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1388,11 +1541,52 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
@@ -1433,12 +1627,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
if (!dac)
return NULL;
- dac->tv_std = TV_STD_NTSC;
+ dac->tv_std = radeon_atombios_get_tv_info(rdev);
return dac;
}
@@ -1480,10 +1676,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
return;
encoder = &radeon_encoder->base;
- if (rdev->flags & RADEON_SINGLE_CRTC)
+ switch (rdev->num_crtc) {
+ case 1:
encoder->possible_crtcs = 0x1;
- else
+ break;
+ case 2:
+ default:
encoder->possible_crtcs = 0x3;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
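
possible_crtcs is a bitmask with one bit per CRTC the encoder may be routed to, so the 0x1/0x3/0x3f values above are simply full masks for 1-, 2- and 6-CRTC parts. Assuming every encoder really can reach every CRTC, the switch collapses to:

	encoder->possible_crtcs = (1 << rdev->num_crtc) - 1;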
radeon_encoder->enc_priv = NULL;
@@ -1508,6 +1712,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
@@ -1535,6 +1740,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
-
- r600_hdmi_init(encoder);
}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e344a6..e329066dcabd 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -36,7 +36,7 @@
* Radeon chip families
*/
enum radeon_family {
- CHIP_R100,
+ CHIP_R100 = 0,
CHIP_RV100,
CHIP_RS100,
CHIP_RV200,
@@ -75,6 +75,11 @@ enum radeon_family {
CHIP_RV730,
CHIP_RV710,
CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
CHIP_LAST,
};
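
The Evergreen names are appended just before CHIP_LAST deliberately: family checks throughout the driver are ordered comparisons, so inserting a chip in the middle of the enum would silently change their meaning. A generation test of the form below only stays correct if the enum remains sorted by release order (the macro name here is illustrative):

	#define ASIC_IS_EVERGREEN(rdev) ((rdev)->family >= CHIP_CEDAR)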
@@ -94,4 +99,5 @@ enum radeon_chip_flags {
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
};
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346e9ab5..dc1634bb0c11 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,11 +23,8 @@
* Authors:
* David Airlie
*/
- /*
- * Modularization
- */
-
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
@@ -39,17 +36,23 @@
#include "drm_fb_helper.h"
-struct radeon_fb_device {
+#include <linux/vga_switcheroo.h>
+
+/* object hierarchy -
+ * this contains a helper and a radeon framebuffer;
+ * the helper holds a pointer to the radeon framebuffer base class.
+ */
+struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
- struct radeon_device *rdev;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -58,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (fb == NULL) {
- return 1;
- }
- info = fb->fbdev;
- if (info == NULL) {
- return 1;
- }
- if (mode == NULL) {
- return 1;
- }
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -122,58 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
return aligned;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gobj->driver_private;
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
-int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object **gobj_p)
{
- struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
- struct radeon_framebuffer *rfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
- int size, aligned_size, ret;
- u64 fb_gpuaddr;
- void *fbptr = NULL;
- unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
- int crtc_count;
-
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- /* avivo can't scanout real 24bpp */
- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
- surface_bpp = 32;
+ int ret;
+ int aligned_size, size;
- mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
- mode_cmd.depth = surface_depth;
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
-
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
- surface_width, surface_height);
- ret = -ENOMEM;
- goto out;
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
}
rbo = gobj->driver_private;
@@ -181,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd.bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -194,61 +144,81 @@ int radeonfb_create(struct drm_device *dev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
- tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd.pitch);
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
- mutex_lock(&rdev->ddev->struct_mutex);
- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
- if (fb == NULL) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_unref;
- }
+
+
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
- ret = radeon_bo_kmap(rbo, &fbptr);
+ ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
- *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_rbo = rbo;
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+	/* avivo can't scan out real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	if (ret)
+		return ret;
+	rbo = gobj->driver_private;
- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+	/* okay, we have an object; now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
- rdev->fbdev_info = info;
- rfbdev = info->par;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- rfbdev->helper.dev = dev;
- if (rdev->flags & RADEON_SINGLE_CRTC)
- crtc_count = 1;
- else
- crtc_count = 2;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
- RADEONFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ info->par = rfbdev;
+
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+
+ fb = &rfbdev->rfb.base;
- memset_io(fbptr, 0x0, aligned_size);
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
@@ -257,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_location;
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = rdev->ddev->mode_config.fb_base;
- info->aperture_size = rdev->mc.real_vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = rdev->mc.real_vram_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
@@ -276,43 +251,55 @@ int radeonfb_create(struct drm_device *dev,
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
- fb->fbdev = info;
- rfbdev->rfb = rfb;
- rfbdev->rdev = rdev;
-
- mutex_unlock(&rdev->ddev->struct_mutex);
+ vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
if (rbo) {
- ret = radeon_bo_reserve(rbo, false);
- if (likely(ret == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unreserve(rbo);
- }
+
}
if (fb && ret) {
- list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
-out:
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
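
The fb_probe hook's return value matters to the fb helper: 0 means the existing framebuffer was reused, 1 means a new one was created and should be registered with fbdev, and a negative value is an error. A simplified sketch of the caller's side under those assumptions:

	ret = helper->funcs->fb_probe(helper, &sizes);
	if (ret < 0)
		return ret;
	if (ret == 1)
		register_framebuffer(helper->fbdev);	/* only for a new fb */

The cast from helper to struct radeon_fbdev works because helper is the structure's first member; container_of() would make that relationship explicit.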
+
static char *mode_option;
int radeon_parse_options(char *options)
{
@@ -329,46 +316,108 @@ int radeon_parse_options(char *options)
return 0;
}
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- struct radeon_device *rdev = dev->dev_private;
- int bpp_sel = 32;
-
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
struct radeon_bo *rbo;
int r;
- if (!fb) {
- return -EINVAL;
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
- info = fb->fbdev;
- if (info) {
- struct radeon_fb_device *rfbdev = info->par;
+
+ if (rfb->obj) {
rbo = rfb->obj->driver_private;
- unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
+ drm_gem_object_unreference_unlocked(rfb->obj);
}
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_cleanup(&rfb->base);
+
+ return 0;
+}
- printk(KERN_INFO "unregistered panic notifier\n");
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeon_fb_find_or_create_single,
+};
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+ int ret;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ if (ret) {
+ kfree(rfbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ return true;
+ return false;
+}
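
radeon_fbdev_robj_is_fb() compares buffer-object identity so that memory-management paths can recognize the scanout buffer. The if/return pair can be collapsed into a single expression:

	return robj == rdev->mode_info.rfbdev->rfb.obj->driver_private;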
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8495d4e32e18..b1f9a81b5d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -33,6 +33,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
@@ -57,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -70,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
+ unsigned long cjiffies;
- if (rdev == NULL) {
- return true;
- }
- if (rdev->shutdown) {
- return true;
- }
seq = RREG32(rdev->fence_drv.scratch_reg);
- rdev->fence_drv.last_seq = seq;
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ } else {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ /* update the timeout */
+ rdev->fence_drv.last_timeout -= cjiffies;
+ } else {
+				/* the 500ms timeout has elapsed; we should
+				 * test for GPU lockup
+				 */
+ rdev->fence_drv.last_timeout = 1;
+ }
+ } else {
+			/* jiffies wrapped around; update last_jiffies, we
+			 * will just wait a little longer
+			 */
+ rdev->fence_drv.last_jiffies = cjiffies;
+ }
+ return false;
+ }
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@@ -170,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
- unsigned long cur_jiffies;
- unsigned long timeout;
- bool expired = false;
+ unsigned long irq_flags, timeout;
+ u32 seq;
int r;
if (fence == NULL) {
@@ -183,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
-
+ timeout = rdev->fence_drv.last_timeout;
retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-
+ /* save current sequence used to check for GPU lockup */
+ seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
- if (unlikely(r < 0))
+ if (unlikely(r < 0)) {
return r;
+ }
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@@ -205,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
- if (unlikely(r == 0)) {
- expired = true;
+		/* we were interrupted for some reason and the fence
+		 * isn't signaled yet, resume the wait
+		 */
+ if (r) {
+ timeout = r;
+ goto retry;
}
- if (unlikely(expired)) {
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- if (timeout > 500) {
- DRM_ERROR("fence(%p:0x%08X) %lums timeout "
- "going to reset GPU\n",
- fence, fence->seq, timeout);
- radeon_gpu_reset(rdev);
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- }
+		/* don't protect read access to rdev->fence_drv.last_seq:
+		 * if we are experiencing a lockup the value doesn't change
+		 */
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ /* good news we believe it's a lockup */
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+			/* FIXME: what should we do? marking everyone
+			 * as signaled for now
+			 */
+ rdev->gpu_lockup = true;
+ r = radeon_gpu_reset(rdev);
+ if (r)
+ return r;
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ rdev->gpu_lockup = false;
}
+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
- if (unlikely(expired)) {
- rdev->fence_drv.count_timeout++;
- cur_jiffies = jiffies;
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
- fence, fence->seq, timeout);
- DRM_ERROR("last signaled fence(0x%08X)\n",
- rdev->fence_drv.last_seq);
- }
return 0;
}
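
The rewritten wait loop samples fence_drv.last_seq before sleeping and compares it again afterwards; only when the sequence number has not advanced across a full RADEON_FENCE_JIFFIES_TIMEOUT window does it consult the ASIC-specific heuristic. The core check, reduced to a hypothetical helper (not in the driver):

	static bool radeon_fence_wait_looks_hung(struct radeon_device *rdev,
						 u32 seq_before_sleep)
	{
		/* nothing retired over the whole timeout window and the
		 * per-ASIC heuristic agrees: treat the GPU as locked up */
		return seq_before_sleep == rdev->fence_drv.last_seq &&
		       radeon_gpu_is_lockup(rdev);
	}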
@@ -332,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa6..e65b90317fab 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
+ u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = 0;
+ rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, 0);
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -170,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- DRM_ERROR("trying to bind memory to unitialized GART !\n");
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+ int i, j, t;
+ u64 page_base;
+
+ for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+ page_base = rdev->gart.pages_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
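
radeon_gart_restore() exists because unbound entries now point at the dummy page rather than bus address 0: a stray GPU access hits harmless scratch memory instead of hanging the bus, and after resume every PTE can be replayed from pages_addr[]. A sketch of the invariant the init/unbind/restore paths maintain, assuming radeon_dummy_page_init() succeeded:

	/* no GART entry may ever translate to bus address 0 */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++)
		WARN_ON(rdev->gart.pages_addr[i] == 0);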
int radeon_gart_init(struct radeon_device *rdev)
{
+ int r, i;
+
if (rdev->gart.pages) {
return 0;
}
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ /* set GART entry to point to the dummy page by default */
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a355a01..a72a3ee5d69b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_bo_unref(&robj);
}
+
+ drm_gem_object_release(gobj);
+ kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -69,9 +72,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- mutex_lock(&rdev->ddev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
gobj->driver_private = robj;
@@ -160,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- if (rdev->fbdev_rbo)
- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
@@ -202,14 +202,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
if (r) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(gobj);
args->handle = handle;
return 0;
}
@@ -236,9 +232,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -255,9 +249,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return 0;
}
@@ -288,9 +280,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -311,9 +301,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -331,9 +319,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -356,8 +342,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e89d00..5def6f5dff38 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "atom.h"
/**
* radeon_ddc_probe
@@ -58,8 +59,9 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
return false;
}
+/* bit banging i2c */
-void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +73,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
*/
if (rec->hw_capable) {
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ u32 reg;
+
+ if (rdev->family >= CHIP_RV350)
+ reg = RADEON_GPIO_MONID;
+ else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350))
+ reg = RADEON_GPIO_DVI_DDC;
+ else
+ reg = RADEON_GPIO_CRT2_DDC;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ if (rec->a_clk_reg == reg) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
} else {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
}
}
@@ -168,10 +182,692 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 1);
+
+ return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 0);
+}
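
pre_xfer/post_xfer are the standard hooks struct i2c_algo_bit_data provides for claiming a bus around a transfer; here they route the GPIO pads to the i2c pins only while a transaction is in flight. A plausible registration sketch, assuming the companion set_clock/get_clock/get_data callbacks defined alongside set_data() in this file (the udelay value is an assumption):

	i2c->algo.bit.pre_xfer  = pre_xfer;
	i2c->algo.bit.post_xfer = post_xfer;
	i2c->algo.bit.setsda    = set_data;
	i2c->algo.bit.setscl    = set_clock;
	i2c->algo.bit.getsda    = get_data;
	i2c->algo.bit.getscl    = get_clock;
	i2c->algo.bit.udelay    = 20;
	i2c->algo.bit.data      = i2c;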
+
+/* hw i2c */
+
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+ u32 sclk = radeon_get_engine_clock(rdev);
+ u32 prescale = 0;
+ u32 nm;
+ u8 n, m, loop;
+ int i2c_clock;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ i2c_clock = 60;
+ nm = (sclk * 10) / (i2c_clock * 4);
+ for (loop = 1; loop < 255; loop++) {
+ if ((nm / loop) < loop)
+ break;
+ }
+ n = loop - 1;
+ m = loop - 2;
+ prescale = m | (n << 8);
+ break;
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* todo */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ i2c_clock = 50;
+ if (rdev->family == CHIP_R520)
+ prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+ else
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* todo */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* todo */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* todo */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ break;
+ }
+ return prescale;
+}
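
The prescale value packs a divider pair into one register: m in the low byte, n in the high byte. A worked pass through the r1xx-r3xx branch, assuming radeon_get_engine_clock() returned 20000 (10 kHz units, i.e. a 200 MHz sclk):

	u32 nm = (20000 * 10) / (60 * 4);	/* = 833 */
	u8 loop = 1;

	while ((nm / loop) >= loop)	/* find the first loop with loop^2 > nm */
		loop++;			/* terminates at loop = 29 */
	/* n = 28, m = 27 -> prescale = 27 | (28 << 8) = 0x1c1b */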
+
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, k, ret = num;
+ u32 prescale;
+ u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+ u32 tmp, reg;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+ RADEON_I2C_DRIVE_EN |
+ RADEON_I2C_START |
+ RADEON_I2C_STOP |
+ RADEON_I2C_GO);
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ }
+
+ if (rec->mm_i2c) {
+ i2c_cntl_0 = RADEON_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_I2C_CNTL_1;
+ i2c_data = RADEON_I2C_DATA;
+ } else {
+ i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+ i2c_data = RADEON_DVI_I2C_DATA;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ /* no gpio select bit */
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R200:
+ /* only bit 4 on r200 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_CRT2_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ /* only bit 4 on r300/r350 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ default:
+ DRM_ERROR("unsupported asic\n");
+ ret = -EINVAL;
+ goto done;
+ break;
+ }
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, 0);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ for (j = 0; j < p->len; j++) {
+ if (p->flags & I2C_M_RD) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ p->buf[j] = RREG32(i2c_data) & 0xff;
+ } else {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, p->buf[j]);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+
+done:
+ WREG32(i2c_cntl_0, 0);
+ WREG32(i2c_cntl_1, 0);
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
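
The probe, read and write paths above repeat the same GO/DONE polling idiom. A possible helper capturing it (hypothetical, not in the driver; the original code simply falls out of the loop after 32 polls):

	static int r100_hw_i2c_wait(struct radeon_device *rdev, u32 i2c_cntl_0)
	{
		u32 tmp;
		int k;

		for (k = 0; k < 32; k++) {
			udelay(10);
			tmp = RREG32(i2c_cntl_0);
			if (tmp & RADEON_I2C_GO)
				continue;	/* transfer still running */
			if (tmp & RADEON_I2C_DONE)
				return 0;
			/* NACK/HALT: abort the transaction */
			WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
			return -EIO;
		}
		return -ETIMEDOUT;
	}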
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, remaining, current_count, buffer_offset, ret = num;
+ u32 prescale;
+ u32 tmp, reg;
+ u32 saved1, saved2;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ /* clear gpio mask bits */
+ tmp = RREG32(rec->mask_clk_reg);
+ tmp &= ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, tmp);
+ tmp = RREG32(rec->mask_clk_reg);
+
+ tmp = RREG32(rec->mask_data_reg);
+ tmp &= ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, tmp);
+ tmp = RREG32(rec->mask_data_reg);
+
+ /* clear pin values */
+ tmp = RREG32(rec->a_clk_reg);
+ tmp &= ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, tmp);
+ tmp = RREG32(rec->a_clk_reg);
+
+ tmp = RREG32(rec->a_data_reg);
+ tmp &= ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, tmp);
+ tmp = RREG32(rec->a_data_reg);
+
+ /* set the pins to input */
+ tmp = RREG32(rec->en_clk_reg);
+ tmp &= ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, tmp);
+ tmp = RREG32(rec->en_clk_reg);
+
+ tmp = RREG32(rec->en_data_reg);
+ tmp &= ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, tmp);
+ tmp = RREG32(rec->en_data_reg);
+
+	/* flag hw i2c as busy and save the current controller state */
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+ saved2 = RREG32(0x494);
+ WREG32(0x494, saved2 | 0x1);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ for (i = 0; i < 50; i++) {
+ udelay(1);
+ if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+ break;
+ }
+ if (i == 50) {
+ DRM_ERROR("failed to get i2c bus\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+ switch (rec->mask_clk_reg) {
+ case AVIVO_DC_GPIO_DDC1_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+ break;
+ case AVIVO_DC_GPIO_DDC2_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+ break;
+ case AVIVO_DC_GPIO_DDC3_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ WREG32(AVIVO_DC_I2C_DATA, 0);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(1) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ if (p->flags & I2C_M_RD) {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ for (j = 0; j < current_count; j++)
+ p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ } else {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ for (j = 0; j < current_count; j++)
+ WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+ }
+
+done:
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+ WREG32(0x494, saved2);
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ int ret = 0;
+
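+ /* dispatch on asic family: older asics use the legacy engine
+ * (r100_hw_i2c_xfer), rv515..r580 use the avivo dc engine unless
+ * the bus is the multimedia i2c block, and families still marked
+ * XXX return 0 (nothing transferred) until an implementation lands
+ */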
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ if (rec->mm_i2c)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* XXX fill in hw i2c implementation */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+ .master_xfer = radeon_hw_i2c_xfer,
+ .functionality = radeon_hw_i2c_func,
+};
+
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_chan *i2c;
int ret;
@@ -179,24 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
if (i2c == NULL)
return NULL;
+ i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
- i2c->adapter.algo_data = &i2c->algo.bit;
- i2c->algo.bit.setsda = set_data;
- i2c->algo.bit.setscl = set_clock;
- i2c->algo.bit.getsda = get_data;
- i2c->algo.bit.getscl = get_clock;
- i2c->algo.bit.udelay = 20;
- /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
- * make this, 2 jiffies is a lot more reliable */
- i2c->algo.bit.timeout = 2;
- i2c->algo.bit.data = i2c;
- i2c->rec = *rec;
- ret = i2c_bit_add_bus(&i2c->adapter);
- if (ret) {
- DRM_INFO("Failed to register i2c %s\n", name);
- goto out_free;
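+ /* take the hw path when the bus record allows it and the
+ * radeon_hw_i2c option is set, but only on families with a hw
+ * implementation above; mm_i2c buses always use the hw engine and
+ * everything else falls back to gpio bit-banging
+ */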
+ if (rec->mm_i2c ||
+ (rec->hw_capable &&
+ radeon_hw_i2c &&
+ ((rdev->family <= CHIP_RS480) ||
+ ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+ /* set the radeon hw i2c adapter */
+ sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
+ i2c->adapter.algo = &radeon_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register hw i2c %s\n", name);
+ goto out_free;
+ }
+ } else {
+ /* set the radeon bit adapter */
+ sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.pre_xfer = pre_xfer;
+ i2c->algo.bit.post_xfer = post_xfer;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 20;
+ /* vesa says 2.2 ms is enough; 1 jiffy doesn't always cover that,
+ * so 2 jiffies is a lot more reliable */
+ i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c %s\n", name);
+ goto out_free;
+ }
}
return i2c;
@@ -237,12 +952,10 @@ out_free:
}
-
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
-
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
@@ -252,10 +965,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
return NULL;
}
-void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val)
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
{
u8 out_buf[2];
u8 in_buf[2];
@@ -286,10 +999,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
}
}
-void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 val)
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
{
uint8_t out_buf[2];
struct i2c_msg msg = {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3cfd60fd0083..059bfa4098d7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
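+ /* unlike a raw sysfs hotplug event, the kms helper kicks the
+ * output poll so connector state is re-probed before userspace
+ * reacts
+ */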
}
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,9 +68,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
- for (i = 0; i < 2; i++) {
+ rdev->irq.gui_idle = false;
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- }
+ for (i = 0; i < 6; i++)
+ rdev->irq.hpd[i] = false;
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@@ -95,34 +98,30 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
- for (i = 0; i < 2; i++) {
+ rdev->irq.gui_idle = false;
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
+ for (i = 0; i < 6; i++)
rdev->irq.hpd[i] = false;
- }
radeon_irq_set(rdev);
}
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
- int num_crtc = 2;
- if (rdev->flags & RADEON_SINGLE_CRTC)
- num_crtc = 1;
spin_lock_init(&rdev->irq.sw_lock);
- r = drm_vblank_init(rdev->ddev, num_crtc);
+ r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
- /* MSIs don't seem to work on my rs780;
- * not sure about rs880 or other rs780s.
- * Needs more investigation.
+ /* MSIs don't seem to work reliably on all IGP
+ * chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (rdev->family != CHIP_RS780) &&
- (rdev->family != CHIP_RS880)) {
+ (!(rdev->flags & RADEON_IS_IGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b05606eb5..ab389f89fa8d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,9 @@
#include "radeon.h"
#include "radeon_drm.h"
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+
int radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -95,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info;
+ struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value_ptr;
uint32_t value;
+ struct drm_crtc *crtc;
+ int i, found;
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
+ value = *value_ptr;
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -111,6 +118,28 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
+ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+ value = false;
+ else
+ value = rdev->accel_working;
+ break;
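+ /* map a drm mode object id onto the asic crtc index so userspace
+ * can address the underlying hardware pipe directly
+ */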
+ case RADEON_INFO_CRTC_FROM_ID:
+ for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+ crtc = (struct drm_crtc *)minfo->crtcs[i];
+ if (crtc && crtc->base.id == value) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ value = radeon_crtc->crtc_id;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ DRM_DEBUG("unknown crtc id %d\n", value);
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_ACCEL_WORKING2:
value = rdev->accel_working;
break;
default:
@@ -136,6 +165,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
@@ -161,7 +191,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -173,7 +203,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -187,7 +217,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
@@ -276,17 +306,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081e1246..e1e5255396ac 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
}
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -403,7 +409,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
- radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
+ radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
base -= radeon_crtc->legacy_display_base_addr;
@@ -582,29 +588,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_V_SYNC_POL
: 0));
- /* TODO -> Dell Server */
- if (0) {
- uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
- uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
- uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
- uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-
- dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
- dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
-
- /* For CRT on DAC2, don't turn it on if BIOS didn't
- enable it, even it's detected.
- */
- disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
- tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
- tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
-
- WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
- WREG32(RADEON_DAC_CNTL2, dac2_cntl);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- }
-
if (radeon_crtc->crtc_id) {
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
@@ -626,6 +609,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC2_INTERLACE_EN
: 0));
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
@@ -653,6 +640,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_INTERLACE_EN
: 0));
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc_gen_cntl |= RADEON_CRTC_EN;
+
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
RADEON_CRTC_VSYNC_DIS |
@@ -726,6 +717,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
@@ -977,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e231ef5..5688a0cf6bbe 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ udelay(panel_pwr_delay * 1000);
break;
}
@@ -115,6 +116,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -220,16 +222,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- adjusted_mode->base.id = mode_id;
- }
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
return true;
}
@@ -285,6 +279,7 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +465,7 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +631,7 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -813,8 +810,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
if (rdev->family == CHIP_R420 ||
- rdev->family == CHIP_R423 ||
- rdev->family == CHIP_RV410)
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410)
tv_dac_cntl |= (R420_TV_DAC_RDACPD |
R420_TV_DAC_GDACPD |
R420_TV_DAC_BDACPD |
@@ -842,6 +839,7 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
@@ -887,35 +885,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
if (rdev->family != CHIP_R200) {
tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
if (rdev->family == CHIP_R420 ||
- rdev->family == CHIP_R423 ||
- rdev->family == CHIP_RV410) {
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410) {
tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
- RADEON_TV_DAC_BGADJ_MASK |
- R420_TV_DAC_DACADJ_MASK |
- R420_TV_DAC_RDACPD |
- R420_TV_DAC_GDACPD |
- R420_TV_DAC_BDACPD |
- R420_TV_DAC_TVENABLE);
+ RADEON_TV_DAC_BGADJ_MASK |
+ R420_TV_DAC_DACADJ_MASK |
+ R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ R420_TV_DAC_TVENABLE);
} else {
tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
- RADEON_TV_DAC_BGADJ_MASK |
- RADEON_TV_DAC_DACADJ_MASK |
- RADEON_TV_DAC_RDACPD |
- RADEON_TV_DAC_GDACPD |
- RADEON_TV_DAC_BDACPD);
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD);
}
- /* FIXME TV */
- if (tv_dac) {
- struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
- RADEON_TV_DAC_NHOLD |
- RADEON_TV_DAC_STD_PS2 |
- tv_dac->ps2_tvdac_adj);
+ tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+ if (is_tv) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
+ else
+ tv_dac_cntl |= tv_dac->pal_tvdac_adj;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J)
+ tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+ else
+ tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
} else
- tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
- RADEON_TV_DAC_NHOLD |
- RADEON_TV_DAC_STD_PS2);
+ tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
+ tv_dac->ps2_tvdac_adj);
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
}
@@ -923,16 +929,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
if (ASIC_IS_R300(rdev)) {
gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
- }
-
- if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
- disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
- else
+ } else if (rdev->family != CHIP_R200)
disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
-
- if (rdev->family == CHIP_R200)
+ else if (rdev->family == CHIP_R200)
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ if (rdev->family >= CHIP_R200)
+ disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
if (is_tv) {
uint32_t dac_cntl;
@@ -997,15 +1001,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
if (ASIC_IS_R300(rdev)) {
WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
- }
+ } else if (rdev->family != CHIP_R200)
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ else if (rdev->family == CHIP_R200)
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (rdev->family >= CHIP_R200)
WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
- else
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
-
- if (rdev->family == CHIP_R200)
- WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (is_tv)
radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
@@ -1163,6 +1165,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
bool color = true;
+ struct drm_crtc *crtc;
+
+ /* find out if crtc2 is in use or if this encoder is using it */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+ if (encoder->crtc != crtc) {
+ return connector_status_disconnected;
+ }
+ }
+ }
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684daef4c..032040397743 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
#define NTSC_TV_PLL_N_14 693
#define NTSC_TV_PLL_P_14 7
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
#define VERT_LEAD_IN_LINES 2
#define FRAC_BITS 0xe
#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
630627, /* defRestart */
347, /* crtcPLL_N */
14, /* crtcPLL_M */
- 8, /* crtcPLL_postDiv */
+ 8, /* crtcPLL_postDiv */
1022, /* pixToTV */
},
+ { /* PAL timing for 14 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1131, /* horTotal */
+ 742, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 708369, /* defRestart */
+ 211, /* crtcPLL_N */
+ 9, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
};
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
if (pll->reference_freq == 2700)
const_ptr = &available_tv_modes[1];
else
- const_ptr = &available_tv_modes[1]; /* FIX ME */
+ const_ptr = &available_tv_modes[3];
}
return const_ptr;
}
@@ -623,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
}
flicker_removal = (tmp + 500) / 1000;
- if (flicker_removal < 2)
- flicker_removal = 2;
+ if (flicker_removal < 3)
+ flicker_removal = 3;
for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
} else {
- m = PAL_TV_PLL_M_27;
- n = PAL_TV_PLL_N_27;
- p = PAL_TV_PLL_P_27;
+ m = PAL_TV_PLL_M_14;
+ n = PAL_TV_PLL_N_14;
+ p = PAL_TV_PLL_P_14;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2aeb6a8f..95696aa57ac8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
+#include <drm_fixed.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
+struct radeon_bo;
struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_hpd_id {
+ RADEON_HPD_1 = 0,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+ RADEON_HPD_NONE = 0xff,
+};
+
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -83,6 +94,8 @@ struct radeon_i2c_bus_rec {
bool valid;
/* id used by atom */
uint8_t i2c_id;
+ /* hpd id used by atom */
+ enum radeon_hpd_id hpd;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -113,6 +126,7 @@ struct radeon_tmds_pll {
#define RADEON_MAX_BIOS_CONNECTOR 16
+/* pll flags */
#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -126,6 +140,13 @@ struct radeon_tmds_pll {
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
+#define RADEON_PLL_IS_LCD (1 << 13)
+
+/* pll algo */
+enum radeon_pll_algo {
+ PLL_ALGO_LEGACY,
+ PLL_ALGO_NEW
+};
struct radeon_pll {
/* reference frequency */
@@ -140,6 +161,8 @@ struct radeon_pll {
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
+ uint32_t lcd_pll_out_min;
+ uint32_t lcd_pll_out_max;
uint32_t best_vco;
/* divider limits */
@@ -157,14 +180,16 @@ struct radeon_pll {
/* pll id */
uint32_t id;
+ /* pll algo */
+ enum radeon_pll_algo algo;
};
struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
union {
- struct i2c_algo_dp_aux_data dp;
struct i2c_algo_bit_data bit;
+ struct i2c_algo_dp_aux_data dp;
} algo;
struct radeon_i2c_bus_rec rec;
};
@@ -181,6 +206,7 @@ enum radeon_connector_table {
CT_MINI_INTERNAL,
CT_IMAC_G5_ISIGHT,
CT_EMAC,
+ CT_RN50_POWER,
};
enum radeon_dvo_chip {
@@ -188,12 +214,14 @@ enum radeon_dvo_chip {
DVO_SIL1178,
};
+struct radeon_fbdev;
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[2];
+ struct radeon_crtc *crtcs[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -202,7 +230,11 @@ struct radeon_mode_info {
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
+ /* hardcoded DFP edid from BIOS */
+ struct edid *bios_hardcoded_edid;
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +269,7 @@ struct radeon_crtc {
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
+ int pll_id;
};
struct radeon_encoder_primary_dac {
@@ -303,6 +336,7 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
+ enum radeon_pll_algo pll_algo;
struct radeon_atom_ss *ss;
/* panel mode */
struct drm_display_mode native_mode;
@@ -322,7 +356,9 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int audio_polling_active;
int hdmi_offset;
+ int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
};
@@ -345,16 +381,6 @@ struct radeon_gpio_rec {
u32 mask;
};
-enum radeon_hpd_id {
- RADEON_HPD_NONE = 0,
- RADEON_HPD_1,
- RADEON_HPD_2,
- RADEON_HPD_3,
- RADEON_HPD_4,
- RADEON_HPD_5,
- RADEON_HPD_6,
-};
-
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
@@ -398,6 +424,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
@@ -411,14 +438,14 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
-extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val);
-extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
- u8 slave_addr,
- u8 addr,
- u8 val);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -432,14 +459,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *ref_div_p,
uint32_t *post_div_p);
-extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p);
-
extern void radeon_setup_encoder_clones(struct drm_device *dev);
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +492,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -518,11 +540,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -531,7 +552,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
@@ -545,6 +565,8 @@ extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
/* legacy tv */
@@ -560,4 +582,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370928eb..d5b9373ce06c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -30,6 +30,7 @@
* Dave Airlie
*/
#include <linux/list.h>
+#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
@@ -111,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
+ mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(rdev->dev,
@@ -165,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
+ struct radeon_device *rdev;
if ((*bo) == NULL)
return;
+ rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
+ mutex_lock(&rdev->vram_mutex);
ttm_bo_unref(&tbo);
+ mutex_unlock(&rdev->vram_mutex);
if (tbo == NULL)
*bo = NULL;
}
@@ -178,7 +185,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
int r, i;
- radeon_ttm_placement_from_domain(bo, domain);
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
@@ -186,9 +192,13 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ }
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -212,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -291,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
r = radeon_bo_reserve(lobj->bo, false);
if (unlikely(r != 0))
return r;
+ lobj->reserved = true;
}
return 0;
}
@@ -301,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
list_for_each_entry(lobj, head, list) {
/* only unreserve object we successfully reserved */
- if (radeon_bo_is_reserved(lobj->bo))
+ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
radeon_bo_unreserve(lobj->bo);
}
}
@@ -312,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
struct radeon_bo *bo;
int r;
+ list_for_each_entry(lobj, head, list) {
+ lobj->reserved = false;
+ }
r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
return r;
@@ -327,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}
@@ -495,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_bo_check_tiling(rbo, 0, 1);
}
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct radeon_device *rdev;
struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
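+ /* called from the fault path with the bo reserved: if the faulting
+ * bo sits past the cpu visible vram window, migrate it into the
+ * visible range before the cpu mapping is established
+ */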
if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
+ return 0;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* the bo is not in the cpu visible part of vram; move it there */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..3fa6984d9896 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,744 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
+ * Alex Deucher <alexdeucher@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
+#include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
+
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+static const char *radeon_pm_state_type_name[5] = {
+ "Default",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+
+#define ACPI_AC_CLASS "ac_adapter"
+
+#ifdef CONFIG_ACPI
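+/* on ac adapter plug/unplug, re-evaluate the clocks if the auto
+ * profile is active, since auto picks different states on ac vs dc
+ */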
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG("pm: AC\n");
+ else
+ DRM_DEBUG("pm: DC\n");
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+ }
+ }
+
+ return NOTIFY_OK;
+}
+#endif
+
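+/* map the selected profile onto a profile table index: auto follows
+ * the ac adapter state, and every profile has single-head (SH) and
+ * multi-head (MH) variants because driving several crtcs needs
+ * higher clocks
+ */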
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+ switch (rdev->pm.profile) {
+ case PM_PROFILE_DEFAULT:
+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+ break;
+ case PM_PROFILE_AUTO:
+ if (power_supply_is_system_supplied() > 0) {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+ } else {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ }
+ break;
+ case PM_PROFILE_LOW:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+ break;
+ case PM_PROFILE_MID:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ break;
+ case PM_PROFILE_HIGH:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+ break;
+ }
+
+ if (rdev->pm.active_crtc_count == 0) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+ } else {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+ }
+}
+
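+/* drop cpu mappings of vram objects before reclocking; the fault
+ * handler rebuilds them afterwards (see radeon_bo_fault_reserve_notify)
+ */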
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects))
+ return;
+
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ ttm_bo_unmap_virtual(&bo->tbo);
+ }
+}
+
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+ if (rdev->pm.active_crtcs) {
+ rdev->pm.vblank_sync = false;
+ wait_event_timeout(
+ rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+ msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ }
+}
+
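+/* program the requested state: clocks are clamped to the defaults,
+ * and voltage/pcie lane changes are applied before an upclock but
+ * only after a downclock
+ */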
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+ u32 sclk, mclk;
+ bool misc_after = false;
+
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
+ if (radeon_gui_idle(rdev)) {
+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk;
+ if (sclk > rdev->clock.default_sclk)
+ sclk = rdev->clock.default_sclk;
+
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+ if (mclk > rdev->clock.default_mclk)
+ mclk = rdev->clock.default_mclk;
+
+ /* upvolt before raising clocks, downvolt after lowering clocks */
+ if (sclk < rdev->pm.current_sclk)
+ misc_after = true;
+
+ radeon_sync_with_vblank(rdev);
-int radeon_debugfs_pm_init(struct radeon_device *rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (!radeon_pm_in_vbl(rdev))
+ return;
+ }
+
+ radeon_pm_prepare(rdev);
+
+ if (!misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+
+ if (misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ radeon_pm_finish(rdev);
+
+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+ } else
+ DRM_DEBUG("pm: GUI not idle!!!\n");
+}
+
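+/* grab every lock that guards gpu access, quiesce the gpu (gui idle
+ * interrupt on r600+, a fence wait on older asics), unmap vram cpu
+ * mappings, and hold vblank references on the active crtcs while the
+ * state switch happens
+ */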
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ int i;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ mutex_lock(&rdev->vram_mutex);
+ mutex_lock(&rdev->cp.mutex);
+
+ /* gui idle int has issues on older chips it seems */
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->irq.installed) {
+ /* wait for GPU idle */
+ rdev->pm.gui_idle = false;
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ wait_event_interruptible_timeout(
+ rdev->irq.idle_queue, rdev->pm.gui_idle,
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ }
+ } else {
+ if (rdev->cp.ready) {
+ struct radeon_fence *fence;
+ radeon_ring_alloc(rdev, 64);
+ radeon_fence_create(rdev, &fence);
+ radeon_fence_emit(rdev, fence);
+ radeon_ring_commit(rdev);
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ }
+ }
+ radeon_unmap_vram_bos(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.active_crtcs & (1 << i)) {
+ rdev->pm.req_vblank |= (1 << i);
+ drm_vblank_get(rdev->ddev, i);
+ }
+ }
+ }
+
+ radeon_set_power_state(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.req_vblank & (1 << i)) {
+ rdev->pm.req_vblank &= ~(1 << i);
+ drm_vblank_put(rdev->ddev, i);
+ }
+ }
+ }
+
+ /* update display watermarks based on new power state */
+ radeon_update_bandwidth_info(rdev);
+ if (rdev->pm.active_crtc_count)
+ radeon_bandwidth_update(rdev);
+
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+ mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&rdev->vram_mutex);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+ int i, j;
+ struct radeon_power_state *power_state;
+ struct radeon_pm_clock_info *clock_info;
+
+ DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ power_state = &rdev->pm.power_state[i];
+ DRM_DEBUG("State %d: %s\n", i,
+ radeon_pm_state_type_name[power_state->type]);
+ if (i == rdev->pm.default_power_state_index)
+ DRM_DEBUG("\tDefault");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ DRM_DEBUG("\tSingle display only\n");
+ DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ for (j = 0; j < power_state->num_clock_modes; j++) {
+ clock_info = &(power_state->clock_info[j]);
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_DEBUG("\t\t%d e: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ else
+ DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->mclk * 10,
+ clock_info->voltage.voltage,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ }
+ }
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int cp = rdev->pm.profile;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (cp == PM_PROFILE_AUTO) ? "auto" :
+ (cp == PM_PROFILE_LOW) ? "low" :
+ (cp == PM_PROFILE_MID) ? "mid" :
+ (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (strncmp("default", buf, strlen("default")) == 0)
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ else if (strncmp("auto", buf, strlen("auto")) == 0)
+ rdev->pm.profile = PM_PROFILE_AUTO;
+ else if (strncmp("low", buf, strlen("low")) == 0)
+ rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("mid", buf, strlen("mid")) == 0)
+ rdev->pm.profile = PM_PROFILE_MID;
+ else if (strncmp("high", buf, strlen("high")) == 0)
+ rdev->pm.profile = PM_PROFILE_HIGH;
+ else {
+ DRM_ERROR("invalid power profile!\n");
+ goto fail;
+ }
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+fail:
+ mutex_unlock(&rdev->pm.mutex);
+
+ return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int pm = rdev->pm.pm_method;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_DYNPM;
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+ bool flush_wq = false;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ flush_wq = true;
+ }
+ /* disable dynpm */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ mutex_unlock(&rdev->pm.mutex);
+ if (flush_wq)
+ flush_workqueue(rdev->wq);
+ } else {
+ DRM_ERROR("invalid power method!\n");
+ goto fail;
+ }
+ radeon_pm_compute_clocks(rdev);
+fail:
+ return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+
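+/* both attributes appear under the drm device's sysfs directory;
+ * on a typical system (path may vary) something like:
+ *   echo mid > /sys/class/drm/card0/device/power_profile
+ *   echo dynpm > /sys/class/drm/card0/device/power_method
+ */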
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+ bool flush_wq = false;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+ rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+ flush_wq = true;
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ if (flush_wq)
+ flush_workqueue(rdev->wq);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+ /* asic init will reset the default power state */
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM
+ && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ radeon_pm_compute_clocks(rdev);
+}
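+
+/* parse the power state tables out of the bios and expose the sysfs
+ * controls only when the board has more than one power state to pick
+ */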
int radeon_pm_init(struct radeon_device *rdev)
{
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for PM!\n");
+ int ret;
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios)
+ radeon_atombios_get_power_modes(rdev);
+ else
+ radeon_combios_get_power_modes(rdev);
+ radeon_pm_print_states(rdev);
+ radeon_pm_init_profile(rdev);
+ }
+
+ if (rdev->pm.num_power_states > 1) {
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+#endif
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
+
+ DRM_INFO("radeon: power management initialized\n");
}
return 0;
}
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states > 1) {
+ bool flush_wq = false;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ /* cancel work */
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ flush_wq = true;
+ /* reset default clocks */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_set_clocks(rdev);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ if (flush_wq)
+ flush_workqueue(rdev->wq);
+
+ device_remove_file(rdev->dev, &dev_attr_power_profile);
+ device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+ }
+
+ if (rdev->pm.i2c_bus)
+ radeon_i2c_destroy(rdev->pm.i2c_bus);
+}
+
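+/* central entry point, called on dpms and mode changes: recount the
+ * active crtcs, then either re-apply the current profile or drive the
+ * dynpm state machine (pause above one head, idle-reclock with one,
+ * drop to minimum clocks with none)
+ */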
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+
+ if (rdev->pm.num_power_states < 2)
+ return;
+
+ mutex_lock(&rdev->pm.mutex);
+
+ rdev->pm.active_crtcs = 0;
+ rdev->pm.active_crtc_count = 0;
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.active_crtc_count++;
+ }
+ }
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+ if (rdev->pm.active_crtc_count > 1) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (rdev->pm.active_crtc_count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+ } else { /* count == 0 */
+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+ }
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+}
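
The dynpm branching above reduces to a three-way decision on the number of lit CRTCs. A condensed restatement, illustrative only (the helper is not part of the patch):

/* illustration: dynpm steering by active CRTC count, per the branches above */
static const char *dynpm_target_sketch(int active_crtc_count)
{
	if (active_crtc_count > 1)
		return "PAUSED: idle work cancelled, clocks reset to default";
	if (active_crtc_count == 1)
		return "ACTIVE: idle work requeued every RADEON_IDLE_LOOP_MS";
	return "MINIMUM: lowest clocks until a crtc is enabled again";
}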
+
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
+{
+ u32 stat_crtc = 0, vbl = 0, position = 0;
+ bool in_vbl = true;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 2)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 3)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 4)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 5)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+ }
+ } else {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+ }
+ }
+
+ if (position < vbl && position > 1)
+ in_vbl = false;
+
+ return in_vbl;
+}
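
For clarity, the scanout test used above as a standalone predicate: vbl holds V_BLANK_START (the low 12 bits of the register) and position the current scanline, so lines strictly between 1 and vbl are active scanout. Illustration only:

/* illustration of the check above: anything outside the active-scanout
 * window (1, vbl) counts as vblank; not part of the patch */
static inline bool scanline_in_vblank_sketch(u32 position, u32 vbl)
{
	return !(position < vbl && position > 1);
}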
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+ bool in_vbl = radeon_pm_in_vbl(rdev);
+
+ if (in_vbl == false)
+ DRM_DEBUG("not in vbl for pm change at %s\n",
+ finish ? "exit" : "entry");
+ return in_vbl;
+}
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev;
+ int resched;
+ rdev = container_of(work, struct radeon_device,
+ pm.dynpm_idle_work.work);
+
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv.emited) {
+ /* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+
+ if (not_processed >= 3) { /* should upclock */
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_upclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_UPCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ } else if (not_processed == 0) { /* should downclock */
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_downclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_DOWNCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ }
+
+ /* Note, radeon_pm_set_clocks is called with static_switch set
+ * to false since we want to wait for vbl to avoid flicker.
+ */
+ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+ time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+}
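
The up/down decision in the handler is plain hysteresis: an opposite pending action is cancelled first, and a fresh action only fires after RADEON_RECLOCK_DELAY_MS. A condensed restatement, illustration only (int stands in for the dynpm action enum declared in radeon.h):

/* illustration: the reclock hysteresis from the handler above */
static int plan_action_sketch(int not_processed, int pending,
			      bool can_up, bool can_down)
{
	if (not_processed >= 3) {		/* busy: lean toward upclock */
		if (pending == DYNPM_ACTION_DOWNCLOCK)
			return DYNPM_ACTION_NONE;
		if (pending == DYNPM_ACTION_NONE && can_up)
			return DYNPM_ACTION_UPCLOCK;
	} else if (not_processed == 0) {	/* idle: lean toward downclock */
		if (pending == DYNPM_ACTION_UPCLOCK)
			return DYNPM_ACTION_NONE;
		if (pending == DYNPM_ACTION_NONE && can_down)
			return DYNPM_ACTION_DOWNCLOCK;
	}
	return pending;
}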
+
/*
* Debugfs info
*/
@@ -49,6 +772,10 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->pm.current_vddc)
+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+ if (rdev->asic->get_pcie_lanes)
+ seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
}
@@ -58,7 +785,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
};
#endif
-int radeon_debugfs_pm_init(struct radeon_device *rdev)
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009dd4a1..c332f46340d5 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
#include "r300_reg.h"
#include "r500_reg.h"
#include "r600_reg.h"
-
+#include "evergreen_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -346,6 +346,7 @@
# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
# define RADEON_TVCLK_TURNOFF (1 << 31)
#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
+# define RADEON_PM_MODE_SEL (1 << 13)
# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
#define RADEON_CLR_CMP_CLR_3D 0x1a24
#define RADEON_CLR_CMP_CLR_DST 0x15c8
@@ -552,7 +553,6 @@
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
-#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
@@ -994,6 +994,7 @@
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
+# define RADEON_GUI_IDLE_MASK (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
@@ -1005,6 +1006,8 @@
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
+# define RADEON_GUI_IDLE_STAT (1 << 19)
+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1060,32 +1063,38 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1 << 0)
-#define RADEON_I2C_NACK (1 << 1)
-#define RADEON_I2C_HALT (1 << 2)
-#define RADEON_I2C_SOFT_RST (1 << 5)
-#define RADEON_I2C_DRIVE_EN (1 << 6)
-#define RADEON_I2C_DRIVE_SEL (1 << 7)
-#define RADEON_I2C_START (1 << 8)
-#define RADEON_I2C_STOP (1 << 9)
-#define RADEON_I2C_RECEIVE (1 << 10)
-#define RADEON_I2C_ABORT (1 << 11)
-#define RADEON_I2C_GO (1 << 12)
-#define RADEON_I2C_PRESCALE_SHIFT 16
+# define RADEON_I2C_DONE (1 << 0)
+# define RADEON_I2C_NACK (1 << 1)
+# define RADEON_I2C_HALT (1 << 2)
+# define RADEON_I2C_SOFT_RST (1 << 5)
+# define RADEON_I2C_DRIVE_EN (1 << 6)
+# define RADEON_I2C_DRIVE_SEL (1 << 7)
+# define RADEON_I2C_START (1 << 8)
+# define RADEON_I2C_STOP (1 << 9)
+# define RADEON_I2C_RECEIVE (1 << 10)
+# define RADEON_I2C_ABORT (1 << 11)
+# define RADEON_I2C_GO (1 << 12)
+# define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_DATA_COUNT_SHIFT 0
-#define RADEON_I2C_ADDR_COUNT_SHIFT 4
-#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
-#define RADEON_I2C_SEL (1 << 16)
-#define RADEON_I2C_EN (1 << 17)
-#define RADEON_I2C_TIME_LIMIT_SHIFT 24
+# define RADEON_I2C_DATA_COUNT_SHIFT 0
+# define RADEON_I2C_ADDR_COUNT_SHIFT 4
+# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+# define RADEON_I2C_SEL (1 << 16)
+# define RADEON_I2C_EN (1 << 17)
+# define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
-# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
-# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
-# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
+# define R200_SEL_DDC1 0 /* depends on asic */
+# define R200_SEL_DDC2 1 /* depends on asic */
+# define R200_SEL_DDC3 2 /* depends on asic */
+# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
+# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
+# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
+# define RADEON_HW_USING_DVI_I2C (1 << 15)
#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4c1f28..261e98a276db 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
@@ -34,6 +35,36 @@
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+{
+ struct radeon_ib *ib, *n;
+
+ list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+ list_del(&ib->list);
+ vfree(ib->ptr);
+ kfree(ib);
+ }
+}
+
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ib *bib;
+
+ bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+ if (bib == NULL)
+ return;
+ bib->ptr = vmalloc(ib->length_dw * 4);
+ if (bib->ptr == NULL) {
+ kfree(bib);
+ return;
+ }
+ memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+ bib->length_dw = ib->length_dw;
+ mutex_lock(&rdev->ib_pool.mutex);
+ list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
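
One plausible call site, sketched for illustration: when scheduling an IB fails, snapshot it so it can be dumped through the radeon_ib_bogus debugfs file added below. The error-path policy here is an assumption, not this patch's code:

/* illustration: record a rejected IB for post-mortem inspection */
static int submit_ib_sketch(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = radeon_ib_schedule(rdev, ib);

	if (r)
		radeon_ib_bogus_add(rdev, ib);	/* keep a copy for debugfs */
	return r;
}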
+
/*
* IB.
*/
@@ -142,6 +173,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
@@ -187,22 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *robj;
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.robj) {
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ radeon_ib_bogus_cleanup(rdev);
+ robj = rdev->ib_pool.robj;
+ rdev->ib_pool.robj = NULL;
+ mutex_unlock(&rdev->ib_pool.mutex);
+
+ if (robj) {
+ r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->ib_pool.robj);
- radeon_bo_unpin(rdev->ib_pool.robj);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+ radeon_bo_kunmap(robj);
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
}
- radeon_bo_unref(&rdev->ib_pool.robj);
- rdev->ib_pool.robj = NULL;
+ radeon_bo_unref(&robj);
}
- mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -224,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
}
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- mutex_lock(&rdev->cp.mutex);
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev);
- if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ if (r)
return r;
- }
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ mutex_lock(&rdev->cp.mutex);
+ r = radeon_ring_alloc(rdev, ndw);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
@@ -261,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
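
This split leaves the locked API intact while exposing unlocked halves (radeon_ring_alloc/radeon_ring_commit) to callers that order ring access themselves. The classic locked emit sequence still reads as follows; a sketch with placeholder reg/val, not driver code:

/* illustration: the unchanged locked emit pattern after the refactor */
static int emit_two_dwords_sketch(struct radeon_device *rdev, u32 reg, u32 val)
{
	int r = radeon_ring_lock(rdev, 2);	/* takes cp.mutex, reserves space */

	if (r)
		return r;
	radeon_ring_write(rdev, PACKET0(reg, 0));
	radeon_ring_write(rdev, val);
	radeon_ring_unlock_commit(rdev);	/* pads, bumps wptr, drops cp.mutex */
	return 0;
}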
@@ -310,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *ring_obj;
mutex_lock(&rdev->cp.mutex);
- if (rdev->cp.ring_obj) {
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ ring_obj = rdev->cp.ring_obj;
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ mutex_unlock(&rdev->cp.mutex);
+
+ if (ring_obj) {
+ r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->cp.ring_obj);
- radeon_bo_unpin(rdev->cp.ring_obj);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_kunmap(ring_obj);
+ radeon_bo_unpin(ring_obj);
+ radeon_bo_unreserve(ring_obj);
}
- radeon_bo_unref(&rdev->cp.ring_obj);
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
+ radeon_bo_unref(&ring_obj);
}
- mutex_unlock(&rdev->cp.mutex);
}
@@ -349,15 +403,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
return 0;
}
+static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct radeon_device *rdev = node->info_ent->data;
+ struct radeon_ib *ib;
+ unsigned i;
+
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (list_empty(&rdev->ib_pool.bogus_ib)) {
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "no bogus IB recorded\n");
+ return 0;
+ }
+ ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+ list_del_init(&ib->list);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ }
+ vfree(ib->ptr);
+ kfree(ib);
+ return 0;
+}
+
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+
+static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+ {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+};
#endif
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
+ int r;
+ radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+ r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+ if (r)
+ return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39ca..b3ba44c0a818 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
struct drm_file *file_priv,
- int id, u32 *data)
+ int id, struct drm_buffer *buf)
{
+ u32 *data;
switch (id) {
case RADEON_EMIT_PP_MISC:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid depth buffer offset\n");
return -EINVAL;
}
+ dev_priv->have_z_offset = 1;
break;
case RADEON_EMIT_PP_CNTL:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid colour buffer offset\n");
return -EINVAL;
}
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_TXOFFSET_3:
case R200_EMIT_PP_TXOFFSET_4:
case R200_EMIT_PP_TXOFFSET_5:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[0])) {
+ data = drm_buffer_pointer_to_dword(buf, 0);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R200 texture offset\n");
return -EINVAL;
}
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_TXFILTER_0:
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R100 texture offset\n");
return -EINVAL;
}
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R100 cubic texture offset\n");
return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
cmdbuf,
unsigned int *cmdsz)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 offset, narrays;
int count, i, k;
- *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ *cmdsz = 2 + count;
- if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+ if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
DRM_ERROR("Not a type 3 packet\n");
return -EINVAL;
}
- if (4 * *cmdsz > cmdbuf->bufsz) {
+ if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size larger than size of data provided\n");
return -EINVAL;
}
- switch(cmd[0] & 0xff00) {
+ switch (*cmd & 0xff00) {
/* XXX Are there old drivers needing other packets? */
case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
break;
case RADEON_3D_LOAD_VBPNTR:
- count = (cmd[0] >> 16) & 0x3fff;
if (count > 18) { /* 12 arrays max */
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
}
/* carefully check packet contents */
- narrays = cmd[1] & ~0xc000;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+ narrays = *cmd & ~0xc000;
k = 0;
i = 2;
while ((k < narrays) && (i < (count + 2))) {
i++; /* skip attribute field */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &cmd[i])) {
+ cmd)) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if (k == narrays)
break;
/* have one more to process, they come in pairs */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
if (radeon_check_and_fixup_offset(dev_priv,
- file_priv, &cmd[i]))
+ file_priv, cmd))
{
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
DRM_ERROR("Invalid rndr_gen_indx offset\n");
return -EINVAL;
}
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if ((*cmd & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
return -EINVAL;
}
break;
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
case RADEON_CNTL_PAINT_MULTI:
case RADEON_CNTL_BITBLT_MULTI:
/* MSB of opcode: next DWORD GUI_CNTL */
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid first packet offset\n");
return -EINVAL;
}
- cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+ *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
return -EINVAL;
}
- cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+ *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
}
break;
default:
- DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+ DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
return -EINVAL;
}
@@ -876,6 +899,12 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
if (tmp & RADEON_BACK)
flags |= RADEON_FRONT;
}
+ if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+ if (!dev_priv->have_z_offset) {
+ printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
+ }
if (flags & (RADEON_FRONT | RADEON_BACK)) {
@@ -1065,7 +1094,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* judging by the first tile offset needed, could possibly
directly address/clear 4x4 tiles instead of 8x2 * 4x4
macro tiles, though would still need clear mask for
- right/bottom if truely 4x4 granularity is desired ? */
+ right/bottom if truly 4x4 granularity is desired ? */
OUT_RING(tileoffset * 16);
/* the number of tiles to clear */
OUT_RING(nrtilesx + 1);
@@ -2611,7 +2640,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2648,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
sz = packet[id].len;
reg = packet[id].start;
- if (sz * sizeof(int) > cmdbuf->bufsz) {
+ if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size provided larger than data provided\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
+ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+ cmdbuf->buffer)) {
DRM_ERROR("Packet verification failed\n");
return -EINVAL;
}
BEGIN_RING(sz + 1);
OUT_RING(CP_PACKET0(reg, (sz - 1)));
- OUT_RING_TABLE(data, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2653,10 +2680,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2675,10 +2700,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2696,11 +2719,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2714,7 +2735,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(5 + sz);
@@ -2722,11 +2743,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2748,11 +2767,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
return 0;
}
@@ -2805,16 +2822,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
} while (++i < cmdbuf->nbox);
if (cmdbuf->nbox == 1)
cmdbuf->nbox = 0;
+ return 0;
out:
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
+ drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
return 0;
}
@@ -2847,16 +2864,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
return 0;
}
-static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
+ drm_radeon_cmd_header_t stack_header;
int idx;
drm_radeon_kcmd_buffer_t *cmdbuf = data;
- drm_radeon_cmd_header_t header;
- int orig_nbox, orig_bufsz;
- char *kbuf = NULL;
+ int orig_nbox;
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2871,18 +2888,20 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
* races between checking values and using those values in other code,
* and simply to avoid a lot of function calls to copy in data.
*/
- orig_bufsz = cmdbuf->bufsz;
- if (orig_bufsz != 0) {
- kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
- if (kbuf == NULL)
- return -ENOMEM;
- if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
- cmdbuf->bufsz)) {
- kfree(kbuf);
- return -EFAULT;
+ if (cmdbuf->bufsz != 0) {
+ int rv;
+ void __user *buffer = cmdbuf->buffer;
+ rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+ if (rv)
+ return rv;
+ rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+ cmdbuf->bufsz);
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
+ return rv;
}
- cmdbuf->buf = kbuf;
- }
+ } else
+ goto done;
orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2909,23 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (orig_bufsz != 0)
- kfree(kbuf);
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
/* microcode_version != r300 */
- while (cmdbuf->bufsz >= sizeof(header)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
- header.i = *(int *)cmdbuf->buf;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ drm_radeon_cmd_header_t *header;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case RADEON_CMD_PACKET:
DRM_DEBUG("RADEON_CMD_PACKET\n");
if (radeon_emit_packets
- (dev_priv, file_priv, header, cmdbuf)) {
+ (dev_priv, file_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_packets failed\n");
goto err;
}
@@ -2915,7 +2933,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS:
DRM_DEBUG("RADEON_CMD_SCALARS\n");
- if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars failed\n");
goto err;
}
@@ -2923,7 +2941,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_VECTORS:
DRM_DEBUG("RADEON_CMD_VECTORS\n");
- if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_vectors failed\n");
goto err;
}
@@ -2931,7 +2949,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -2968,7 +2986,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS2:
DRM_DEBUG("RADEON_CMD_SCALARS2\n");
- if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars2 failed\n");
goto err;
}
@@ -2976,37 +2994,36 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_WAIT:
DRM_DEBUG("RADEON_CMD_WAIT\n");
- if (radeon_emit_wait(dev, header.wait.flags)) {
+ if (radeon_emit_wait(dev, header->wait.flags)) {
DRM_ERROR("radeon_emit_wait failed\n");
goto err;
}
break;
case RADEON_CMD_VECLINEAR:
DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
- if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_veclinear failed\n");
goto err;
}
break;
default:
- DRM_ERROR("bad cmd_type %d at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %d at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator);
goto err;
}
}
- if (orig_bufsz != 0)
- kfree(kbuf);
+ drm_buffer_free(cmdbuf->buffer);
+ done:
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (orig_bufsz != 0)
- kfree(kbuf);
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
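
The conversion leans on a handful of drm_buffer primitives; gathered into one sketch of the idiom for reference (the function and empty dispatch are illustrative, every drm_buffer_* call appears in the hunks above):

/* illustration: the drm_buffer idiom used throughout this file */
static int parse_cmdbuf_sketch(void __user *user_ptr, int size)
{
	drm_radeon_cmd_header_t stack_header, *header;
	struct drm_buffer *buf;
	int rv;

	rv = drm_buffer_alloc(&buf, size);	/* kernel-side staging buffer */
	if (rv)
		return rv;
	rv = drm_buffer_copy_from_user(buf, user_ptr, size);
	if (rv) {
		drm_buffer_free(buf);
		return rv;
	}
	while (drm_buffer_unprocessed(buf) >= sizeof(stack_header)) {
		/* copies the header out and advances the iterator */
		header = drm_buffer_read_object(buf, sizeof(stack_header),
						&stack_header);
		/* ... dispatch on header->header.cmd_type; emitters consume
		 * payload via OUT_RING_DRM_BUFFER()/drm_buffer_advance() ... */
	}
	drm_buffer_free(buf);
	return 0;
}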
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f929da9..313c96bc09da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - rdev->mc.gtt_location);
+ gtt_addr - rdev->mc.gtt_start);
}
out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf974ca..e9918d88f5b0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,9 +33,11 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -150,7 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = rdev->mc.gtt_location;
+ man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -161,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
(unsigned)type);
return -EINVAL;
}
- man->io_offset = rdev->mc.agp_base;
- man->io_size = rdev->mc.gtt_size;
- man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- } else
-#endif
- {
- man->io_offset = 0;
- man->io_size = 0;
- man->io_addr = NULL;
}
+#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = rdev->mc.vram_location;
+ man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->io_addr = NULL;
- man->io_offset = rdev->mc.aper_base;
- man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -243,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -262,10 +251,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
- old_start += rdev->mc.vram_location;
+ old_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- old_start += rdev->mc.gtt_location;
+ old_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
- new_start += rdev->mc.vram_location;
+ new_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- new_start += rdev->mc.gtt_location;
+ new_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -289,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -316,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -330,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -348,7 +338,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -368,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -393,8 +384,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -421,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
-
return r;
}
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
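
With the static io_offset/io_size/io_addr fields dropped from the memory-type managers above, bus addressing is now computed on demand per ttm_mem_reg. Illustratively, once io_mem_reserve() has succeeded, the CPU-visible physical address is:

/* illustration: how the core consumes the fields filled in above; the
 * region is ioremapped only when bus.is_iomem is set */
static unsigned long bus_phys_sketch(struct ttm_mem_reg *mem)
{
	return mem->bus.base + mem->bus.offset;
}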
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -478,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -570,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
+ struct radeon_device *rdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
if (bo == NULL) {
return VM_FAULT_NOPAGE;
}
+ rdev = radeon_get_rdev(bo->bdev);
+ mutex_lock(&rdev->vram_mutex);
r = ttm_vm_ops->fault(vma, vmf);
+ mutex_unlock(&rdev->vram_mutex);
return r;
}
@@ -744,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -762,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
}
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 000000000000..f78fd592544d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,611 @@
+evergreen 0x9400
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GROUP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 19c4663fa9c6..1e97b2d129fd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -125,6 +125,8 @@ r300 0x4f60
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 989f7a020832..e958980d00f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -125,6 +125,8 @@ r420 0x4f60
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 000000000000..af0da4ae3f55
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,762 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028350 SX_MISC
+0x00009604 TC_INVALIDATE
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D24 DB_HTILE_SURFACE
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x00028100 CB_COLOR0_MASK
+0x00028104 CB_COLOR1_MASK
+0x00028108 CB_COLOR2_MASK
+0x0002810C CB_COLOR3_MASK
+0x00028110 CB_COLOR4_MASK
+0x00028114 CB_COLOR5_MASK
+0x00028118 CB_COLOR6_MASK
+0x0002811C CB_COLOR7_MASK
+0x00028080 CB_COLOR0_VIEW
+0x00028084 CB_COLOR1_VIEW
+0x00028088 CB_COLOR2_VIEW
+0x0002808C CB_COLOR3_VIEW
+0x00028090 CB_COLOR4_VIEW
+0x00028094 CB_COLOR5_VIEW
+0x00028098 CB_COLOR6_VIEW
+0x0002809C CB_COLOR7_VIEW
+0x00028808 CB_COLOR_CONTROL
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009700 VC_CNTL
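
The new r600 list above follows the reg_srcs convention: a header line naming the family plus a hex parameter, then one "offset name" pair per register that userspace command streams are allowed to touch. At build time the kernel's mkregtable tool compiles these lists into bitmap tables for the CS checker. As a rough illustration of that compile step, here is a minimal self-contained userspace sketch that folds a reg_srcs-style file into a per-dword bitmap; the fixed 0x40000 bound and treating the header value as opaque are assumptions of the sketch, not mkregtable's actual behavior.

#include <stdio.h>
#include <stdlib.h>

#define MAX_REG 0x40000u /* assumed bound; covers the 0x3FF08 entries above */

int main(int argc, char **argv)
{
	char family[32], name[64];
	unsigned int hdr, offset;
	unsigned int *bm = calloc(MAX_REG / 4 / 32 + 1, sizeof(*bm));
	FILE *f = fopen(argc > 1 ? argv[1] : "r600", "r");

	if (!f || !bm || fscanf(f, "%31s %x", family, &hdr) != 2)
		return 1;
	while (fscanf(f, "%x %63s", &offset, name) == 2)
		if (offset < MAX_REG && !(offset & 3))	/* dword-aligned only */
			bm[offset / 4 / 32] |= 1u << (offset / 4 % 32);
	printf("%s (header 0x%X): bitmap populated\n", family, hdr);
	free(bm);
	fclose(f);
	return 0;
}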
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 6801b865d1c4..83e8bc0c2bb2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -125,6 +125,8 @@ rs600 0x6d40
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 38abf63bf2cd..1e46233985eb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -35,6 +35,7 @@ rv515 0x6d40
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
@@ -158,6 +159,8 @@ rv515 0x6d40
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
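
The GB_MSPOS0/GB_MSPOS1 additions to the r300, r420, rs600 and rv515 lists all serve the same purpose: whitelisting the multisample-position registers so user command streams may program them. On the consuming side, the generated table is consulted with a dword-indexed bitmap lookup; a sketch of that check follows, where reg_write_allowed and the set-bit-means-allowed convention are illustrative assumptions for the sketch, not the kernel tables' exact semantics.

#include <stdint.h>

/* Hypothetical CS-checker helper: 'safe_bm' holds one bit per dword
 * register, 'nregs' is the number of dword offsets the table covers. */
int reg_write_allowed(const uint32_t *safe_bm, unsigned int nregs,
		      unsigned int reg)
{
	unsigned int i = reg >> 2;	/* byte offset -> dword index */

	if ((reg & 3) || i >= nregs)
		return 0;		/* misaligned or out of range */
	return (safe_bm[i / 32] >> (i % 32)) & 1;
}

With a table built from the r300 list (header "r300 0x4f60", so nregs would be 0x4f60 >> 2), reg_write_allowed(bm, 0x4f60 >> 2, 0x4010) would vet exactly the GB_MSPOS0 write these hunks now permit.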
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e6..f454c9a5e7f2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -26,8 +26,10 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "rs400d.h"
/* This file gathers functions specific to rs400, rs480 */
@@ -55,7 +57,9 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
/* FIXME: RS400 & RS480 seems to have issue with GART size
- * if 4G of system memory (needs more testing) */
+ * if 4G of system memory (needs more testing)
+ */
+ /* XXX is this still an issue with proper alignment? */
rdev->mc.gtt_size = 32 * 1024 * 1024;
DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
}
@@ -113,6 +117,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
+ radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +155,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
WREG32(RS480_AGP_BASE_2, 0);
}
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
- tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+ tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -202,9 +206,9 @@ void rs400_gart_disable(struct radeon_device *rdev)
void rs400_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rs400_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@ -241,8 +245,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs400 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@@ -251,14 +253,21 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}
-void rs400_vram_info(struct radeon_device *rdev)
+void rs400_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
rs400_gart_adjust_size(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
r100_vram_init_sizes(rdev);
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +371,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}
-static int rs400_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32(R_00015C_NB_TOM);
- rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
-}
-
void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -399,6 +392,8 @@ static int rs400_startup(struct radeon_device *rdev)
{
int r;
+ r100_set_common_regs(rdev);
+
rs400_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -439,7 +434,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -502,7 +497,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -514,14 +509,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- rs400_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs400_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs400_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
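
The rs400_mc_init() rewrite above drops the open-coded vram_location/gtt_location bookkeeping in favor of the shared radeon_vram_location()/radeon_gtt_location() helpers: VRAM is placed at the base read from RADEON_NB_TOM, then the GTT is slotted in after it, aligned via gtt_base_align = gtt_size - 1. A standalone sketch of that placement arithmetic; the simple align-after-VRAM rule mirrors the setup above but is a simplifying assumption, not the helpers' full fallback logic.

#include <stdint.h>
#include <stdio.h>

struct mc {
	uint64_t vram_start, vram_end, gtt_start, gtt_end;
	uint64_t vram_size, gtt_size, gtt_base_align;
};

static void place_mc(struct mc *mc, uint64_t base)
{
	mc->vram_start = base;
	mc->vram_end = base + mc->vram_size - 1;
	/* round the GTT base up to (gtt_base_align + 1) past end of VRAM */
	mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) &
			~mc->gtt_base_align;
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}

int main(void)
{
	struct mc mc = { .vram_size = 128ull << 20, .gtt_size = 32ull << 20 };

	mc.gtt_base_align = mc.gtt_size - 1;	/* as rs400_mc_init() sets it */
	place_mc(&mc, 0);	/* base comes from RADEON_NB_TOM on real hw */
	printf("VRAM %llx-%llx GTT %llx-%llx\n",
	       (unsigned long long)mc.vram_start,
	       (unsigned long long)mc.vram_end,
	       (unsigned long long)mc.gtt_start,
	       (unsigned long long)mc.gtt_end);
	return 0;
}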
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13e..6dc15ea8ba33 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,6 +37,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"
@@ -45,21 +46,134 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-int rs600_mc_init(struct radeon_device *rdev)
+void rs600_pm_misc(struct radeon_device *rdev)
{
- /* read back the MC value from the hw */
- int r;
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ } else if (voltage->type == VOLTAGE_VDDC)
+ radeon_atom_set_voltage(rdev, voltage->vddc_id);
+
+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+ }
+ } else {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+ }
+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+ } else
+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ hdp_dyn_cntl &= ~HDP_FORCEON;
+ else
+ hdp_dyn_cntl |= HDP_FORCEON;
+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+ /* mc_host_dyn seems to cause hangs from time to time */
+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+ else
+ mc_host_dyn_cntl |= MC_HOST_FORCEON;
+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+ else
+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
u32 tmp;
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
- rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xffffffffUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
}
/* hpd for digital panel detect/disconnect */
@@ -163,6 +277,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* disable bus mastering */
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+ u32 status, tmp;
+
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ rs600_bm_disable(rdev);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
+ return -1;
+ }
+ rv515_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ return 0;
+}
+
/*
* GART.
*/
@@ -175,7 +361,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
@@ -213,6 +399,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
WREG32(R_00004C_BUS_CNTL, tmp);
@@ -283,9 +470,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
void rs600_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
#define R600_PTE_VALID (1 << 0)
@@ -325,6 +512,9 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
+ if (rdev->irq.gui_idle) {
+ tmp |= S_000040_GUI_IDLE(1);
+ }
if (rdev->irq.crtc_vblank_int[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
@@ -347,9 +537,15 @@ int rs600_irq_set(struct radeon_device *rdev)
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
- uint32_t irq_mask = ~C_000044_SW_INT;
+ uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= S_000044_GUI_IDLE_STAT(1);
+ }
+
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -397,6 +593,9 @@ int rs600_irq_process(struct radeon_device *rdev)
uint32_t r500_disp_int;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
return IRQ_NONE;
@@ -405,11 +604,23 @@ int rs600_irq_process(struct radeon_device *rdev)
/* SW interrupt */
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
+ /* GUI idle */
+ if (G_000040_GUI_IDLE(status)) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
@@ -420,6 +631,8 @@ int rs600_irq_process(struct radeon_device *rdev)
}
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -463,34 +676,59 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-void rs600_vram_info(struct radeon_device *rdev)
+void rs600_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ base = RREG32_MC(R_000004_MC_FB_LOCATION);
+ base = G_000004_MC_FB_START(base) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rs600_bandwidth_update(struct radeon_device *rdev)
{
- /* FIXME: implement, should this be like rs690 ? */
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ /* FIXME: implement full support */
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+ d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ }
}
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -585,7 +823,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -647,7 +885,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -659,14 +897,8 @@ int rs600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- rs600_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs600_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs600_mc_init(rdev);
rs600_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index c1c8f5885cbb..a27c13ac47c3 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
@@ -535,4 +581,91 @@
#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
+#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
+#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
+#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
+#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
+#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+
+/* PLL regs */
+#define GENERAL_PWRMGT 0x8
+#define GLOBAL_PWRMGT_EN (1 << 0)
+#define MOBILE_SU (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH 0xc
+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
+#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
+#define STATIC_SCREEN_HILEN(x) ((x) << 24)
+#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
+#define DYN_SCLK_VOL_CNTL 0xe
+#define IO_CG_VOLTAGE_DROP (1 << 0)
+#define VOLTAGE_DROP_SYNC (1 << 2)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
+#define HDP_DYN_CNTL 0x10
+#define HDP_FORCEON (1 << 0)
+#define MC_HOST_DYN_CNTL 0x1e
+#define MC_HOST_FORCEON (1 << 0)
+#define DYN_BACKBIAS_CNTL 0x29
+#define IO_CG_BACKBIAS_EN (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
+#define PWRDN_WAIT_BUSY_OFF (1 << 0)
+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
+#define PWRDN_WAIT_PPLL_OFF (1 << 8)
+#define PWRUP_WAIT_PPLL_ON (1 << 12)
+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
+#define PM_ASSERT_RESET (1 << 20)
+#define PM_PWRDN_PPLL (1 << 24)
+
#endif
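The S_/G_/C_ triplets added above follow the usual radeon register-header convention: S_FIELD(x) shifts a value into the field, G_FIELD(x) extracts it, and C_FIELD is the complement mask that clears the field while preserving the rest of the register. A short usage sketch, illustrative rather than part of the patch:

/* Assert the GA and VAP soft-reset bits, then deassert GA only;
 * ANDing with the C_ mask leaves every other bit untouched.
 */
u32 tmp = S_0000F0_SOFT_RESET_GA(1) | S_0000F0_SOFT_RESET_VAP(1);
WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
tmp &= C_0000F0_SOFT_RESET_GA;
WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);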
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771aee5a..ce4ecbe10816 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -27,6 +27,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"
@@ -47,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs690 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -57,65 +56,82 @@ static void rs690_gpu_init(struct radeon_device *rdev)
}
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
void rs690_pm_info(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
- void *ptr;
+ union igp_info *info;
uint16_t data_offset;
uint8_t frev, crev;
fixed20_12 tmp;
- atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
- &frev, &crev, &data_offset);
- ptr = rdev->mode_info.atom_context->bios + data_offset;
- info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
- info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
- /* Get various system informations from bios */
- switch (crev) {
- case 1:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
- rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
- break;
- case 2:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
- break;
- default:
- tmp.full = rfixed_const(100);
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+ /* Get various system information from the BIOS */
+ switch (crev) {
+ case 1:
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ if (info->info.usK8MemoryClock)
+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ else if (rdev->clock.default_mclk) {
+ rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ } else
+ rdev->pm.igp_system_mclk.full = dfixed_const(400);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
+ break;
+ case 2:
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ if (info->info_v2.ulBootUpUMAClock)
+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+ else if (rdev->clock.default_mclk)
+ rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+ else
+ rdev->pm.igp_system_mclk.full = dfixed_const(66700);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ break;
+ default:
+ /* We assume the slowest possible clock, i.e. worst case */
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+ rdev->pm.igp_system_mclk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+ DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+ break;
+ }
+ } else {
/* We assume the slowest possible clock, i.e. worst case */
- /* DDR 333Mhz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
- /* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+ rdev->pm.igp_system_mclk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
- break;
}
/* Compute various bandwidth */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
- tmp.full = rfixed_const(4);
- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+ tmp.full = dfixed_const(4);
+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
* = ht_clk * ht_width / 5
*/
- tmp.full = rfixed_const(5);
- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+ tmp.full = dfixed_const(5);
+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
rdev->pm.igp_ht_link_width);
- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
if (tmp.full < rdev->pm.max_bandwidth.full) {
/* HT link is a limiting factor */
rdev->pm.max_bandwidth.full = tmp.full;
@@ -123,59 +139,32 @@ void rs690_pm_info(struct radeon_device *rdev)
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
* = (sideport_clk * 14) / 10
*/
- tmp.full = rfixed_const(14);
- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
- tmp.full = rfixed_const(10);
- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+ tmp.full = dfixed_const(14);
+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+ tmp.full = dfixed_const(10);
+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
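As a sanity check on the bandwidth formulas in the comments above, a standalone sketch with the safe-default clocks plugged in (the values are the fallback defaults used by the code, not read from hardware):

#include <stdio.h>

int main(void)
{
	unsigned system_mclk = 200, sideport_mclk = 200; /* MHz */
	unsigned ht_clk = 1000, ht_width = 8;

	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
	unsigned k8_bw = system_mclk * 4;                 /* 800 */
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 = ht_clk * ht_width / 5 */
	unsigned ht_bw = ht_clk * ht_width / 5;           /* 1600 */
	/* sideport_bandwidth = (sideport_clk * 14) / 10 */
	unsigned sp_bw = sideport_mclk * 14 / 10;         /* 280 */

	printf("k8=%u ht=%u sideport=%u\n", k8_bw, ht_bw, sp_bw);
	return 0;
}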
-void rs690_vram_info(struct radeon_device *rdev)
+void rs690_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
+ u64 base;
rs400_gart_adjust_size(rdev);
-
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
rs690_pm_info(rdev);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- a.full = rfixed_const(16);
- /* core_bandwidth = sclk(Mhz) * 16 */
- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
-}
-
-static int rs690_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
- rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -243,10 +232,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
- /* FIXME: detect IGP with sideport memory, i don't think there is any
- * such product available
- */
- bool sideport = false;
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set priority mark to maximum */
@@ -254,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -276,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ration, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -300,27 +285,27 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Maximum bandwidth is the minimum bandwidth of all components */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
- if (sideport) {
+ if (rdev->mc.igp_sideport_enabled) {
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
- read_delay_latency.full = rfixed_div(read_delay_latency,
+ read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -329,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
- read_delay_latency.full = rfixed_const(5000);
+ read_delay_latency.full = dfixed_const(5000);
}
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
- a.full = rfixed_const(16);
- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
- a.full = rfixed_const(1000);
- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+ a.full = dfixed_const(16);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+ a.full = dfixed_const(1000);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
- a.full = rfixed_const(256 * 13);
- chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
- a.full = rfixed_const(10);
- chunk_time.full = rfixed_div(chunk_time, a);
+ a.full = dfixed_const(256 * 13);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+ a.full = dfixed_const(10);
+ chunk_time.full = dfixed_div(chunk_time, a);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -355,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
- a.full = rfixed_const(2);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ a.full = dfixed_const(2);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
}
@@ -375,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
- wm->dbpp.full = rfixed_const(4 * 8);
+ wm->dbpp.full = dfixed_const(4 * 8);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = dfixed_const(10);
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
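To make the pixel-clock-period math above concrete: pclk = 1000 / (mode->clock / 1000), i.e. the period in nanoseconds of a clock given in kHz. A short worked example with an assumed 1920x1080@60 mode (clock = 148500 kHz, crtc_htotal = 2200; the mode values are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	double clock_khz = 148500.0;                     /* assumed mode */
	double pclk_ns = 1000.0 / (clock_khz / 1000.0);  /* ~6.73 ns per pixel */
	double line_time_ns = 2200.0 * pclk_ns;          /* ~14815 ns per line */

	printf("pclk=%.2fns line=%.0fns\n", pclk_ns, line_time_ns);
	return 0;
}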
@@ -413,10 +398,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0;
struct rs690_watermark wm1;
- u32 tmp;
+ u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -426,7 +413,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
* modes if the user specifies HIGH for displaypriority
* option.
*/
- if (rdev->disp_priority == 2) {
+ if ((rdev->disp_priority == 2) &&
+ ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
tmp &= C_000104_MC_DISP0R_INIT_LAT;
tmp &= C_000104_MC_DISP1R_INIT_LAT;
@@ -451,124 +439,136 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ }
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
S_006D48_D2MODE_PRIORITY_A_OFF(1));
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
S_006548_D1MODE_PRIORITY_A_OFF(1));
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
S_00654C_D1MODE_PRIORITY_B_OFF(1));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
}
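The rfixed_*/dfixed_* helpers renamed throughout this patch implement 20.12 fixed-point arithmetic. A simplified model of what they do, assuming the 20.12 layout (the real helpers live in the shared DRM headers and include rounding that this sketch omits):

typedef struct { unsigned int full; } fixed20_12;

#define DFIXED_SHIFT 12

static fixed20_12 d_const(unsigned int v)           /* integer -> 20.12 */
{
	fixed20_12 f = { v << DFIXED_SHIFT };
	return f;
}

static unsigned int d_trunc(fixed20_12 f)           /* 20.12 -> integer */
{
	return f.full >> DFIXED_SHIFT;
}

static fixed20_12 d_div(fixed20_12 a, fixed20_12 b) /* widen, then divide */
{
	fixed20_12 f;
	f.full = (unsigned int)(((unsigned long long)a.full << DFIXED_SHIFT) / b.full);
	return f;
}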
@@ -651,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -714,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -726,14 +726,8 @@ int rs690_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- rs690_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs690_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs690_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 62d31e7a897f..36e6398a98ae 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -182,6 +182,9 @@
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8632b8..0c9c169a6852 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -26,9 +26,11 @@
* Jerome Glisse
*/
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "rv515d.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rv515_reg_safe.h"
@@ -145,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
- r100_rb2d_reset(rdev);
-
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
-
rv515_vga_render_disable(rdev);
-
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@@ -172,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
-int rv515_ga_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(CP_CSQ_MODE, 0);
- WREG32(CP_CSQ_CNTL, 0);
- WREG32(RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- rv515_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
- /* Check if GPU is idle */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
- return -1;
- }
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
- return 0;
-}
-
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -277,19 +189,16 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
}
}
-void rv515_vram_info(struct radeon_device *rdev)
+void rv515_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
rv515_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -337,7 +246,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@@ -505,7 +414,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -574,7 +483,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -585,14 +494,15 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
- /* Get vram informations */
- rv515_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ rv515_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
@@ -883,20 +793,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -905,23 +815,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ratio, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -929,27 +839,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(Mhz)
*/
- a.full = rfixed_const(600 * 1000);
- chunk_time.full = rfixed_div(a, rdev->pm.sclk);
- read_delay_latency.full = rfixed_const(1000);
+ a.full = dfixed_const(600 * 1000);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+ read_delay_latency.full = dfixed_const(1000);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -959,9 +869,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -977,34 +887,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
- wm->dbpp.full = rfixed_const(2 * 16);
+ wm->dbpp.full = dfixed_const(2 * 16);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = wm->priority_mark_max.full;
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -1015,7 +925,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0;
struct rv515_watermark wm1;
- u32 tmp;
+ u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -1033,120 +943,132 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ }
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
}
@@ -1156,6 +1078,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -1165,7 +1089,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
* modes if the user specifies HIGH for displaypriority
* option.
*/
- if (rdev->disp_priority == 2) {
+ if ((rdev->disp_priority == 2) &&
+ (rdev->family == CHIP_RV515)) {
tmp = RREG32_MC(MC_MISC_LAT_TIMER);
tmp &= ~MC_DISP1R_INIT_LAT_MASK;
tmp &= ~MC_DISP0R_INIT_LAT_MASK;
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384d..590309a710b1 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 03021674d097..b7fd82064922 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -27,8 +27,10 @@
*/
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
@@ -40,6 +42,21 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
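+/* Apply chip-specific power-management setup for the requested state:
+ * reprogram the core voltage (vddc) through the ATOM tables whenever a
+ * software-controlled voltage differs from the one currently set.
+ */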
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
+}
/*
* GART
@@ -56,6 +73,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
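+	/* rewrite the GART table from the tracked pages so entries bound
+	 * while the GART was disabled become visible to the hardware
+	 */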
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -124,9 +142,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
@@ -206,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
- WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@@ -234,7 +252,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -269,13 +286,19 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
+static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
@@ -284,6 +307,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +337,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -323,49 +359,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
@@ -385,8 +472,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -515,6 +604,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
+ default:
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
@@ -526,16 +616,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
case 8:
gb_tiling_config |= PIPE_TILING(3);
break;
- default:
- break;
}
+ rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE(0);
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +640,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
- gb_tiling_config |= BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.rv770.max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= BACKEND_MAP(backend_map);
+
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -572,15 +669,15 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -590,10 +687,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
- WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
- SYNC_GRADIENT |
- SYNC_WALKER |
- SYNC_ALIGNER));
+ ta_aux_cntl = RREG32(TA_CNTL_AUX);
+ WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +699,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
- WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
- GS_FLUSH_CTL(4) |
- ACK_FLUSH_CTL(3) |
- SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV740)
+ WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
- if (rdev->family == CHIP_RV770)
- WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RREG32(DB_DEBUG3);
+ db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ WREG32(DB_DEBUG3, db_debug3);
+
+ if (rdev->family != CHIP_RV770) {
db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +749,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -779,7 +888,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
int rv770_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u32 tmp;
int chansize, numchan;
@@ -816,58 +924,12 @@ int rv770_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r600_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- }
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
return 0;
}
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
- /* FIXME: implement any rv770 specific bits */
- return r600_gpu_reset(rdev);
-}
static int rv770_startup(struct radeon_device *rdev)
{
@@ -959,6 +1021,13 @@ int rv770_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon: audio init failed\n");
+ return r;
+ }
+
return r;
}
@@ -967,6 +1036,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
@@ -1032,12 +1102,11 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ /* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
@@ -1069,7 +1138,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1089,13 +1158,20 @@ int rv770_init(struct radeon_device *rdev)
}
}
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab6f261..9506f8cb99e0 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
#define WAIT_UNTIL 0x8040
+#define SRBM_STATUS 0x0E50
+
#endif
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2524c8..2d0c9ca484c5 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4648ed2f0143..4bf69c404491 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -35,6 +35,7 @@
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <asm/agp.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c7320ce4567d..555ebb12ace8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
@@ -128,7 +126,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
NULL
};
-static struct sysfs_ops ttm_bo_global_ops = {
+static const struct sysfs_ops ttm_bo_global_ops = {
.show = &ttm_bo_global_show
};
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
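+/* Pause the delayed-destroy worker; returns whether work was pending so
+ * the caller can hand that back to ttm_bo_unlock_delayed_workqueue() as
+ * @resched to requeue it afterwards.
+ */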
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched)
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved = false;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved = false;
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved = false;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1425,8 +1447,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
atomic_set(&glob->bo_count, 0);
- kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
- ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+ ret = kobject_init_and_add(
+ &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset, unsigned long *bus_size)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- *bus_size = 0;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- if (ttm_mem_reg_is_pci(bdev, mem)) {
- *bus_offset = mem->mm_node->start << PAGE_SHIFT;
- *bus_size = mem->num_pages << PAGE_SHIFT;
- *bus_base = man->io_offset;
- }
-
- return 0;
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1716,40 +1718,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
-{
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-}
-
-int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
-{
- int ret;
-
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
- if (no_wait)
- return -EBUSY;
- else if (interruptible) {
- ret = wait_event_interruptible
- (bo->event_queue, atomic_read(&bo->reserved) == 0);
- if (unlikely(ret != 0))
- return ret;
- } else {
- wait_event(bo->event_queue,
- atomic_read(&bo->reserved) == 0);
- }
- }
- return 0;
-}
-
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
int ret = 0;
/*
- * Using ttm_bo_reserve instead of ttm_bo_block_reservation
- * makes sure the lru lists are updated.
+ * Using ttm_bo_reserve makes sure the lru lists are updated.
*/
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
@@ -1839,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5ca37a58a98c..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
@@ -49,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -80,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
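+/* Ask the driver to make the bus space behind @mem accessible; the
+ * io_reserved flag makes the reserve/free pair idempotent.
+ */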
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ int ret;
+
+ if (!mem->bus.io_reserved) {
+ mem->bus.io_reserved = true;
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ if (bdev->driver->io_mem_reserve) {
+ if (mem->bus.io_reserved) {
+ mem->bus.io_reserved = false;
+ bdev->driver->io_mem_free(bdev, mem);
+ }
+ }
+}
+
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret || !mem->bus.is_iomem)
return ret;
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
- else {
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ if (!addr) {
+ ttm_mem_io_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -116,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
man = &bdev->man[mem->mem_type];
- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -207,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -368,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
+ unsigned long offset,
+ unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -440,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ unsigned long offset, size;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -455,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
if (ret)
return ret;
- if (bus_size == 0) {
+ if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
@@ -476,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -493,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify)
- bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
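+			/* deliberate fall-through: retry the fault */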
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
/*
* Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
+ ret = ttm_mem_io_reserve(bdev, &bo->mem);
+ if (ret) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
}
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (bo->mem.bus.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (bo->mem.bus.is_iomem)
+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3d172ef04ee1..de41e55a944a 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -204,7 +204,6 @@ static int __ttm_vt_unlock(struct ttm_lock *lock)
lock->flags &= ~TTM_VT_LOCK;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
- printk(KERN_INFO TTM_PFX "vt unlock.\n");
return ret;
}
@@ -265,10 +264,8 @@ int ttm_vt_lock(struct ttm_lock *lock,
ttm_lock_type, &ttm_vt_lock_remove, NULL);
if (ret)
(void)__ttm_vt_unlock(lock);
- else {
+ else
lock->vt_holder = tfile;
- printk(KERN_INFO TTM_PFX "vt lock.\n");
- }
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f5245c02b8fd..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,11 +27,13 @@
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/slab.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
@@ -152,7 +154,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
NULL
};
-static struct sysfs_ops ttm_mem_zone_ops = {
+static const struct sysfs_ops ttm_mem_zone_ops = {
.show = &ttm_mem_zone_show,
.store = &ttm_mem_zone_store
};
@@ -260,8 +262,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -296,8 +298,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -343,8 +345,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -365,10 +367,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
init_waitqueue_head(&glob->queue);
- kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
- ret = kobject_add(&glob->kobj,
- ttm_get_kobj(),
- "memory_accounting");
+ ret = kobject_init_and_add(
+ &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
kobject_put(&glob->kobj);
return ret;
@@ -394,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
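+	/* cap the page pools at half of the kernel zone, in pages */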
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -406,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
@@ -413,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..ca904799f018
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (c) Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in-use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ * @name: Pool name used in debug output.
+ * @nfrees: Statistics counter of pages freed from this pool.
+ * @nrefills: Statistics counter of pool refill operations.
+ */
+struct ttm_page_pool {
+ spinlock_t lock;
+ bool fill_lock;
+ struct list_head list;
+ int gfp_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only
+ * place where they may change is the sysfs store. They won't have an
+ * immediate effect anyway, so forcing serialized access to them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * The manager is a read-only object for the pool code, so it doesn't need
+ * locking.
+ *
+ * @kobj: sysfs kobject exposing the tunable pool limits.
+ * @mm_shrink: shrinker hook that lets the mm reclaim pooled pages.
+ * @options: pool limits; see struct ttm_pool_opts above.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ struct kobject kobj;
+ struct shrinker mm_shrink;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+	};
+ };
+};
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager *_manager;
+
+#ifndef CONFIG_X86
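+/* Non-x86 fallbacks: approximate the caching transitions with the AGP
+ * map/unmap hooks when TTM_HAS_AGP is set, otherwise make them no-ops.
+ */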
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
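+
+	/* bit 0 selects caching (0 = wc, 1 = uc), bit 1 the dma32 variant;
+	 * this mirrors the pools[] union layout in ttm_pool_manager
+	 */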
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager->pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ if (set_pages_array_wb(pages, npages))
+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
+ npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: pool to free the pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES drains the pool entirely
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_to_free) {
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate memory for pool free operation.\n");
+ return 0;
+ }
+
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ list_for_each_entry_reverse(p, &pool->list, lru) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ __list_del(p->lru.prev, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+			/* when freeing everything, restart the scan */
+ if (nr_free)
+ goto restart;
+
+			/* Not allowed to fall through or break here,
+			 * because the code following this block runs
+			 * under the spinlock, which we dropped above.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free across the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager->pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for the mm shrinker requesting that the pools reduce the
+ * number of pages held.
+ */
+static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to uc!\n",
+ cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to wc!\n",
+ cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are
+ * any pages that have already changed their caching state, put them back
+ * into the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ struct page **failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ list_del(&failed_pages[i]->lru);
+ __free_page(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count based on the
+ * number of pages returned in the pages list.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ struct page **caching_array;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate table for new pages.");
+ return -ENOMEM;
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = alloc_page(gfp_flags);
+
+ if (!p) {
+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* The gfp flags of a highmem page should never include dma32,
+ * so we should be fine in that case.
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ list_add(&p->lru, pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+
+ return r;
+}
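max_cpages caps the temporary pointer array at a single page: with 4 KiB pages and 8-byte pointers, PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512, so caching transitions are requested for at most 512 pages per set_pages_array_*() call.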
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested
+ * number of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ struct page *p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+ * If the pool doesn't have enough pages for the allocation, new pages
+ * are allocated from outside the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, we fill the pool first. */
+ if (count < _manager->options.small
+ && count > pool->npages) {
+ struct list_head new_pages;
+ unsigned alloc_size = _manager->options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ INIT_LIST_HEAD(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+ cstate, alloc_size);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ if (!r) {
+ list_splice(&new_pages, &pool->list);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Failed to fill pool (%p).", pool);
+ /* If we have any pages left put them to the pool. */
+ list_for_each_entry(p, &pool->list, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ unsigned long irq_flags;
+ struct list_head *p;
+ unsigned i;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ list_splice_init(&pool->list, pages);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ /* Find the last page to include for the requested number of pages.
+ * Walk from whichever end of the list is closer to halve the search space. */
+ if (count <= pool->npages/2) {
+ i = 0;
+ list_for_each(p, &pool->list) {
+ if (++i == count)
+ break;
+ }
+ } else {
+ i = pool->npages + 1;
+ list_for_each_prev(p, &pool->list) {
+ if (--i == count)
+ break;
+ }
+ }
+ /* Cut count number of pages from pool */
+ list_cut_position(pages, &pool->list, p);
+ pool->npages -= count;
+ count = 0;
+out:
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return count;
+}
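Walking from the nearer end bounds the search to at most npages/2 steps: for npages == 100 and count == 90, the backward walk needs only 10 iterations where a forward walk would need 90. list_cut_position() then moves every entry from the head of pool->list up to and including p onto the pages list in O(1).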
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p = NULL;
+ int gfp_flags = GFP_USER;
+ int r;
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+ for (r = 0; r < count; ++r) {
+ p = alloc_page(gfp_flags);
+ if (!p) {
+
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate page.");
+ return -ENOMEM;
+ }
+
+ list_add(&p->lru, pages);
+ }
+ return 0;
+ }
+
+
+ /* combine the zero flag with the pool's gfp flags */
+ gfp_flags |= pool->gfp_flags;
+
+ /* First we take pages from the pool */
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+ clear_page(page_address(p));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count > 0) {
+ /* ttm_alloc_new_pages doesn't reference the pool, so we can run
+ * multiple requests in parallel.
+ */
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ if (r) {
+ /* If there are any pages left in the list, put them back
+ * into the pool. */
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate);
+ return r;
+ }
+ }
+
+
+ return 0;
+}
+
+/* Put all pages from the pages list into the correct pool to await reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager->options.max_size) {
+ page_count = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC pages to reduce
+ * the number of calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
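A usage sketch of the two entry points above (the demo_pool_roundtrip() caller is hypothetical, for illustration only; error handling trimmed):

	/* Hypothetical caller of the pool allocator. */
	static int demo_pool_roundtrip(void)
	{
		LIST_HEAD(pages);
		int ret;

		/* 16 zeroed, write-combined pages, served from the wc pool if possible */
		ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 16);
		if (ret)
			return ret;

		/* ... use the pages linked through page->lru ... */

		/* hand them back; passing the count saves ttm_put_pages() a list walk */
		ttm_put_pages(&pages, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
		return 0;
	}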
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ spin_lock_init(&pool->lock);
+ pool->fill_lock = false;
+ INIT_LIST_HEAD(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret;
+
+ WARN_ON(_manager);
+
+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
+
+ ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+
+ ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+
+ ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+ GFP_USER | GFP_DMA32, "wc dma");
+
+ ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+ GFP_USER | GFP_DMA32, "uc dma");
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+ return ret;
+ }
+
+ ttm_pool_mm_shrink_init(_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
+ ttm_pool_mm_shrink_fini(_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager->pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
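With the format strings above, the debugfs file reads roughly as follows (numbers illustrative only):

	  pool      refills   pages freed     size
	    wc           12          1536      512
	    uc            3           256       64
	wc dma            0             0        0
	uc dma            0             0        0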
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c12322..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,65 +28,35 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
+#include <linux/slab.h>
#include "drm_cache.h"
+#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* Allocates storage for pointers to the pages that back the ttm.
- *
- * Uses kmalloc if possible. Otherwise falls back to vmalloc.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
- ttm->pages = NULL;
-
- if (size <= PAGE_SIZE)
- ttm->pages = kzalloc(size, GFP_KERNEL);
-
- if (!ttm->pages) {
- ttm->pages = vmalloc_user(size);
- if (ttm->pages)
- ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
- }
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
- if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
- vfree(ttm->pages);
- ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
- } else {
- kfree(ttm->pages);
- }
+ drm_free_large(ttm->pages);
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -127,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -244,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -298,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
@@ -314,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
@@ -480,7 +462,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
void *from_virtual;
void *to_virtual;
int i;
- int ret;
+ int ret = -ENOMEM;
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +481,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
from_page = read_mapping_page(swap_space, i, NULL);
- if (IS_ERR(from_page))
+ if (IS_ERR(from_page)) {
+ ret = PTR_ERR(from_page);
goto out_err;
+ }
to_page = __ttm_tt_get_page(ttm, i);
if (unlikely(to_page == NULL))
goto out_err;
@@ -523,7 +507,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
ttm_tt_free_alloced_pages(ttm);
- return -ENOMEM;
+ return ret;
}
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +519,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
void *from_virtual;
void *to_virtual;
int i;
+ int ret = -ENOMEM;
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +542,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
0);
if (unlikely(IS_ERR(swap_storage))) {
printk(KERN_ERR "Failed allocating swap storage.\n");
- return -ENOMEM;
+ return PTR_ERR(swap_storage);
}
} else
swap_storage = persistant_swap_storage;
@@ -569,9 +554,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
if (unlikely(from_page == NULL))
continue;
to_page = read_mapping_page(swap_space, i, NULL);
- if (unlikely(to_page == NULL))
+ if (unlikely(IS_ERR(to_page))) {
+ ret = PTR_ERR(to_page);
goto out_err;
-
+ }
preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0);
to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +581,5 @@ out_err:
if (!persistant_swap_storage)
fput(swap_storage);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 327380888b4a..4c54f043068e 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -40,6 +40,7 @@
#include "via_dmablit.h"
#include <linux/pagemap.h>
+#include <linux/slab.h>
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 5935b8842e86..34079f251cd4 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -150,7 +150,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
cur_irq++;
}
- /* Acknowlege interrupts */
+ /* Acknowledge interrupts */
VIA_WRITE(VIA_REG_INTERRUPT, status);
@@ -165,7 +165,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
u32 status;
if (dev_priv) {
- /* Acknowlege interrupts */
+ /* Acknowledge interrupts */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status |
dev_priv->irq_pending_mask);
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac12459..6efac8117c93 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -75,7 +75,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
DRM_DEBUG("\n");
- if (fx->lock > VIA_NR_XVMC_LOCKS)
+ if (fx->lock >= VIA_NR_XVMC_LOCKS)
return -EFAULT;
lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index f20b8bcbef39..30ad13344f7b 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && FB
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1c..4505e17df3f5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o
+ vmwgfx_overlay.o vmwgfx_fence.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
- man->io_offset = dev_priv->vram_start;
- man->io_size = dev_priv->vram_size;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
vmw_dmabuf_gmr_unbind(bo);
}
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.is_iomem = false;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->vram_start;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
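The address the TTM fault path ends up mapping is mem->bus.base + mem->bus.offset. For example, a VRAM buffer whose range starts at page 16 yields offset = 16 << PAGE_SHIFT = 64 KiB, so the CPU mapping covers vram_start + 64 KiB for bus.size = num_pages << PAGE_SHIFT bytes.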
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify
+ .swap_notify = vmw_swap_notify,
+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+ .io_mem_free = &vmw_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42d..b793c8c9acb3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -88,6 +88,9 @@
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ struct drm_vmw_update_layout_arg)
/**
@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED)
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err3;
}
+ /* Need mmio memory to check for fifo pitchlock cap. */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+ !vmw_fifo_have_pitchlock(dev_priv)) {
+ ret = -ENOSYS;
+ DRM_ERROR("Hardware has no pitchlock\n");
+ goto out_err4;
+ }
+
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
unregister_pm_notifier(&dev_priv->pm_nb);
vmw_fb_close(dev_priv);
@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
{
struct vmw_master *vmaster;
- DRM_INFO("Master create.\n");
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
{
struct vmw_master *vmaster = vmw_master(master);
- DRM_INFO("Master destroy.\n");
master->driver_priv = NULL;
kfree(vmaster);
}
@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- DRM_INFO("Master set.\n");
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- DRM_INFO("Master drop.\n");
-
/**
* Make sure the master doesn't disappear while we have
* it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec13..eaad52095339 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,12 +41,13 @@
#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_DISPLAYS 16
struct vmw_fpriv {
struct drm_master *locked_master;
@@ -102,6 +103,13 @@ struct vmw_surface {
struct vmw_cursor_snooper snooper;
};
+struct vmw_fence_queue {
+ struct list_head head;
+ struct timespec lag;
+ struct timespec lag_time;
+ spinlock_t lock;
+};
+
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
@@ -115,6 +123,7 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
+ struct vmw_fence_queue fence_queue;
};
struct vmw_relocation {
@@ -144,6 +153,14 @@ struct vmw_master {
struct ttm_lock lock;
};
+struct vmw_vga_topology_state {
+ uint32_t width;
+ uint32_t height;
+ uint32_t primary;
+ uint32_t pos_x;
+ uint32_t pos_y;
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -171,14 +188,19 @@ struct vmw_private {
* VGA registers.
*/
+ struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_depth;
uint32_t vga_bpp;
uint32_t vga_pseudo;
uint32_t vga_red_mask;
- uint32_t vga_blue_mask;
uint32_t vga_green_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_bpl;
+ uint32_t vga_pitchlock;
+
+ uint32_t num_displays;
/*
* Framebuffer info.
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..8e396850513c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
@@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(cmd, user_cmd, arg->command_size);
if (unlikely(ret != 0)) {
+ ret = -EFAULT;
DRM_ERROR("Failed copying commands.\n");
goto out_commit;
}
@@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out_err;
vmw_apply_relocations(sw_context);
+
+ if (arg->throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+ arg->throttle_us);
+
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
vmw_fifo_commit(dev_priv, arg->command_size);
ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..b0866f04ec76 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- /* without multimon its hard to resize */
- if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
- (var->xres != par->max_width ||
- var->yres != par->max_height)) {
- DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ (var->xoffset != 0 || var->yoffset != 0)) {
+ DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}
- if (var->xres > par->max_width ||
- var->yres > par->max_height) {
+ if ((var->xoffset + var->xres) > par->max_width ||
+ (var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
+ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+ if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
-
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- } else {
- vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
-
- /* TODO check if pitch and offset changes */
}
+ /* This check is really helpful: if it fails, the user
+ * probably cannot see anything on the screen.
+ */
+ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
return 0;
}
@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
+ /* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
fb_bbp = 32;
fb_depth = 24;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- } else {
- fb_width = min(vmw_priv->fb_max_width, initial_width);
- fb_height = min(vmw_priv->fb_max_height, initial_height);
- }
+ /* XXX Nor should these be hardcoded. */
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
- fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_pitch = fb_width * fb_bbp / 8;
+ fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
- fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
- DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
- DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
- DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
- DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
- DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
- DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
- DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
- DRM_DEBUG("fb_pitch %u\n", fb_pitch);
- DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
@@ -559,8 +516,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
- info->aperture_base = vmw_priv->vram_start;
- info->aperture_size = vmw_priv->vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_aper;
+ }
+ info->apertures->ranges[0].base = vmw_priv->vram_start;
+ info->apertures->ranges[0].size = vmw_priv->vram_size;
/*
* Dirty & Deferred IO
@@ -580,6 +542,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
err_defio:
fb_deferred_io_cleanup(info);
+err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +591,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+ /* Could probably BUG_ON() here instead of WARN_ON(). */
+ WARN_ON(bo->offset != 0);
+
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 000000000000..61eacc1b5ca3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+ struct list_head head;
+ uint32_t sequence;
+ struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+ struct vmw_fence *fence, *next;
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ kfree(fence);
+ }
+ spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence)
+{
+ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (unlikely(!fence))
+ return -ENOMEM;
+
+ fence->sequence = sequence;
+ getrawmonotonic(&fence->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&fence->head, &queue->head);
+ spin_unlock(&queue->lock);
+
+ return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence)
+{
+ struct vmw_fence *fence, *next;
+ struct timespec now;
+ bool updated = false;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
+ }
+
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ if (signaled_sequence - fence->sequence > (1 << 30))
+ continue;
+
+ queue->lag = timespec_sub(now, fence->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&fence->head);
+ kfree(fence);
+ }
+
+out_unlock:
+ spin_unlock(&queue->lock);
+
+ return (updated) ? 0 : -EBUSY;
+}
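The (1 << 30) test is a wrap-safe way of asking, in 32-bit unsigned arithmetic, whether a fence is still in the future. With signaled_sequence == 5, a pending fence at sequence 10 gives 5 - 10 == 0xfffffffb > 1 << 30 and is skipped, while a fence at 0xfffffffe (submitted just before the counter wrapped) gives 5 - 0xfffffffe == 7 and is correctly retired with its lag recorded.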
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
+{
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
+ }
+
+ return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+ struct timespec now;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+ uint32_t us)
+{
+ struct timespec lag, cond;
+
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
+}
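vmw_lag_lt() converts the caller's microsecond budget into a timespec (cond = us * 1000 ns), so a throttle of 16667 us, roughly one 60 Hz frame, becomes 16.667 ms; vmw_wait_lag() below then blocks on fences whenever the accumulated FIFO lag exceeds that budget.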
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us)
+{
+ struct vmw_fence *fence;
+ uint32_t sequence;
+ int ret;
+
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ sequence = atomic_read(&dev_priv->fence_seq);
+ else {
+ fence = list_first_entry(&queue->head,
+ struct vmw_fence, head);
+ sequence = fence->sequence;
+ }
+ spin_unlock(&queue->lock);
+
+ ret = vmw_wait_fence(dev_priv, false, sequence, true,
+ 3*HZ);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ (void) vmw_fence_pull(queue, sequence);
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d846..e6a1eb7ea954 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
return true;
}
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t caps;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
+ caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+ return true;
+
+ return false;
+}
+
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+ vmw_fence_queue_init(&fifo->fence_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state);
mutex_unlock(&dev_priv->hw_mutex);
+ vmw_fence_queue_takedown(&fifo->fence_queue);
if (likely(fifo->last_buffer != NULL)) {
vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
fifo_state->last_buffer_add = false;
+ (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+ vmw_update_sequence(dev_priv, fifo_state);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb5393860..e92298a6a383 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
+void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ if (dev_priv->last_read_sequence != sequence) {
+ dev_priv->last_read_sequence = sequence;
+ vmw_fence_pull(&fifo_state->fence_queue, sequence);
+ }
+}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ fifo_state = &dev_priv->fifo;
+ vmw_update_sequence(dev_priv, fifo_state);
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- fifo_state = &dev_priv->fifo;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, sequence))
return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a63..437ac786277a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = 24;
vfbs->base.base.width = width;
vfbs->base.base.height = height;
- vfbs->base.pin = NULL;
- vfbs->base.unpin = NULL;
+ vfbs->base.pin = &vmw_surface_dmabuf_pin;
+ vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
mutex_init(&vfbs->work_lock);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+ int ret;
+
+ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+ if (unlikely(vfbs->buffer == NULL))
+ return -ENOMEM;
+
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
+
+ return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct ttm_buffer_object *bo;
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+
+ bo = &vfbs->buffer->base;
+ ttm_bo_unref(&bo);
+ vfbs->buffer = NULL;
+
+ return 0;
+}
+
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
+
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
- vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
- vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
- vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
- vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
- vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
- } else
- WARN_ON(true);
-
vmw_overlay_resume_all(dev_priv);
+ WARN_ON(ret != 0);
+
return 0;
}
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbd->base.base.bits_per_pixel = 32;
- vfbd->base.base.pitch = width * 32 / 4;
+ vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
vfbd->base.base.depth = 24;
vfbd->base.base.width = width;
vfbd->base.base.height = height;
@@ -752,14 +771,8 @@ err_not_scanout:
return NULL;
}
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
- return 0;
-}
-
static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
- .fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
@@ -771,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- dev->mode_config.max_width = dev_priv->fb_max_width;
- dev->mode_config.max_height = dev_priv->fb_max_height;
+ /* assumed largest fb size */
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
ret = vmw_kms_init_legacy_display_system(dev_priv);
@@ -832,49 +846,141 @@ out:
return ret;
}
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth)
+{
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
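For example, an 800x600 mode at 32 bpp yields pitch = 800 * 32 / 8 = 3200 bytes, which is the value written to SVGA_REG_PITCHLOCK (or to the FIFO pitchlock register on hosts that only expose SVGA_FIFO_CAP_PITCHLOCK).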
+
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
- /*
- * setup a single multimon monitor with the size
- * of 0x0, this stops the UI from resizing when we
- * change the framebuffer size
- */
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_priv->vga_pitchlock =
+ vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
+ SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
+ vmw_priv->num_displays = vmw_read(vmw_priv,
+ SVGA_REG_NUM_GUEST_DISPLAYS);
+
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
+ save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
+ save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
+ save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
+ save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
+
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+ vmw_priv->vga_pitchlock);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(vmw_priv->vga_pitchlock,
+ vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
- /* TODO check for multimon */
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
+
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_update_layout_arg *arg =
+ (struct drm_vmw_update_layout_arg *)data;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ void __user *user_rects;
+ struct drm_vmw_rect *rects;
+ unsigned rects_size;
+ int ret;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!arg->num_outputs) {
+ struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
+ goto out_unlock;
+ }
+
+ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+ rects = kzalloc(rects_size, GFP_KERNEL);
+ if (unlikely(!rects)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_rects = (void __user *)(unsigned long)arg->rects;
+ ret = copy_from_user(rects, user_rects, rects_size);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to get rects.\n");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
+
+out_free:
+ kfree(rects);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8b95249f0531..8a398a0339b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
/*
- * Legacy display unit functions - vmwgfx_ldu.h
+ * Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90891593bf6c..cfaf690a5b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
struct list_head active;
unsigned num_active;
+ unsigned last_num_active;
struct vmw_framebuffer *fb;
};
@@ -48,9 +49,12 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
- struct list_head active;
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
- unsigned unit;
+ struct list_head active;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
- struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *crtc = NULL;
int i = 0;
- /* to stop the screen from changing size on resize */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
- for (i = 0; i < lds->num_active; i++) {
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ /* If there is no display topology the host just assumes
+ * that the guest will set the same layout as the host.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+ int w = 0, h = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+ w = max(w, crtc->x + crtc->mode.hdisplay);
+ h = max(h, crtc->y + crtc->mode.vdisplay);
+ i++;
+ }
+
+ if (crtc == NULL)
+ return 0;
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+
+ return 0;
}
- /* Now set the mode */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ if (!list_empty(&lds->active)) {
+ entry = list_entry(lds->active.next, typeof(*entry), active);
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+ }
+
+ /* Make sure we always show something. */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+ lds->num_active ? lds->num_active : 1);
+
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
i++;
}
+ BUG_ON(i != lds->num_active);
+
+ lds->last_num_active = lds->num_active;
+
return 0;
}
@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
if (list_empty(&ldu->active))
return 0;
+	/* Must use list_del_init(); otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *entry;
struct list_head *at;
+ BUG_ON(!ld->num_active && ld->fb);
+ if (vfb != ld->fb) {
+ if (ld->fb && ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
if (!list_empty(&ldu->active))
return 0;
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
- if (entry->unit > ldu->unit)
+ if (entry->base.unit > ldu->base.unit)
break;
at = &entry->active;
}
list_add(&ldu->active, at);
- if (ld->num_active++ == 0) {
- BUG_ON(ld->fb);
- if (vfb->pin)
- vfb->pin(vfb);
- ld->fb = vfb;
- }
+
+ ld->num_active++;
return 0;
}
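/*
 * Editor's sketch (not part of the patch): vmw_ldu_add_active() keeps
 * lds->active ordered by unit number. The scan-then-insert idiom above
 * generalizes to any list kept in ascending order:
 */
static void example_sorted_insert(struct list_head *head,
				  struct vmw_legacy_display_unit *ldu)
{
	struct vmw_legacy_display_unit *entry;
	struct list_head *at = head;

	/* Stop at the first entry with a larger unit number... */
	list_for_each_entry(entry, head, active) {
		if (entry->base.unit > ldu->base.unit)
			break;
		at = &entry->active;
	}

	/* ...and link in right after the last smaller entry (or the head). */
	list_add(&ldu->active, at);
}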
@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
/* The ldu only supports one active fb at a time */
if (dev_priv->ldu_priv->fb && vfb &&
+ !(dev_priv->ldu_priv->num_active == 1 &&
+ !list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
static enum drm_connector_status
vmw_ldu_connector_detect(struct drm_connector *connector)
{
- /* XXX vmwctrl should control connection status */
- if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
return connector_status_disconnected;
}
@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
- { DRM_MODE("800x600",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
- 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
+ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
int i;
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = ldu->pref_width;
+ mode->vdisplay = ldu->pref_height;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_probed_add(connector, mode);
+
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
+
+ ldu->pref_mode = mode;
+ }
+
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
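/*
 * Editor's sketch (hedged): the "add preferred mode" block above follows a
 * common KMS pattern -- duplicate a template mode, size it, recompute the
 * refresh rate and queue it on the connector's probed list. The helper
 * name below is an assumption for illustration:
 */
static struct drm_display_mode *
example_add_pref_mode(struct drm_connector *connector,
		      const struct drm_display_mode *template,
		      unsigned width, unsigned height)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, template);
	if (!mode)
		return NULL;

	mode->hdisplay = width;
	mode->vdisplay = height;
	mode->vrefresh = drm_mode_vrefresh(mode);

	/* Picked up by the next drm_mode_getconnector() probe. */
	drm_mode_probed_add(connector, mode);
	return mode;
}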
@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
if (!ldu)
return -ENOMEM;
- ldu->unit = unit;
+ ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
+ INIT_LIST_HEAD(&ldu->active);
+
+ ldu->pref_active = (unit == 0);
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_mode = NULL;
+
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- /* Initial status */
- if (unit == 0)
- connector->status = connector_status_connected;
- else
- connector->status = connector_status_disconnected;
+ connector->status = vmw_ldu_connector_detect(connector);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- INIT_LIST_HEAD(&ldu->active);
-
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_connector_attach_property(connector,
@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
drm_mode_create_dirty_info_property(dev_priv->dev);
vmw_ldu_init(dev_priv, 0);
- vmw_ldu_init(dev_priv, 1);
- vmw_ldu_init(dev_priv, 2);
- vmw_ldu_init(dev_priv, 3);
- vmw_ldu_init(dev_priv, 4);
- vmw_ldu_init(dev_priv, 5);
- vmw_ldu_init(dev_priv, 6);
- vmw_ldu_init(dev_priv, 7);
+	/* For old hardware without multimon, only enable one display. */
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+ }
return 0;
}
@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
+
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *con;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+	{
+		int i;
+
+		DRM_INFO("%s: new layout ", __func__);
+		for (i = 0; i < (int)num; i++)
+			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+				 rects[i].w, rects[i].h);
+		DRM_INFO("\n");
+	}
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ ldu = vmw_connector_to_ldu(con);
+ if (num > ldu->base.unit) {
+ ldu->pref_width = rects[ldu->base.unit].w;
+ ldu->pref_height = rects[ldu->base.unit].h;
+ ldu->pref_active = true;
+ } else {
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_active = false;
+ }
+ con->status = vmw_ldu_connector_detect(con);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
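/*
 * Editor's sketch (values illustrative only, not part of the patch): a
 * caller describing a side-by-side dual-monitor layout to the new
 * function. Units beyond 'num' fall back to 800x600 and report
 * disconnected, per the loop above.
 */
static int example_two_monitor_layout(struct vmw_private *dev_priv)
{
	struct drm_vmw_rect rects[] = {
		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },	/* unit 0 */
		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },	/* unit 1 */
	};

	return vmw_kms_ldu_update_layout(dev_priv, ARRAY_SIZE(rects), rects);
}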
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..df2036ed18d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);
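/*
 * Editor's note (hedged): the extra boolean passed to ttm_bo_validate()
 * above matches the contemporaneous TTM change that split the old no_wait
 * flag into separate reserve-wait and GPU-wait arguments; both false here
 * means the call may block on either.
 */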
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf);
stream->saved = *arg;
+	/* The stream is no longer stopped/paused. */
+ stream->paused = false;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index f8fbbc67a406..8612378b131e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(srf->sizes, user_sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
goto out_err1;
+ }
if (srf->scanout &&
srf->num_sizes == 1 &&
@@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base);
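/*
 * Editor's note (sketch): both hunks above fix the same class of bug --
 * copy_from_user()/copy_to_user() return the number of bytes *not*
 * copied, never an errno, so the result must be translated to -EFAULT
 * before it reaches userspace. The canonical shape:
 */
static int example_copy_in(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;	/* do not return the raw byte count */
	return 0;
}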