author    Alex Deucher <alexander.deucher@amd.com>  2013-07-23 09:41:05 -0400
committer Alex Deucher <alexander.deucher@amd.com>  2013-08-30 16:30:08 -0400
commit    22c775ce80ed921fe9490f3cc2ca66dcda44f572 (patch)
tree      5c330689c876b94c34857feefdac1b2cbb27cbc3 /drivers/gpu/drm/radeon/evergreen.c
parent    1fd11777c2f0e6b6b37432b984bf40e3c6072f23 (diff)
drm/radeon: implement clock and power gating for CIK (v3)
Only the APUs support power gating.

v2: disable cgcg for now
v3: workaround hw issue in mgcg

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  57
1 file changed, 56 insertions(+), 1 deletion(-)
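
As context for the "only the APUs support power gating" note above: on CIK, clock gating can be enabled on all parts, while power gating is restricted to the APUs, so an enable path is normally guarded by the chip family. The sketch below is illustrative only; the example_* helpers are hypothetical placeholders and not part of this patch, and it assumes the radeon driver headers for struct radeon_device and the CHIP_* family enums.

static void example_enable_cik_gating(struct radeon_device *rdev)
{
	/* power gating is limited to the CIK APUs */
	bool is_apu = (rdev->family == CHIP_KAVERI) ||
		      (rdev->family == CHIP_KABINI);

	/* clock gating applies to all CIK parts */
	example_set_clock_gating(rdev, true);		/* hypothetical helper */

	/* power gating only to the APUs */
	if (is_apu)
		example_set_power_gating(rdev, true);	/* hypothetical helper */
}
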
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 6fc876a444d4..2ce12ee3e67f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -140,6 +140,7 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
extern void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr);
+void cik_init_cp_pg_table(struct radeon_device *rdev);
static const u32 evergreen_golden_registers[] =
{
@@ -3893,8 +3894,22 @@ void sumo_rlc_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
+
+ /* cp table block */
+ if (rdev->rlc.cp_table_obj) {
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ radeon_bo_unref(&rdev->rlc.cp_table_obj);
+ rdev->rlc.cp_table_obj = NULL;
+ }
}
+#define CP_ME_TABLE_SIZE 96
+
int sumo_rlc_init(struct radeon_device *rdev)
{
const u32 *src_ptr;
@@ -3980,9 +3995,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
}
reg_list_blk_index = (3 * reg_list_num + 2);
dws += reg_list_blk_index;
+ rdev->rlc.clear_state_size = dws;
if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, rdev->rlc.clear_state_size * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
@@ -4046,6 +4062,45 @@ int sumo_rlc_init(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
}
+ if (rdev->rlc.cp_table_size) {
+ if (rdev->rlc.cp_table_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.cp_table_gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+
+ cik_init_cp_pg_table(rdev);
+
+ radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ }
+
return 0;
}
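
Taken together, sumo_rlc_init() now allocates, pins and kmaps the cp table BO only when rdev->rlc.cp_table_size is non-zero, then hands it to cik_init_cp_pg_table(), and sumo_rlc_fini() releases it on teardown. A minimal caller-side sketch follows; the wrapper function and the sizing expression are illustrative assumptions, not taken from this diff, and it assumes the radeon driver headers.

static int example_cik_rlc_setup(struct radeon_device *rdev)
{
	int r;

	/* a non-zero cp_table_size tells sumo_rlc_init() to create, pin
	 * and map rlc.cp_table_obj and to run cik_init_cp_pg_table() */
	rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;	/* assumed sizing */

	r = sumo_rlc_init(rdev);
	if (r)
		dev_warn(rdev->dev, "(%d) RLC init failed\n", r);
	return r;
}

/* on teardown, sumo_rlc_fini(rdev) unpins and frees cp_table_obj as well */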