Commit 5dc9cbc4 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes and cleanups from Dave Airlie:
 "The main thing are a bunch of fixes for the new amd display code, a
  bunch of smatch fixes.

  core:
   - Atomic helper regression fix.
   - Deferred fbdev fallout regression fix.

  amdgpu:
   - New display code (dc) dpms, suspend/resume and smatch fixes, along
     with some others
   - Some regression fixes for amdkfd/radeon.
   - Fix a ttm regression when swiotlb is disabled

  bridge:
   - A bunch of fixes for the tc358767 bridge

  mali-dp + hdlcd:
   - Some fixes and internal API catch-ups.

  imx-drm:
   - Regression fix in atomic code.

  omapdrm:
   - platform detection regression fixes"

* tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux: (76 commits)
  drm/imx: always call wait_for_flip_done in commit_tail
  omapdrm: hdmi4_cec: signedness bug in hdmi4_cec_init()
  drm: omapdrm: Fix DPI on platforms using the DSI VDDS
  omapdrm: hdmi4: Correct the SoC revision matching
  drm/omap: displays: panel-dpi: add backlight dependency
  drm/omap: Fix error handling path in 'omap_dmm_probe()'
  drm/i915: Disable THP until we have a GPU read BW W/A
  drm/bridge: tc358767: fix 1-lane behavior
  drm/bridge: tc358767: fix AUXDATAn registers access
  drm/bridge: tc358767: fix timing calculations
  drm/bridge: tc358767: fix DP0_MISC register set
  drm/bridge: tc358767: filter out too high modes
  drm/bridge: tc358767: do no fail on hi-res displays
  drm/bridge: Fix lvds-encoder since the panel_bridge rework.
  drm/bridge: synopsys/dw-hdmi: Enable cec clock
  drm/bridge: adv7511/33: Fix adv7511_cec_init() failure handling
  drm/radeon: remove init of CIK VMIDs 8-16 for amdkfd
  drm/ttm: fix populate_and_map() functions once more
  drm/fb_helper: Disable all crtc's when initial setup fails.
  drm/atomic: make drm_atomic_helper_wait_for_vblanks more agressive
  ...
parents 75f64f68 503505bf
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
 			  struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 struct amdgpu_queue_mgr *mgr,
-			 int hw_ip, int instance, int ring,
+			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
 	/* sdma */
 	struct amdgpu_sdma		sdma;
 
-	union {
-		struct {
-			/* uvd */
-			struct amdgpu_uvd	uvd;
+	/* uvd */
+	struct amdgpu_uvd		uvd;
 
-			/* vce */
-			struct amdgpu_vce	vce;
-		};
+	/* vce */
+	struct amdgpu_vce		vce;
 
-		/* vcn */
-		struct amdgpu_vcn	vcn;
-	};
+	/* vcn */
+	struct amdgpu_vcn		vcn;
 
 	/* firmwares */
 	struct amdgpu_firmware	firmware;
...
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_sdma_rlc_registers *m;
+	unsigned long end_jiffies;
 	uint32_t sdma_base_addr;
+	uint32_t data;
 
 	m = get_sdma_mqd(mqd);
 	sdma_base_addr = get_sdma_base_addr(m);
 
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-			m->sdma_rlc_virtual_addr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-			m->sdma_rlc_rb_base);
+	end_jiffies = msecs_to_jiffies(2000) + jiffies;
+	while (true) {
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (time_after(jiffies, end_jiffies))
+			return -ETIME;
+		usleep_range(500, 1000);
+	}
+	if (m->sdma_engine_id) {
+		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+	} else {
+		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+				RESUME_CTX, 0);
+		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+			m->sdma_rlc_doorbell);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+			m->sdma_rlc_virtual_addr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
 			m->sdma_rlc_rb_base_hi);
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
 			m->sdma_rlc_rb_rptr_addr_lo);
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
 			m->sdma_rlc_rb_rptr_addr_hi);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-			m->sdma_rlc_doorbell);
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
 			m->sdma_rlc_rb_cntl);
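Note: the reworked load path above first disables the ring buffer, then polls SDMA0_RLC0_CONTEXT_STATUS until the engine reports idle, bounded by a 2-second jiffies deadline, before reprogramming the queue. A minimal sketch of that bounded-poll idiom; read_engine_status() and ENGINE_IDLE_MASK are hypothetical stand-ins for the register accessors used above:

    /* Poll for idle with a hard 2 s deadline; -ETIME if it never arrives. */
    unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);

    while (!(read_engine_status() & ENGINE_IDLE_MASK)) {
    	if (time_after(jiffies, end_jiffies))
    		return -ETIME;		/* engine stayed busy */
    	usleep_range(500, 1000);	/* back off between reads */
    }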
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 	}
 
 	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
 	return 0;
 }
...
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		if (candidate->robj == validated)
 			break;
 
+		/* We can't move pinned BOs here */
+		if (bo->pin_count)
+			continue;
+
 		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
 		/* Check if this BO is in one of the domains we need space for */
...
@@ -1837,9 +1837,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 	}
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-		amdgpu_ucode_fini_bo(adev);
-
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
...
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	/* Raven */
-	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
 	{0, 0, 0}
 };
...
@@ -164,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
 		ret = adev->powerplay.ip_funcs->hw_fini(
 					adev->powerplay.pp_handle);
 
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+		amdgpu_ucode_fini_bo(adev);
+
 	return ret;
 }
...
@@ -442,6 +442,8 @@ static int psp_hw_fini(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	amdgpu_ucode_fini_bo(adev);
+
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
...
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
 			       struct amdgpu_queue_mapper *mapper,
-			       int ring,
+			       u32 ring,
 			       struct amdgpu_ring **out_ring)
 {
 	switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
 			  struct amdgpu_queue_mapper *mapper,
-			  int user_ring, bool lru_pipe_order,
+			  u32 user_ring, bool lru_pipe_order,
 			  struct amdgpu_ring **out_ring)
 {
 	int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 struct amdgpu_queue_mgr *mgr,
-			 int hw_ip, int instance, int ring,
+			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring)
 {
 	int r, ip_num_rings;
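Note: the int-to-u32 parameter changes in this file (matching the prototype change in the first hunk above) are smatch signedness fixes: these indices come from userspace and are validated against an upper bound only. A hedged illustration of why the unsigned type matters; the check shape below is assumed for illustration, not the driver's exact code:

    /* Signed: -1 < ip_num_rings is true, so a negative index
     * sails past a single upper-bound check. */
    int ring = -1;			/* userspace-controlled */
    if (ring < ip_num_rings)
    	use(rings[ring]);		/* rings[-1]: out of bounds */

    /* Unsigned: the same bit pattern becomes 0xffffffff and the
     * one comparison now rejects it. */
    u32 uring = (u32)-1;
    if (uring < ip_num_rings)
    	use(rings[uring]);		/* never reached */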
...
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
 	{mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-					  u32 se_num, u32 sh_num,
-					  u32 reg_offset)
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+				       bool indexed, u32 se_num,
+				       u32 sh_num, u32 reg_offset)
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		case mmPA_SC_RASTER_CONFIG_1:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+		}
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	val = RREG32(reg_offset);
+		val = RREG32(reg_offset);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		case mmGB_MACROTILE_MODE0:
+		case mmGB_MACROTILE_MODE1:
+		case mmGB_MACROTILE_MODE2:
+		case mmGB_MACROTILE_MODE3:
+		case mmGB_MACROTILE_MODE4:
+		case mmGB_MACROTILE_MODE5:
+		case mmGB_MACROTILE_MODE6:
+		case mmGB_MACROTILE_MODE7:
+		case mmGB_MACROTILE_MODE8:
+		case mmGB_MACROTILE_MODE9:
+		case mmGB_MACROTILE_MODE10:
+		case mmGB_MACROTILE_MODE11:
+		case mmGB_MACROTILE_MODE12:
+		case mmGB_MACROTILE_MODE13:
+		case mmGB_MACROTILE_MODE14:
+		case mmGB_MACROTILE_MODE15:
+			idx = (reg_offset - mmGB_MACROTILE_MODE0);
+			return adev->gfx.config.macrotile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+		bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
 		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
 			continue;
 
-		*value = cik_allowed_read_registers[i].grbm_indexed ?
-			cik_read_indexed_register(adev, se_num,
-						  sh_num, reg_offset) :
-			RREG32(reg_offset);
+		*value = cik_get_register_value(adev, indexed, se_num, sh_num,
+						reg_offset);
 		return 0;
 	}
 	return -EINVAL;
...
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 					adev->gfx.config.backend_enable_mask,
 					num_rb_pipes);
 	}
+
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+			adev->gfx.config.rb_config[i][j].raster_config_1 =
+				RREG32(mmPA_SC_RASTER_CONFIG_1);
+		}
+	}
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
...
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
 	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
...
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR	"AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
 	kfd_process_destroy_wq();
 	kfd_topology_shutdown();
 	kfd_chardev_exit();
-	dev_info(kfd_device, "Removed module\n");
+	pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
...
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 	struct cik_sdma_rlc_registers *m;
 
 	m = get_sdma_mqd(mqd);
-	m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-			SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+	m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+			<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
 		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
 		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
 		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
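Note: the RB_SIZE field takes the log2 of the ring size in dwords, while ffs() returns a 1-based bit position, so the old expression encoded twice the real size; subtracting 1 yields the exponent. A worked example with an assumed queue size:

    unsigned int dwords = 1024 / sizeof(unsigned int);	/* 256 = 2^8 */
    ffs(dwords);	/* 9: 1-based position of bit 8 */
    ffs(dwords) - 1;	/* 8: the log2 value RB_SIZE expects */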
...
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
+		if (dev->dqm->queue_count >=
+			CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+			pr_err("Over-subscription is not allowed for SDMA.\n");
+			retval = -EPERM;
+			goto err_create_queue;
+		}
+
+		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+		if (retval != 0)
+			goto err_create_queue;
+		pqn->q = q;
+		pqn->kq = NULL;
+		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+						&q->properties.vmid);
+		pr_debug("DQM returned %d for create_queue\n", retval);
+		print_queue(q);
+		break;
+
 	case KFD_QUEUE_TYPE_COMPUTE:
 		/* check if there is over subscription */
 		if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
...
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		aconnector = to_amdgpu_dm_connector(connector);
-		if (aconnector->dc_link->type == dc_connection_mst_branch) {
+		if (aconnector->dc_link->type == dc_connection_mst_branch &&
+		    aconnector->mst_mgr.aux) {
 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
 					aconnector, aconnector->base.base.id);
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 		mutex_lock(&aconnector->hpd_lock);
 		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+			aconnector->fake_enable = false;
+
 		aconnector->dc_sink = NULL;
 		amdgpu_dm_update_connector_after_detect(aconnector);
 		mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
 	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-	drm_atomic_state_put(adev->dm.cached_state);
 	adev->dm.cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 			.link = aconnector->dc_link,
 			.sink_signal = SIGNAL_TYPE_VIRTUAL
 	};
-	struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+	struct edid *edid;
 
 	if (!aconnector->base.edid_blob_ptr ||
 		!aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 		return;
 	}
 
+	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
 	aconnector->edid = edid;
 
 	aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
 				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+		if (!dm_new_crtc_state->stream)
+			continue;
+
 		status = dc_stream_get_status(dm_new_crtc_state->stream);
 		WARN_ON(!status);
 		WARN_ON(!status->plane_count);
 
-		if (!dm_new_crtc_state->stream)
-			continue;
-
 		/*TODO How it works with MPO ?*/
 		if (!dc_commit_planes_to_stream(
 				dm->dc,
@@ -4253,7 +4259,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	drm_atomic_helper_commit_hw_done(state);
 
 	if (wait_for_vblank)
-		drm_atomic_helper_wait_for_vblanks(dev, state);
+		drm_atomic_helper_wait_for_flip_done(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4338,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
 		return;
 
 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+	if (!disconnected_acrtc)
+		return;
 
-	if (!disconnected_acrtc || !acrtc_state->stream)
+	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+	if (!acrtc_state->stream)
 		return;
 
 	/*
@@ -4455,7 +4463,7 @@ static int dm_update_crtcs_state(struct dc *dc,
 			}
 		}
 
-		if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+		if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 				dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
 			new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			}
 		} else {
 			for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-				if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+				if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+						!new_crtc_state->color_mgmt_changed)
 					continue;
 
 				if (!new_crtc_state->enable)
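Note: the dm_restore_drm_connector_state change above fixes a dereference-before-NULL-check: disconnected_acrtc->base.state was read before !disconnected_acrtc was tested, so the guard could never fire; the create_eml_sink hunk applies the same reordering to edid_blob_ptr. Reduced to its essence, using the names from the hunk:

    disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
    if (!disconnected_acrtc)	/* test the pointer first... */
    	return;

    /* ...then it is safe to dereference it. */
    acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);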
...
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
 		if (signal == signal_type_info_tbl[i].type)
 			break;
 
+	if (i == NUM_ELEMENTS(signal_type_info_tbl))
+		goto fail;
+
 	dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
 			signal_type_info_tbl[i].name,
 			link->link_index);
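Note: the added guard covers the case where the lookup loop exits without a match; i then equals the table size, and signal_type_info_tbl[i].name would read one element past the array. The general shape of the fix, with generic names rather than the dc code:

    /* After a search loop, i == n means "not found" -- bail out
     * before indexing table[i]. */
    for (i = 0; i < n; i++)
    	if (key == table[i].type)
    		break;
    if (i == n)
    	goto fail;	/* loop fell off the end: no match */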