Diffstat (limited to 'drivers/gpu/drm/radeon')
64 files changed, 1382 insertions, 1442 deletions
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index f98356be0af2..c479f0c0dd5c 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -2,9 +2,10 @@ config DRM_RADEON tristate "ATI Radeon" - depends on DRM && PCI && MMU + depends on DRM && PCI depends on AGP || !AGP select FW_LOADER + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_KMS_HELPER @@ -12,6 +13,7 @@ config DRM_RADEON select DRM_TTM select DRM_TTM_HELPER select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION + select DRM_EXEC select SND_HDA_COMPONENT if SND_HDA_CORE select POWER_SUPPLY select HWMON diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 5bc3e6b41c34..b31125eb9a65 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -27,7 +27,7 @@ #include <linux/slab.h> #include <linux/string_helpers.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/drm_device.h> #include <drm/drm_util.h> diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 2db40789235c..1afa70566985 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -3825,8 +3825,7 @@ typedef struct _ATOM_DPCD_INFO // note2: From RV770, the memory is more than 32bit addressable, so we will change // ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains // exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware -// (in offset to start of memory address) is KB aligned instead of byte aligend. -/***********************************************************************************/ +// (in offset to start of memory address) is KB aligned instead of byte aligned. 
// Note3: /* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter, for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have: @@ -5072,7 +5071,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 #define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 #define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 #define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 -#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10 +#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10 /********************************************************************************************************************** ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fca8b08535a5..3f3c360dce4b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; - int ret; radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; - radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev; if (ASIC_IS_DCE5(rdev)) { if (radeon_auxch) @@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; } - ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); - if (!ret) - radeon_connector->ddc_bus->has_aux = true; - - WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); + drm_dp_aux_init(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ @@ -472,7 +467,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector, } int radeon_dp_mode_valid_helper(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; @@ -506,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) u8 link_status[DP_LINK_STATUS_SIZE]; struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -683,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -746,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2bff0d9e20f5..3d9f47bc807a 100644 
--- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -201,7 +201,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && (rdev->pdev->device == 0x6741) && - !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) + !(dmi_match(DMI_PRODUCT_NAME, "iMac12,1") || dmi_match(DMI_PRODUCT_NAME, "iMac12,2"))) return; if (!radeon_encoder->enc_priv) @@ -249,7 +249,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, */ if (bd->props.brightness == 0) bd->props.brightness = RADEON_MAX_BL_LEVEL; - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); DRM_INFO("radeon atom DIG backlight initialized\n"); @@ -701,7 +701,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if (radeon_connector->use_digital && (radeon_connector->audio == RADEON_AUDIO_ENABLE)) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) @@ -720,7 +720,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if (radeon_audio != 0) { if (radeon_connector->audio == RADEON_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else @@ -737,14 +737,14 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { if (radeon_audio != 0 && - drm_detect_monitor_audio(radeon_connector_edid(connector)) && + connector->display_info.has_audio && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) return ATOM_ENCODER_MODE_DP_AUDIO; return ATOM_ENCODER_MODE_DP; } else if (radeon_audio != 0) { if (radeon_connector->audio == RADEON_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else @@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) break; case DRM_MODE_CONNECTOR_eDP: if (radeon_audio != 0 && - drm_detect_monitor_audio(radeon_connector_edid(connector)) && + connector->display_info.has_audio && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) return ATOM_ENCODER_MODE_DP_AUDIO; return ATOM_ENCODER_MODE_DP; @@ -1832,7 +1832,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) return; } - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } @@ -2179,7 +2179,7 @@ assigned: void radeon_atom_encoder_init(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index abe9d65cc460..ba8db1d07c07 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -2457,7 +2457,7 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev, u32 tmp, tmp2; tmp = RREG32(MC_SEQ_MISC0); - patch = ((tmp & 0x0000f00) == 0x300) ? true : false; + patch = (tmp & 0x0000f00) == 0x300; if (patch && ((rdev->pdev->device == 0x67B0) || @@ -3238,7 +3238,8 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev) u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS; SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; - u32 i, ret; + int ret; + u32 i; memset(levels, 0, level_array_size); @@ -3285,7 +3286,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev) u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY; SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; - u32 i, ret; + int ret; + u32 i; memset(levels, 0, level_array_size); @@ -3405,12 +3407,8 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev) &rdev->pm.dpm.dyn_state.cac_leakage_table; u32 i; - if (allowed_sclk_vddc_table == NULL) - return -EINVAL; if (allowed_sclk_vddc_table->count < 1) return -EINVAL; - if (allowed_mclk_table == NULL) - return -EINVAL; if (allowed_mclk_table->count < 1) return -EINVAL; @@ -3440,7 +3438,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev) pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = allowed_sclk_vddc_table->entries[i].clk; pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; + i == 0; pi->dpm_table.sclk_table.count++; } } @@ -3453,7 +3451,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev) pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = allowed_mclk_table->entries[i].clk; pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = - (i == 0) ? 
true : false; + i == 0; pi->dpm_table.mclk_table.count++; } } @@ -3468,24 +3466,20 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev) pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; - if (allowed_mclk_table) { - for (i = 0; i < allowed_mclk_table->count; i++) { - pi->dpm_table.vddci_table.dpm_levels[i].value = - allowed_mclk_table->entries[i].v; - pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; - } - pi->dpm_table.vddci_table.count = allowed_mclk_table->count; + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.vddci_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; } + pi->dpm_table.vddci_table.count = allowed_mclk_table->count; allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; - if (allowed_mclk_table) { - for (i = 0; i < allowed_mclk_table->count; i++) { - pi->dpm_table.mvdd_table.dpm_levels[i].value = - allowed_mclk_table->entries[i].v; - pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; - } - pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.mvdd_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; } + pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; ci_setup_default_pcie_tables(rdev); @@ -4495,7 +4489,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev, bool patch; tmp = RREG32(MC_SEQ_MISC0); - patch = ((tmp & 0x0000f00) == 0x300) ? true : false; + patch = (tmp & 0x0000f00) == 0x300; if (patch && ((rdev->pdev->device == 0x67B0) || @@ -4880,16 +4874,10 @@ static int ci_set_private_data_variables_based_on_pptable(struct radeon_device * struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; - if (allowed_sclk_vddc_table == NULL) - return -EINVAL; if (allowed_sclk_vddc_table->count < 1) return -EINVAL; - if (allowed_mclk_vddc_table == NULL) - return -EINVAL; if (allowed_mclk_vddc_table->count < 1) return -EINVAL; - if (allowed_mclk_vddci_table == NULL) - return -EINVAL; if (allowed_mclk_vddci_table->count < 1) return -EINVAL; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b5e96a8fc2c1..51a3e0fc2f56 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7585,7 +7585,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); + drm_handle_vblank(rdev_to_drm(rdev), 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -7615,7 +7615,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); + drm_handle_vblank(rdev_to_drm(rdev), 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -7645,7 +7645,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); + drm_handle_vblank(rdev_to_drm(rdev), 2); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -7675,7 +7675,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); + drm_handle_vblank(rdev_to_drm(rdev), 3); rdev->pm.vblank_sync = true; 
wake_up(&rdev->irq.vblank_queue); } @@ -7705,7 +7705,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[4]) { - drm_handle_vblank(rdev->ddev, 4); + drm_handle_vblank(rdev_to_drm(rdev), 4); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -7735,7 +7735,7 @@ restart_ih: DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[5]) { - drm_handle_vblank(rdev->ddev, 5); + drm_handle_vblank(rdev_to_drm(rdev), 5); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev) */ int cik_init(struct radeon_device *rdev) { - struct radeon_ring *ring; + struct radeon_ring *ring, *ring_cp1, *ring_cp2; int r; /* Read BIOS */ @@ -8581,7 +8581,7 @@ int cik_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* Fence driver */ radeon_fence_driver_init(rdev); @@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev) ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_index); + ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; + ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; + ring_cp1->ring_obj = NULL; + ring_cp2->ring_obj = NULL; + ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS; + ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS; + + r600_ring_init(rdev, ring_cp1, 1024 * 1024); + r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index); if (r) return r; - ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_index); + r600_ring_init(rdev, ring_cp2, 1024 * 1024); + r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index); if (r) - return r; + goto out; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; ring->ring_obj = NULL; @@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev) r = r600_pcie_gart_init(rdev); if (r) - return r; + goto out; rdev->accel_working = true; r = cik_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); + radeon_doorbell_free(rdev, ring_cp1->doorbell_index); + radeon_doorbell_free(rdev, ring_cp2->doorbell_index); + ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS; + ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS; cik_cp_fini(rdev); cik_sdma_fini(rdev); cik_irq_fini(rdev); @@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev) */ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { DRM_ERROR("radeon: MC ucode required for NI+.\n"); - return -EINVAL; + r = -EINVAL; + goto out; } return 0; + +out: + radeon_doorbell_free(rdev, ring_cp1->doorbell_index); + radeon_doorbell_free(rdev, ring_cp2->doorbell_index); + return r; } /** @@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev) */ void cik_fini(struct radeon_device *rdev) { + struct radeon_ring *ring; radeon_pm_fini(rdev); cik_cp_fini(rdev); cik_sdma_fini(rdev); @@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); uvd_v1_0_fini(rdev); + ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; + radeon_doorbell_free(rdev, ring->doorbell_index); + ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; + radeon_doorbell_free(rdev, 
ring->doorbell_index); radeon_uvd_fini(rdev); radeon_vce_fini(rdev); cik_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 4c06f47453fd..d6ab93ed9ec4 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -91,7 +91,7 @@ struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev) pin = &rdev->audio.pin[i]; pin_count = 0; - list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) { if (radeon_encoder_is_digital(encoder)) { radeon_encoder = to_radeon_encoder(encoder); dig = radeon_encoder->enc_priv; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c634dc28e6c3..bc4ab71613a5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1673,7 +1673,7 @@ void evergreen_pm_misc(struct radeon_device *rdev) */ void evergreen_pm_prepare(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 tmp; @@ -1698,7 +1698,7 @@ void evergreen_pm_prepare(struct radeon_device *rdev) */ void evergreen_pm_finish(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 tmp; @@ -1763,7 +1763,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, */ void evergreen_hpd_init(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned enabled = 0; u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | @@ -1804,7 +1804,7 @@ void evergreen_hpd_init(struct radeon_device *rdev) */ void evergreen_hpd_fini(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned disabled = 0; @@ -4753,7 +4753,7 @@ restart_ih: event_name = "vblank"; if (rdev->irq.crtc_vblank_int[crtc_idx]) { - drm_handle_vblank(rdev->ddev, crtc_idx); + drm_handle_vblank(rdev_to_drm(rdev), crtc_idx); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -5211,7 +5211,7 @@ int evergreen_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* Fence driver */ radeon_fence_driver_init(rdev); /* initialize AGP */ diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 1fe6e0d883c7..1162cb5d75ed 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -33,8 +33,10 @@ #include "evergreen_reg_safe.h" #include "cayman_reg_safe.h" +#ifndef MIN #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) +#endif #define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm) @@ -209,7 +211,7 @@ static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p, surf->base_align = track->group_size; surf->palign = palign; surf->halign = 1; - if (surf->nbx & (palign - 1)) { + if ((surf->nbx & (palign - 1)) && !(palign == 64 && surf->nbx == 32)) { if (prefix) { dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", __func__, __LINE__, prefix, surf->nbx, palign); @@ -395,7 +397,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r; mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1; @@ -433,14 +435,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i return r; } - offset = track->cb_color_bo_offset[id] << 8; + offset = (u64)track->cb_color_bo_offset[id] << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n", __func__, __LINE__, id, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->cb_color_bo[id])) { /* old ddx are broken they allocate bo with w*h*bpp but * program slice with ALIGN(h, 8), catch this and patch @@ -448,14 +450,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i */ if (!surf.mode) { uint32_t *ib = p->ib.ptr; - unsigned long tmp, nby, bsize, size, min = 0; + u64 tmp, nby, bsize, size, min = 0; /* find the height the ddx wants */ if (surf.nby > 8) { min = surf.nby - 8; } bsize = radeon_bo_size(track->cb_color_bo[id]); - tmp = track->cb_color_bo_offset[id] << 8; + tmp = (u64)track->cb_color_bo_offset[id] << 8; for (nby = surf.nby; nby > min; nby--) { size = nby * surf.nbx * surf.bpe * surf.nsamples; if ((tmp + size * mslice) <= bsize) { @@ -467,7 +469,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i slice = ((nby * surf.nbx) / 64) - 1; if (!evergreen_surface_check(p, &surf, "cb")) { /* check if this one works */ - tmp += surf.layer_size * mslice; + tmp += (u64)surf.layer_size * mslice; if (tmp <= bsize) { ib[track->cb_color_slice_idx[id]] = slice; goto old_ddx_ok; @@ -476,9 +478,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i } } dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " - "offset %d, max layer %d, bo size %ld, slice %d)\n", + "offset %llu, max layer %d, bo size %ld, slice %d)\n", __func__, __LINE__, id, surf.layer_size, - track->cb_color_bo_offset[id] << 8, mslice, + (u64)track->cb_color_bo_offset[id] << 8, mslice, radeon_bo_size(track->cb_color_bo[id]), slice); dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", __func__, __LINE__, surf.nbx, surf.nby, @@ -562,7 +564,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r; mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -608,18 +610,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return r; } - offset = track->db_s_read_offset << 8; + offset = (u64)track->db_s_read_offset << 8; if (offset & 
(surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_read_bo)) { dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_read_offset << 8, mslice, + (u64)track->db_s_read_offset << 8, mslice, radeon_bo_size(track->db_s_read_bo)); dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, @@ -627,18 +629,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return -EINVAL; } - offset = track->db_s_write_offset << 8; + offset = (u64)track->db_s_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_write_bo)) { dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_write_offset << 8, mslice, + (u64)track->db_s_write_offset << 8, mslice, radeon_bo_size(track->db_s_write_bo)); return -EINVAL; } @@ -659,7 +661,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r; mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -706,34 +708,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) return r; } - offset = track->db_z_read_offset << 8; + offset = (u64)track->db_z_read_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_read_bo)) { dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_read_offset << 8, mslice, + (u64)track->db_z_read_offset << 8, mslice, radeon_bo_size(track->db_z_read_bo)); return -EINVAL; } - offset = track->db_z_write_offset << 8; + offset = (u64)track->db_z_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_write_bo)) { dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " 
- "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_write_offset << 8, mslice, + (u64)track->db_z_write_offset << 8, mslice, radeon_bo_size(track->db_z_write_bo)); return -EINVAL; } @@ -949,13 +951,13 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { - DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", - i, offset, - radeon_bo_size(track->vgt_strmout_bo[i])); + dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n", + i, offset, + radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } else { - dev_warn(p->dev, "No buffer for streamout %d\n", i); + dev_warn_once(p->dev, "No buffer for streamout %d\n", i); return -EINVAL; } } @@ -977,8 +979,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) (tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { - dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", - __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); + dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); return -EINVAL; } /* check cb */ @@ -1054,8 +1056,8 @@ static int evergreen_packet0_check(struct radeon_cs_parser *p, case EVERGREEN_VLINE_START_END: r = evergreen_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); return r; } break; @@ -1141,8 +1143,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_VSTMP_RING_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1153,15 +1155,15 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case CAYMAN_DB_EQAA: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } break; case CAYMAN_DB_DEPTH_INFO: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } break; @@ -1170,8 +1172,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] &= ~Z_ARRAY_MODE(0xf); @@ -1212,8 +1214,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_Z_READ_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_z_read_offset = radeon_get_ib_value(p, idx); @@ -1224,8 +1226,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case 
DB_Z_WRITE_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_z_write_offset = radeon_get_ib_value(p, idx); @@ -1236,8 +1238,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_STENCIL_READ_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_s_read_offset = radeon_get_ib_value(p, idx); @@ -1248,8 +1250,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_STENCIL_WRITE_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_s_write_offset = radeon_get_ib_value(p, idx); @@ -1271,8 +1273,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case VGT_STRMOUT_BUFFER_BASE_3: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; @@ -1293,8 +1295,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CP_COHER_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "missing reloc for CP_COHER_BASE " - "0x%04X\n", reg); + dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1309,8 +1311,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case PA_SC_AA_CONFIG: if (p->rdev->family >= CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; @@ -1318,8 +1320,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case CAYMAN_PA_SC_AA_CONFIG: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK; @@ -1358,8 +1360,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); @@ -1376,8 +1378,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); @@ -1437,8 +1439,8 @@ static int evergreen_cs_handle_reg(struct 
radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_ATTRIB: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -1465,8 +1467,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR11_ATTRIB: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -1553,8 +1555,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 0x3c; @@ -1569,8 +1571,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR11_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; @@ -1582,8 +1584,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_HTILE_DATA_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx); @@ -1700,36 +1702,36 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_ALU_CONST_CACHE_LS_15: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: if (p->rdev->family >= CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONFIG_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONFIG_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case CAYMAN_SX_SCATTER_EXPORT_BASE: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1738,7 +1740,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; break; default: - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } return 0; @@ -1793,7 +1795,7 
@@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 1) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1805,13 +1807,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return 0; if (pred_op > 2) { - DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); + dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1825,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { - DRM_ERROR("bad CONTEXT_CONTROL\n"); + dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n"); return -EINVAL; } break; @@ -1833,17 +1835,17 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case PACKET3_NUM_INSTANCES: case PACKET3_CLEAR_STATE: if (pkt->count) { - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; case CAYMAN_PACKET3_DEALLOC_STATE: if (p->rdev->family < CHIP_CAYMAN) { - DRM_ERROR("bad PACKET3_DEALLOC_STATE\n"); + dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n"); return -EINVAL; } if (pkt->count) { - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; @@ -1852,12 +1854,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 1) { - DRM_ERROR("bad INDEX_BASE\n"); + dev_warn_once(p->dev, "bad INDEX_BASE\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad INDEX_BASE\n"); + dev_warn_once(p->dev, "bad INDEX_BASE\n"); return -EINVAL; } @@ -1870,7 +1872,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -1878,7 +1880,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case PACKET3_INDEX_BUFFER_SIZE: { if (pkt->count != 0) { - DRM_ERROR("bad INDEX_BUFFER_SIZE\n"); + dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n"); return -EINVAL; } break; @@ -1887,12 +1889,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, { uint64_t offset; if (pkt->count != 3) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } @@ -1905,7 +1907,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -1915,12 +1917,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 4) { - DRM_ERROR("bad DRAW_INDEX_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DRAW_INDEX_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); return -EINVAL; } @@ 
-1933,63 +1935,63 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { - DRM_ERROR("bad DRAW_INDEX_AUTO\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_MULTI_AUTO: if (pkt->count != 2) { - DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { - DRM_ERROR("bad DRAW_INDEX_IMMD\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET: if (pkt->count != 2) { - DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET_2: if (pkt->count != 3) { - DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -2003,19 +2005,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32] */ if (pkt->count != 2) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } /* currently only supporting setting indirect draw buffer base address */ if (idx_value != 1) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } @@ -2037,54 +2039,54 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 3 DRAW_INITIATOR Draw Initiator Register. 
Written to the VGT_DRAW_INITIATOR register for the assigned context */ if (pkt->count != 1) { - DRM_ERROR("bad DRAW_INDIRECT\n"); + dev_warn_once(p->dev, "bad DRAW_INDIRECT\n"); return -EINVAL; } if (idx_value + size > track->indirect_draw_buffer_size) { - dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", - idx_value, size, track->indirect_draw_buffer_size); + dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", + idx_value, size, track->indirect_draw_buffer_size); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DISPATCH_DIRECT: if (pkt->count != 3) { - DRM_ERROR("bad DISPATCH_DIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DISPATCH_INDIRECT: if (pkt->count != 1) { - DRM_ERROR("bad DISPATCH_INDIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DISPATCH_INDIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); return -EINVAL; } ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff); r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ @@ -2093,7 +2095,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } @@ -2104,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc); ib[idx+2] = upper_32_bits(offset) & 0xff; } else if (idx_value & 0x100) { - DRM_ERROR("cannot use PFP on REG wait\n"); + dev_warn_once(p->dev, "cannot use PFP on REG wait\n"); return -EINVAL; } break; @@ -2113,7 +2115,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u32 command, size, info; u64 offset, tmp; if (pkt->count != 4) { - DRM_ERROR("bad CP DMA\n"); + dev_warn_once(p->dev, "bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); @@ -2127,7 +2129,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if (size % 4) { - DRM_ERROR("CP DMA command requires dw count alignment\n"); + dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n"); return -EINVAL; } } @@ -2135,19 +2137,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* src address space is register */ /* GDS is ok */ if (((info & 0x60000000) >> 29) != 1) { - DRM_ERROR("CP DMA SAS not supported\n"); + dev_warn_once(p->dev, "CP DMA SAS not supported\n"); return -EINVAL; } } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { - DRM_ERROR("CP DMA SAIC only supported for registers\n"); + 
dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ if (((info & 0x60000000) >> 29) == 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad CP DMA SRC\n"); + dev_warn_once(p->dev, "bad CP DMA SRC\n"); return -EINVAL; } @@ -2157,15 +2159,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx] = offset; ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); } else if (((info & 0x60000000) >> 29) != 2) { - DRM_ERROR("bad CP DMA SRC_SEL\n"); + dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n"); return -EINVAL; } } @@ -2173,19 +2175,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* dst address space is register */ /* GDS is ok */ if (((info & 0x00300000) >> 20) != 1) { - DRM_ERROR("CP DMA DAS not supported\n"); + dev_warn_once(p->dev, "CP DMA DAS not supported\n"); return -EINVAL; } } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { - DRM_ERROR("CP DMA DAIC only supported for registers\n"); + dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n"); return -EINVAL; } if (((info & 0x00300000) >> 20) == 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad CP DMA DST\n"); + dev_warn_once(p->dev, "bad CP DMA DST\n"); return -EINVAL; } @@ -2195,15 +2197,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+2] = offset; ib[idx+3] = upper_32_bits(offset) & 0xff; } else { - DRM_ERROR("bad CP DMA DST_SEL\n"); + dev_warn_once(p->dev, "bad CP DMA DST_SEL\n"); return -EINVAL; } } @@ -2211,13 +2213,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } case PACKET3_PFP_SYNC_ME: if (pkt->count) { - DRM_ERROR("bad PFP_SYNC_ME\n"); + dev_warn_once(p->dev, "bad PFP_SYNC_ME\n"); return -EINVAL; } break; case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ @@ -2225,7 +2227,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, radeon_get_ib_value(p, idx + 2) != 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -2233,7 +2235,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { @@ -2241,7 +2243,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } 
offset = reloc->gpu_offset + @@ -2257,12 +2259,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 4) { - DRM_ERROR("bad EVENT_WRITE_EOP\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE_EOP\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); return -EINVAL; } @@ -2279,12 +2281,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 3) { - DRM_ERROR("bad EVENT_WRITE_EOS\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE_EOS\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); return -EINVAL; } @@ -2302,7 +2304,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { @@ -2319,7 +2321,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { @@ -2332,7 +2334,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_SET_RESOURCE: if (pkt->count % 8) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; @@ -2340,7 +2342,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_RESOURCE_START) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 8); i++) { @@ -2353,7 +2355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* tex base */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (tex)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -2390,7 +2392,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } else { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (tex)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); return -EINVAL; } moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -2409,14 +2411,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* vtx base */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (vtx)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1+(i*8)+0); size = radeon_get_ib_value(p, idx+1+(i*8)+1); if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { /* force size to size of the buffer */ - dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n"); + dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 
+ size + offset, radeon_bo_size(reloc->robj)); ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; } @@ -2429,7 +2432,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case SQ_TEX_VTX_INVALID_TEXTURE: case SQ_TEX_VTX_INVALID_BUFFER: default: - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } } @@ -2443,7 +2446,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_BOOL_CONST_START) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { - DRM_ERROR("bad SET_BOOL_CONST\n"); + dev_warn_once(p->dev, "bad SET_BOOL_CONST\n"); return -EINVAL; } break; @@ -2453,7 +2456,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_LOOP_CONST_START) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { - DRM_ERROR("bad SET_LOOP_CONST\n"); + dev_warn_once(p->dev, "bad SET_LOOP_CONST\n"); return -EINVAL; } break; @@ -2463,13 +2466,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CTL_CONST_START) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { - DRM_ERROR("bad SET_CTL_CONST\n"); + dev_warn_once(p->dev, "bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; @@ -2477,13 +2480,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_SAMPLER_START) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. 
*/ @@ -2491,14 +2494,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2510,14 +2513,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2530,23 +2533,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; if (pkt->count != 3) { - DRM_ERROR("bad MEM_WRITE (invalid count)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { - DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2556,7 +2559,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } case PACKET3_COPY_DW: if (pkt->count != 4) { - DRM_ERROR("bad COPY_DW (invalid count)\n"); + dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { @@ -2564,14 +2567,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* SRC is memory. 
*/ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COPY_DW (missing src reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2581,8 +2584,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* SRC is a reg. */ reg = radeon_get_ib_value(p, idx+1) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 1); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 1); return -EINVAL; } } @@ -2591,14 +2594,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* DST is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2608,8 +2611,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* DST is a reg. 
*/ reg = radeon_get_ib_value(p, idx+3) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 3); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 3); return -EINVAL; } } @@ -2620,7 +2623,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint32_t allowed_reg_base; uint32_t source_sel; if (pkt->count != 2) { - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n"); return -EINVAL; } @@ -2630,8 +2633,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, areg = idx_value >> 16; if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { - dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n", - areg, idx); + dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n", + areg, idx); return -EINVAL; } @@ -2641,7 +2644,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint32_t swap; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx + 1); @@ -2654,15 +2657,104 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, ib[idx+1] = (offset & 0xfffffffc) | swap; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { - DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n"); + return -EINVAL; + } + break; + } + case PACKET3_COND_EXEC: + { + u64 offset; + + if (pkt->count != 2) { + dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n"); + return -EINVAL; + } + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 0); + offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL; + if (offset & 0x7) { + dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n"); + return -EINVAL; + } + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } + offset += reloc->gpu_offset; + ib[idx + 0] = offset; + ib[idx + 1] = upper_32_bits(offset) & 0xff; break; } + case PACKET3_COND_WRITE: + if (pkt->count != 7) { + dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n"); + return -EINVAL; + } + if (idx_value & 0x10) { + u64 offset; + /* POLL is memory. */ + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 1); + offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32; + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); + return -EINVAL; + } + offset += reloc->gpu_offset; + ib[idx + 1] = offset; + ib[idx + 2] = upper_32_bits(offset) & 0xff; + } else { + /* POLL is a reg. */ + reg = radeon_get_ib_value(p, idx + 1) << 2; + if (!evergreen_is_safe_reg(p, reg)) { + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 1); + return -EINVAL; + } + } + if (idx_value & 0x100) { + u64 offset; + /* WRITE is memory. 
*/ + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 5); + offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32; + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); + return -EINVAL; + } + offset += reloc->gpu_offset; + ib[idx + 5] = offset; + ib[idx + 6] = upper_32_bits(offset) & 0xff; + } else { + /* WRITE is a reg. */ + reg = radeon_get_ib_value(p, idx + 5) << 2; + if (!evergreen_is_safe_reg(p, reg)) { + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 5); + return -EINVAL; + } + } + break; case PACKET3_NOP: break; default: - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; @@ -2762,7 +2854,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) r = evergreen_packet3_check(p, &pkt); break; default: - DRM_ERROR("Unknown packet type %d !\n", pkt.type); + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); kfree(p->track); p->track = NULL; return -EINVAL; @@ -2805,8 +2897,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) do { if (p->idx >= ib_chunk->length_dw) { - DRM_ERROR("Can not parse packet at %d after CS end %d !\n", - p->idx, ib_chunk->length_dw); + dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n", + p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; @@ -2819,7 +2911,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_WRITE\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); return -EINVAL; } switch (sub_cmd) { @@ -2841,24 +2933,24 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += count + 3; break; default: - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n", + dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } switch (sub_cmd) { @@ -2870,13 +2962,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw dst 
buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -2910,13 +3002,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -2929,13 +3021,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", - src_offset + count, radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", + src_offset + count, radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", - dst_offset + count, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", + dst_offset + count, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff); @@ -2948,7 +3040,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x41: /* L2L, partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2L Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff); @@ -2963,7 +3055,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* L2L, dw, broadcast */ r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -2973,18 +3065,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+3); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too 
small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -2998,12 +3090,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* Copy L2T Frame to Field */ case 0x48: if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3013,18 +3105,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3037,7 +3129,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x49: /* L2T, T2L partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n"); return -EINVAL; } /* detile bit */ @@ -3060,12 +3152,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4b: /* L2T, broadcast */ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3075,18 +3167,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - 
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3121,13 +3213,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -3136,7 +3228,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4d: /* T2T partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); @@ -3147,12 +3239,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4f: /* L2T, broadcast */ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3162,18 +3254,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + 
dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3183,21 +3275,21 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += 10; break; default: - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); return -EINVAL; } break; case DMA_PACKET_CONSTANT_FILL: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", + dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -3208,7 +3300,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += 1; break; default: - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); + dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunk_ib->length_dw); @@ -3339,7 +3431,7 @@ static bool evergreen_vm_reg_valid(u32 reg) case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: return true; default: - DRM_ERROR("Invalid register 0x%x in CS\n", reg); + DRM_DEBUG("Invalid register 0x%x in CS\n", reg); return false; } } @@ -3357,7 +3449,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, break; case PACKET3_SET_BASE: if (idx_value != 1) { - DRM_ERROR("bad SET_BASE"); + dev_warn_once(rdev->dev, "bad SET_BASE"); return -EINVAL; } break; @@ -3404,7 +3496,12 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, case CAYMAN_PACKET3_DEALLOC_STATE: break; case PACKET3_COND_WRITE: - if (idx_value & 0x100) { + if (!(idx_value & 0x10)) { + reg = ib[idx + 1] * 4; + if (!evergreen_vm_reg_valid(reg)) + return -EINVAL; + } + if (!(idx_value & 0x100)) { reg = ib[idx + 5] * 4; if (!evergreen_vm_reg_valid(reg)) return -EINVAL; @@ -3423,7 +3520,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { @@ -3443,7 +3540,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if ((command & 0x1fffff) % 4) { - DRM_ERROR("CP DMA command requires dw count alignment\n"); + dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n"); return -EINVAL; } } @@ 
-3454,14 +3551,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if (command & PACKET3_CP_DMA_CMD_SAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); return -EINVAL; } } @@ -3475,14 +3572,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if (command & PACKET3_CP_DMA_CMD_DAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); return -EINVAL; } } @@ -3495,7 +3592,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, uint32_t allowed_reg_base; if (pkt->count != 2) { - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); + dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n"); return -EINVAL; } @@ -3505,8 +3602,8 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, areg = idx_value >> 16; if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { - DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n", - areg, idx); + dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n", + areg, idx); return -EINVAL; } break; @@ -3585,7 +3682,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += count + 3; break; default: - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]); + dev_warn_once(rdev->dev, + "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", + idx, ib->ptr[idx]); return -EINVAL; } break; @@ -3636,7 +3735,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += 10; break; default: - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]); + dev_warn_once(rdev->dev, + "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", + idx, ib->ptr[idx]); return -EINVAL; } break; @@ -3647,7 +3748,7 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += 1; break; default: - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); + dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (idx < ib->length_dw); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 681119c91d94..09dda114e218 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -412,7 +412,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (enable) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector && connector->display_info.has_audio) { WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, HDMI_AVI_INFO_SEND | /* enable AVI info frames */ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ @@ -450,8 +450,7 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) if (!dig || 
!dig->afmt) return; - if (enable && connector && - drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (enable && connector && connector->display_info.has_audio) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 55dbf450bd9c..4aa050385284 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2329,7 +2329,7 @@ static int kv_parse_sys_info_table(struct radeon_device *rdev) le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); } if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) pi->caps_enable_dfs_bypass = true; sumo_construct_sclk_voltage_mapping_table(rdev, diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 77aee99e473a..3890911fe693 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2360,7 +2360,7 @@ int cayman_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* Fence driver */ radeon_fence_driver_init(rdev); /* initialize memory controller */ diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index e08559c44a5c..82edbfb259bf 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -3397,7 +3397,7 @@ static int ni_enable_smc_cac(struct radeon_device *rdev, if (PPSMC_Result_OK != smc_result) ret = -EINVAL; - ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? 
true : false; + ni_pi->cac_enabled = PPSMC_Result_OK == smc_result; } } else if (ni_pi->cac_enabled) { smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index b7f22597ee95..969a8fb0ee9e 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h @@ -439,7 +439,7 @@ typedef struct _StateArray{ //how many states we have UCHAR ucNumEntries; - ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); + ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; }StateArray; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 0b1e19345f43..07a9c523a17a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -459,7 +459,7 @@ void r100_pm_misc(struct radeon_device *rdev) */ void r100_pm_prepare(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 tmp; @@ -490,7 +490,7 @@ void r100_pm_prepare(struct radeon_device *rdev) */ void r100_pm_finish(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 tmp; @@ -603,7 +603,7 @@ void r100_hpd_set_polarity(struct radeon_device *rdev, */ void r100_hpd_init(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned enable = 0; @@ -626,7 +626,7 @@ void r100_hpd_init(struct radeon_device *rdev) */ void r100_hpd_fini(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned disable = 0; @@ -798,7 +798,7 @@ int r100_irq_process(struct radeon_device *rdev) /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); + drm_handle_vblank(rdev_to_drm(rdev), 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -807,7 +807,7 @@ int r100_irq_process(struct radeon_device *rdev) } if (status & RADEON_CRTC2_VBLANK_STAT) { if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); + drm_handle_vblank(rdev_to_drm(rdev), 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -1016,45 +1016,65 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) DRM_DEBUG_KMS("\n"); - if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || - (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) { + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RV200: + case CHIP_RS100: + case CHIP_RS200: DRM_INFO("Loading R100 Microcode\n"); fw_name = FIRMWARE_R100; - } else if ((rdev->family == CHIP_R200) || - (rdev->family == CHIP_RV250) || - (rdev->family == CHIP_RV280) || - (rdev->family == CHIP_RS300)) { + break; + + case CHIP_R200: + case CHIP_RV250: + case CHIP_RV280: + case CHIP_RS300: DRM_INFO("Loading R200 Microcode\n"); fw_name = FIRMWARE_R200; - } else if ((rdev->family == CHIP_R300) || - (rdev->family == CHIP_R350) || - (rdev->family == CHIP_RV350) || - (rdev->family == CHIP_RV380) || - (rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { + break; + + case CHIP_R300: + case CHIP_R350: + case CHIP_RV350: + case CHIP_RV380: + case CHIP_RS400: + case 
CHIP_RS480: DRM_INFO("Loading R300 Microcode\n"); fw_name = FIRMWARE_R300; - } else if ((rdev->family == CHIP_R420) || - (rdev->family == CHIP_R423) || - (rdev->family == CHIP_RV410)) { + break; + + case CHIP_R420: + case CHIP_R423: + case CHIP_RV410: DRM_INFO("Loading R400 Microcode\n"); fw_name = FIRMWARE_R420; - } else if ((rdev->family == CHIP_RS690) || - (rdev->family == CHIP_RS740)) { + break; + + case CHIP_RS690: + case CHIP_RS740: DRM_INFO("Loading RS690/RS740 Microcode\n"); fw_name = FIRMWARE_RS690; - } else if (rdev->family == CHIP_RS600) { + break; + + case CHIP_RS600: DRM_INFO("Loading RS600 Microcode\n"); fw_name = FIRMWARE_RS600; - } else if ((rdev->family == CHIP_RV515) || - (rdev->family == CHIP_R520) || - (rdev->family == CHIP_RV530) || - (rdev->family == CHIP_R580) || - (rdev->family == CHIP_RV560) || - (rdev->family == CHIP_RV570)) { + break; + + case CHIP_RV515: + case CHIP_R520: + case CHIP_RV530: + case CHIP_R580: + case CHIP_RV560: + case CHIP_RV570: DRM_INFO("Loading R500 Microcode\n"); fw_name = FIRMWARE_R520; + break; + + default: + DRM_ERROR("Unsupported Radeon family %u\n", rdev->family); + return -EINVAL; } err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); @@ -1278,8 +1298,8 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1293,7 +1313,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, tile_flags |= RADEON_DST_TILE_MACRO; if (reloc->tiling_flags & RADEON_TILING_MICRO) { if (reg == RADEON_SRC_PITCH_OFFSET) { - DRM_ERROR("Cannot src blit from microtiled surface\n"); + dev_warn_once(p->dev, "Cannot src blit from microtiled surface\n"); radeon_cs_dump_packet(p, pkt); return -EINVAL; } @@ -1322,8 +1342,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, track = (struct r100_cs_track *)p->track; c = radeon_get_ib_value(p, idx++) & 0x1F; if (c > 16) { - DRM_ERROR("Only 16 vertex buffers are allowed %d\n", - pkt->opcode); + dev_warn_once(p->dev, "Only 16 vertex buffers are allowed %d\n", + pkt->opcode); radeon_cs_dump_packet(p, pkt); return -EINVAL; } @@ -1331,8 +1351,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, for (i = 0; i < (c - 1); i += 2, idx += 3) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", + pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1344,8 +1364,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, track->arrays[i + 0].esize &= 0x7F; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", + pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1357,8 +1377,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, if (c & 1) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", + pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1450,12 +1470,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) /* check its a wait until and only 1 count */ if (waitreloc.reg != RADEON_WAIT_UNTIL || waitreloc.count != 0) { - DRM_ERROR("vline wait had illegal wait until segment\n"); + 
dev_warn_once(p->dev, "vline wait had illegal wait until segment\n"); return -EINVAL; } if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { - DRM_ERROR("vline wait had illegal wait until\n"); + dev_warn_once(p->dev, "vline wait had illegal wait until\n"); return -EINVAL; } @@ -1471,9 +1491,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); reg = R100_CP_PACKET0_GET_REG(header); - crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id); + crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id); if (!crtc) { - DRM_ERROR("cannot find crtc %d\n", crtc_id); + dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id); return -ENOENT; } radeon_crtc = to_radeon_crtc(crtc); @@ -1494,7 +1514,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; break; default: - DRM_ERROR("unknown crtc reloc\n"); + dev_warn_once(p->dev, "unknown crtc reloc\n"); return -EINVAL; } ib[h_idx] = header; @@ -1579,7 +1599,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", idx, reg); radeon_cs_dump_packet(p, pkt); return r; @@ -1596,8 +1616,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_DEPTHOFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1609,8 +1629,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_COLOROFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1625,8 +1645,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, i = (reg - RADEON_PP_TXOFFSET_0) / 24; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1652,8 +1672,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1670,8 +1690,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1688,8 +1708,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1706,8 +1726,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_COLORPITCH: r = 
radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1748,8 +1768,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->cb[0].cpp = 4; break; default: - DRM_ERROR("Invalid color buffer format (%d) !\n", - ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", + ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); return -EINVAL; } track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); @@ -1777,8 +1797,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_ZPASS_ADDR: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1907,10 +1927,10 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, idx = pkt->idx + 1; value = radeon_get_ib_value(p, idx + 2); if ((value + 1) > radeon_bo_size(robj)) { - DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " - "(need %u have %lu) !\n", - value + 1, - radeon_bo_size(robj)); + dev_warn_once(p->dev, "[drm] Buffer too small for PACKET3 INDX_BUFFER " + "(need %u have %lu) !\n", + value + 1, + radeon_bo_size(robj)); return -EINVAL; } return 0; @@ -1937,7 +1957,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, case PACKET3_INDX_BUFFER: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1951,7 +1971,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1972,7 +1992,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_3D_DRAW_IMMD: if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); @@ -1985,7 +2005,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, /* triggers drawing using in-packet vertex data */ case PACKET3_3D_DRAW_IMMD_2: if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vap_vf_cntl = radeon_get_ib_value(p, idx); @@ -2031,7 +2051,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, case PACKET3_NOP: break; default: - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; @@ -2073,8 +2093,8 @@ int r100_cs_parse(struct radeon_cs_parser *p) r = r100_packet3_check(p, &pkt); break; default: - DRM_ERROR("Unknown packet type %d !\n", - pkt.type); + dev_warn_once(p->dev, "Unknown packet type %d !\n", + pkt.type); return -EINVAL; } if (r) @@ -2085,19 +2105,19 @@ int r100_cs_parse(struct radeon_cs_parser *p) static void 
r100_cs_track_texture_print(struct r100_cs_track_texture *t) { - DRM_ERROR("pitch %d\n", t->pitch); - DRM_ERROR("use_pitch %d\n", t->use_pitch); - DRM_ERROR("width %d\n", t->width); - DRM_ERROR("width_11 %d\n", t->width_11); - DRM_ERROR("height %d\n", t->height); - DRM_ERROR("height_11 %d\n", t->height_11); - DRM_ERROR("num levels %d\n", t->num_levels); - DRM_ERROR("depth %d\n", t->txdepth); - DRM_ERROR("bpp %d\n", t->cpp); - DRM_ERROR("coordinate type %d\n", t->tex_coord_type); - DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); - DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); - DRM_ERROR("compress format %d\n", t->compress_format); + DRM_DEBUG("pitch %d\n", t->pitch); + DRM_DEBUG("use_pitch %d\n", t->use_pitch); + DRM_DEBUG("width %d\n", t->width); + DRM_DEBUG("width_11 %d\n", t->width_11); + DRM_DEBUG("height %d\n", t->height); + DRM_DEBUG("height_11 %d\n", t->height_11); + DRM_DEBUG("num levels %d\n", t->num_levels); + DRM_DEBUG("depth %d\n", t->txdepth); + DRM_DEBUG("bpp %d\n", t->cpp); + DRM_DEBUG("coordinate type %d\n", t->tex_coord_type); + DRM_DEBUG("width round to power of 2 %d\n", t->roundup_w); + DRM_DEBUG("height round to power of 2 %d\n", t->roundup_h); + DRM_DEBUG("compress format %d\n", t->compress_format); } static int r100_track_compress_size(int compress_format, int w, int h) @@ -2152,8 +2172,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev, size += track->textures[idx].cube_info[face].offset; if (size > radeon_bo_size(cube_robj)) { - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", - size, radeon_bo_size(cube_robj)); + dev_warn_once(rdev->dev, + "Cube texture offset greater than object size %lu %lu\n", + size, radeon_bo_size(cube_robj)); r100_cs_track_texture_print(&track->textures[idx]); return -1; } @@ -2176,7 +2197,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, continue; robj = track->textures[u].robj; if (robj == NULL) { - DRM_ERROR("No texture bound to unit %u\n", u); + dev_warn_once(rdev->dev, "No texture bound to unit %u\n", u); return -EINVAL; } size = 0; @@ -2229,13 +2250,13 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, size *= 6; break; default: - DRM_ERROR("Invalid texture coordinate type %u for unit " - "%u\n", track->textures[u].tex_coord_type, u); + dev_warn_once(rdev->dev, "Invalid texture coordinate type %u for unit " + "%u\n", track->textures[u].tex_coord_type, u); return -EINVAL; } if (size > radeon_bo_size(robj)) { - DRM_ERROR("Texture of unit %u needs %lu bytes but is " - "%lu\n", u, size, radeon_bo_size(robj)); + dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is " + "%lu\n", u, size, radeon_bo_size(robj)); r100_cs_track_texture_print(&track->textures[u]); return -EINVAL; } @@ -2257,18 +2278,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) for (i = 0; i < num_cb; i++) { if (track->cb[i].robj == NULL) { - DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); + dev_warn_once(rdev->dev, "[drm] No buffer for color buffer %d !\n", i); return -EINVAL; } size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; size += track->cb[i].offset; if (size > radeon_bo_size(track->cb[i].robj)) { - DRM_ERROR("[drm] Buffer too small for color buffer %d " - "(need %lu have %lu) !\n", i, size, - radeon_bo_size(track->cb[i].robj)); - DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", - i, track->cb[i].pitch, track->cb[i].cpp, - track->cb[i].offset, track->maxy); + dev_warn_once(rdev->dev, "[drm] Buffer too 
small for color buffer %d " + "(need %lu have %lu) !\n", i, size, + radeon_bo_size(track->cb[i].robj)); + dev_warn_once(rdev->dev, "[drm] color buffer %d (%u %u %u %u)\n", + i, track->cb[i].pitch, track->cb[i].cpp, + track->cb[i].offset, track->maxy); return -EINVAL; } } @@ -2276,18 +2297,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) if (track->zb_dirty && track->z_enabled) { if (track->zb.robj == NULL) { - DRM_ERROR("[drm] No buffer for z buffer !\n"); + dev_warn_once(rdev->dev, "[drm] No buffer for z buffer !\n"); return -EINVAL; } size = track->zb.pitch * track->zb.cpp * track->maxy; size += track->zb.offset; if (size > radeon_bo_size(track->zb.robj)) { - DRM_ERROR("[drm] Buffer too small for z buffer " - "(need %lu have %lu) !\n", size, - radeon_bo_size(track->zb.robj)); - DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", - track->zb.pitch, track->zb.cpp, - track->zb.offset, track->maxy); + dev_warn_once(rdev->dev, "[drm] Buffer too small for z buffer " + "(need %lu have %lu) !\n", size, + radeon_bo_size(track->zb.robj)); + dev_warn_once(rdev->dev, "[drm] zbuffer (%u %u %u %u)\n", + track->zb.pitch, track->zb.cpp, + track->zb.offset, track->maxy); return -EINVAL; } } @@ -2295,19 +2316,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) if (track->aa_dirty && track->aaresolve) { if (track->aa.robj == NULL) { - DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); + dev_warn_once(rdev->dev, "[drm] No buffer for AA resolve buffer %d !\n", i); return -EINVAL; } /* I believe the format comes from colorbuffer0. */ size = track->aa.pitch * track->cb[0].cpp * track->maxy; size += track->aa.offset; if (size > radeon_bo_size(track->aa.robj)) { - DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " - "(need %lu have %lu) !\n", i, size, - radeon_bo_size(track->aa.robj)); - DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", - i, track->aa.pitch, track->cb[0].cpp, - track->aa.offset, track->maxy); + dev_warn_once(rdev->dev, "[drm] Buffer too small for AA resolve buffer %d " + "(need %lu have %lu) !\n", i, size, + radeon_bo_size(track->aa.robj)); + dev_warn_once(rdev->dev, "[drm] AA resolve buffer %d (%u %u %u %u)\n", + i, track->aa.pitch, track->cb[0].cpp, + track->aa.offset, track->maxy); return -EINVAL; } } @@ -2324,17 +2345,17 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) for (i = 0; i < track->num_arrays; i++) { size = track->arrays[i].esize * track->max_indx * 4UL; if (track->arrays[i].robj == NULL) { - DRM_ERROR("(PW %u) Vertex array %u no buffer " - "bound\n", prim_walk, i); + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); return -EINVAL; } if (size > radeon_bo_size(track->arrays[i].robj)) { - dev_err(rdev->dev, "(PW %u) Vertex array %u " - "need %lu dwords have %lu dwords\n", - prim_walk, i, size >> 2, - radeon_bo_size(track->arrays[i].robj) - >> 2); - DRM_ERROR("Max indices %u\n", track->max_indx); + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); + dev_warn_once(rdev->dev, "Max indices %u\n", track->max_indx); return -EINVAL; } } @@ -2343,16 +2364,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) for (i = 0; i < track->num_arrays; i++) { size = track->arrays[i].esize * (nverts - 1) * 4UL; if (track->arrays[i].robj == NULL) { - DRM_ERROR("(PW %u) Vertex array %u no 
buffer " - "bound\n", prim_walk, i); + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); return -EINVAL; } if (size > radeon_bo_size(track->arrays[i].robj)) { - dev_err(rdev->dev, "(PW %u) Vertex array %u " - "need %lu dwords have %lu dwords\n", - prim_walk, i, size >> 2, - radeon_bo_size(track->arrays[i].robj) - >> 2); + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); return -EINVAL; } } @@ -2360,16 +2381,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) case 3: size = track->vtx_size * nverts; if (size != track->immd_dwords) { - DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", - track->immd_dwords, size); - DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", - nverts, track->vtx_size); + dev_warn_once(rdev->dev, "IMMD draw %u dwors but needs %lu dwords\n", + track->immd_dwords, size); + dev_warn_once(rdev->dev, "VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", + nverts, track->vtx_size); return -EINVAL; } break; default: - DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", - prim_walk); + dev_warn_once(rdev->dev, "[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", + prim_walk); return -EINVAL; } @@ -3059,7 +3080,7 @@ DEFINE_SHOW_ATTRIBUTE(r100_debugfs_mc_info); void r100_debugfs_rbbm_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("r100_rbbm_info", 0444, root, rdev, &r100_debugfs_rbbm_info_fops); @@ -3069,7 +3090,7 @@ void r100_debugfs_rbbm_init(struct radeon_device *rdev) void r100_debugfs_cp_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("r100_cp_ring_info", 0444, root, rdev, &r100_debugfs_cp_ring_info_fops); @@ -3081,7 +3102,7 @@ void r100_debugfs_cp_init(struct radeon_device *rdev) void r100_debugfs_mc_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("r100_mc_info", 0444, root, rdev, &r100_debugfs_mc_info_fops); @@ -3947,7 +3968,7 @@ int r100_resume(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* post */ - radeon_combios_asic_init(rdev->ddev); + radeon_combios_asic_init(rdev_to_drm(rdev)); /* Resume clock after posting */ r100_clock_startup(rdev); /* Initialize surface registers */ @@ -4056,7 +4077,7 @@ int r100_init(struct radeon_device *rdev) /* Set asic errata */ r100_errata(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f5f2ffea5ab2..10a65a71de31 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -163,8 +163,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -180,8 +180,8 @@ 
int r200_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_DEPTHOFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -193,8 +193,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_COLOROFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -212,8 +212,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, i = (reg - R200_PP_TXOFFSET_0) / 24; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -265,8 +265,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -283,8 +283,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_COLORPITCH: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -326,12 +326,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->cb[0].cpp = 4; break; default: - DRM_ERROR("Invalid color buffer format (%d) !\n", - ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", + ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); return -EINVAL; } if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) { - DRM_ERROR("No support for depth xy offset in kms\n"); + dev_warn_once(p->dev, "No support for depth xy offset in kms\n"); return -EINVAL; } @@ -360,8 +360,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, case RADEON_RB3D_ZPASS_ADDR: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 1620f534f55f..d2ee6deec039 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r300_gpu_init(struct radeon_device *rdev) +/* rs400_gpu_init also calls this! 
*/ +void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; @@ -616,7 +617,7 @@ DEFINE_SHOW_ATTRIBUTE(rv370_debugfs_pcie_gart_info); static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("rv370_pcie_gart_info", 0444, root, rdev, &rv370_debugfs_pcie_gart_info_fops); @@ -644,8 +645,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case RADEON_CRTC_GUI_TRIG_VLINE: r = r100_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -663,8 +664,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, i = (reg - R300_RB3D_COLOROFFSET0) >> 2; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -676,8 +677,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_ZB_DEPTHOFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -705,8 +706,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, i = (reg - R300_TX_OFFSET_0) >> 2; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -761,7 +762,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_CCTL */ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ p->rdev->cmask_filp != p->filp) { - DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); + dev_warn_once(p->dev, "Invalid RB3D_CCTL: Cannot enable CMASK.\n"); return -EINVAL; } track->num_cb = ((idx_value >> 5) & 0x3) + 1; @@ -778,8 +779,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -811,8 +812,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 5: if (p->rdev->family < CHIP_RV515) { - DRM_ERROR("Invalid color buffer format (%d)!\n", - ((idx_value >> 21) & 0xF)); + dev_warn_once(p->dev, "Invalid color buffer format (%d)!\n", + ((idx_value >> 21) & 0xF)); return -EINVAL; } fallthrough; @@ -826,8 +827,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->cb[i].cpp = 16; break; default: - DRM_ERROR("Invalid color buffer format (%d) !\n", - ((idx_value >> 21) & 0xF)); + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", + ((idx_value >> 21) & 0xF)); return -EINVAL; } track->cb_dirty = true; @@ -852,8 +853,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->zb.cpp = 4; break; default: - DRM_ERROR("Invalid z buffer format (%d) !\n", - (idx_value & 0xF)); + dev_warn_once(p->dev, "Invalid z buffer format (%d) !\n", + (idx_value & 0xF)); return -EINVAL; } track->zb_dirty = true; @@ -863,8 +864,8 @@ static int 
r300_packet0_check(struct radeon_cs_parser *p, if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -961,8 +962,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case R300_TX_FORMAT_ATI2N: if (p->rdev->family < CHIP_R420) { - DRM_ERROR("Invalid texture format %u\n", - (idx_value & 0x1F)); + dev_warn_once(p->dev, "Invalid texture format %u\n", + (idx_value & 0x1F)); return -EINVAL; } /* The same rules apply as for DXT3/5. */ @@ -973,8 +974,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].compress_format = R100_TRACK_COMP_DXT35; break; default: - DRM_ERROR("Invalid texture format %u\n", - (idx_value & 0x1F)); + dev_warn_once(p->dev, "Invalid texture format %u\n", + (idx_value & 0x1F)); return -EINVAL; } track->tex_dirty = true; @@ -1040,7 +1041,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, R100_TRACK_COMP_DXT1; } } else if (idx_value & (1 << 14)) { - DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); + dev_warn_once(p->dev, "Forbidden bit TXFORMAT_MSB\n"); return -EINVAL; } track->tex_dirty = true; @@ -1078,8 +1079,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_ZB_ZPASS_ADDR: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1120,8 +1121,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_RB3D_AARESOLVE_OFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); radeon_cs_dump_packet(p, pkt); return r; } @@ -1190,7 +1191,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, case PACKET3_INDX_BUFFER: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); radeon_cs_dump_packet(p, pkt); return r; } @@ -1206,7 +1207,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, * PRIM_WALK must be equal to 3 vertex data in embedded * in cmd stream */ if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); @@ -1221,7 +1222,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, * PRIM_WALK must be equal to 3 vertex data in embedded * in cmd stream */ if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } track->vap_vf_cntl = radeon_get_ib_value(p, idx); @@ -1271,7 +1272,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, case PACKET3_NOP: break; default: - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; @@ -1307,7 +1308,7 @@ int r300_cs_parse(struct radeon_cs_parser *p) r = r300_packet3_check(p, &pkt); break; default: - DRM_ERROR("Unknown packet type %d !\n", pkt.type); + 
dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); return -EINVAL; } if (r) { @@ -1452,7 +1453,7 @@ int r300_resume(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* post */ - radeon_combios_asic_init(rdev->ddev); + radeon_combios_asic_init(rdev_to_drm(rdev)); /* Resume clock after posting */ r300_clock_startup(rdev); /* Initialize surface registers */ @@ -1538,7 +1539,7 @@ int r300_init(struct radeon_device *rdev) /* Set asic errata */ r300_errata(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index a979662eaa73..9a31cdec6415 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -322,7 +322,7 @@ int r420_resume(struct radeon_device *rdev) if (rdev->is_atom_bios) { atom_asic_init(rdev->mode_info.atom_context); } else { - radeon_combios_asic_init(rdev->ddev); + radeon_combios_asic_init(rdev_to_drm(rdev)); } /* Resume clock after posting */ r420_clock_resume(rdev); @@ -414,7 +414,7 @@ int r420_init(struct radeon_device *rdev) return -EINVAL; /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); @@ -493,7 +493,7 @@ DEFINE_SHOW_ATTRIBUTE(r420_debugfs_pipes_info); void r420_debugfs_pipes_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("r420_pipes_info", 0444, root, rdev, &r420_debugfs_pipes_info_fops); diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 6cbcaa845192..08e127b3249a 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -287,7 +287,7 @@ int r520_init(struct radeon_device *rdev) atom_asic_init(rdev->mode_info.atom_context); } /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 087d41e370fd..8b62f7faa5b9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -950,7 +950,7 @@ void r600_hpd_set_polarity(struct radeon_device *rdev, void r600_hpd_init(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned enable = 0; @@ -1017,7 +1017,7 @@ void r600_hpd_init(struct radeon_device *rdev) void r600_hpd_fini(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_connector *connector; unsigned disable = 0; @@ -3280,7 +3280,7 @@ int r600_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* Fence driver */ radeon_fence_driver_init(rdev); if (rdev->flags & RADEON_IS_AGP) { @@ -4136,7 +4136,7 @@ restart_ih: DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); + drm_handle_vblank(rdev_to_drm(rdev), 0); rdev->pm.vblank_sync = true; 
wake_up(&rdev->irq.vblank_queue); } @@ -4166,7 +4166,7 @@ restart_ih: DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); + drm_handle_vblank(rdev_to_drm(rdev), 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -4358,7 +4358,7 @@ DEFINE_SHOW_ATTRIBUTE(r600_debugfs_mc_info); static void r600_debugfs_mc_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("r600_mc_info", 0444, root, rdev, &r600_debugfs_mc_info_fops); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 6cf54a747749..8eeceeeca362 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -361,9 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) format = G_0280A0_FORMAT(track->cb_color_info[i]); if (!r600_fmt_is_valid_color(format)) { - dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", - __func__, __LINE__, format, - i, track->cb_color_info[i]); + dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", + __func__, __LINE__, format, + i, track->cb_color_info[i]); return -EINVAL; } /* pitch in pixels */ @@ -384,9 +384,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { - dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, - G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, - track->cb_color_info[i]); + dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, + track->cb_color_info[i]); return -EINVAL; } switch (array_mode) { @@ -402,25 +402,26 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) case V_0280A0_ARRAY_2D_TILED_THIN1: break; default: - dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, - G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, - track->cb_color_info[i]); + dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, + track->cb_color_info[i]); return -EINVAL; } if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", - __func__, __LINE__, pitch, pitch_align, array_mode); + dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { - dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", - __func__, __LINE__, height, height_align, array_mode); + dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, - base_offset, base_align, array_mode); + dev_warn_once(p->dev, + "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, + base_offset, base_align, array_mode); return -EINVAL; } @@ -447,13 +448,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) * broken userspace. 
*/ } else { - dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", - __func__, i, array_mode, - track->cb_color_bo_offset[i], tmp, - radeon_bo_size(track->cb_color_bo[i]), - pitch, height, r600_fmt_get_nblocksx(format, pitch), - r600_fmt_get_nblocksy(format, height), - r600_fmt_get_blocksize(format)); + dev_warn_once(p->dev, + "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", + __func__, i, array_mode, + track->cb_color_bo_offset[i], tmp, + radeon_bo_size(track->cb_color_bo[i]), + pitch, height, r600_fmt_get_nblocksx(format, pitch), + r600_fmt_get_nblocksy(format, height), + r600_fmt_get_blocksize(format)); return -EINVAL; } } @@ -478,11 +480,11 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) if (bytes + track->cb_color_frag_offset[i] > radeon_bo_size(track->cb_color_frag_bo[i])) { - dev_warn(p->dev, "%s FMASK_TILE_MAX too large " - "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", - __func__, tile_max, bytes, - track->cb_color_frag_offset[i], - radeon_bo_size(track->cb_color_frag_bo[i])); + dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large " + "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, tile_max, bytes, + track->cb_color_frag_offset[i], + radeon_bo_size(track->cb_color_frag_bo[i])); return -EINVAL; } } @@ -496,17 +498,17 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) if (bytes + track->cb_color_tile_offset[i] > radeon_bo_size(track->cb_color_tile_bo[i])) { - dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " - "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", - __func__, block_max, bytes, - track->cb_color_tile_offset[i], - radeon_bo_size(track->cb_color_tile_bo[i])); + dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large " + "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, block_max, bytes, + track->cb_color_tile_offset[i], + radeon_bo_size(track->cb_color_tile_bo[i])); return -EINVAL; } break; } default: - dev_warn(p->dev, "%s invalid tile mode\n", __func__); + dev_warn_once(p->dev, "%s invalid tile mode\n", __func__); return -EINVAL; } return 0; @@ -526,7 +528,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) if (track->db_bo == NULL) { - dev_warn(p->dev, "z/stencil with no depth buffer\n"); + dev_warn_once(p->dev, "z/stencil with no depth buffer\n"); return -EINVAL; } switch (G_028010_FORMAT(track->db_depth_info)) { @@ -544,20 +546,22 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) bpe = 8; break; default: - dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); + dev_warn_once(p->dev, + "z/stencil with invalid format %d\n", + G_028010_FORMAT(track->db_depth_info)); return -EINVAL; } if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { if (!track->db_depth_size_idx) { - dev_warn(p->dev, "z/stencil buffer size not set\n"); + dev_warn_once(p->dev, "z/stencil buffer size not set\n"); return -EINVAL; } tmp = radeon_bo_size(track->db_bo) - track->db_offset; tmp = (tmp / bpe) >> 6; if (!tmp) { - dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", - track->db_depth_size, bpe, track->db_offset, - radeon_bo_size(track->db_bo)); + dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", + track->db_depth_size, bpe, track->db_offset, + radeon_bo_size(track->db_bo)); return -EINVAL; } ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); @@ -579,9 +583,9 @@ static int r600_cs_track_validate_db(struct 
radeon_cs_parser *p) array_check.blocksize = bpe; if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, - G_028010_ARRAY_MODE(track->db_depth_info), - track->db_depth_info); + dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, + G_028010_ARRAY_MODE(track->db_depth_info), + track->db_depth_info); return -EINVAL; } switch (array_mode) { @@ -592,24 +596,24 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) case V_028010_ARRAY_2D_TILED_THIN1: break; default: - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, - G_028010_ARRAY_MODE(track->db_depth_info), - track->db_depth_info); + dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, + G_028010_ARRAY_MODE(track->db_depth_info), + track->db_depth_info); return -EINVAL; } if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", + dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { - dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", + dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__, + dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__, base_offset, base_align, array_mode); return -EINVAL; } @@ -618,10 +622,11 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; tmp = ntiles * bpe * 64 * nviews * track->nsamples; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { - dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", - array_mode, - track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, - radeon_bo_size(track->db_bo)); + dev_warn_once(p->dev, + "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", + array_mode, + track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, + radeon_bo_size(track->db_bo)); return -EINVAL; } } @@ -632,13 +637,13 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) unsigned nbx, nby; if (track->htile_bo == NULL) { - dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", - __func__, __LINE__, track->db_depth_info); + dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", + __func__, __LINE__, track->db_depth_info); return -EINVAL; } if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { - dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", - __func__, __LINE__, track->db_depth_size); + dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", + __func__, __LINE__, track->db_depth_size); return -EINVAL; } @@ -676,8 +681,8 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) nby = round_up(nby, 16 * 8); break; default: - dev_warn(p->dev, "%s:%d invalid num pipes %d\n", - __func__, __LINE__, track->npipes); + dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n", + __func__, __LINE__, track->npipes); return -EINVAL; } } @@ -689,9 +694,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) size += track->htile_offset; if (size > 
radeon_bo_size(track->htile_bo)) { - dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", - __func__, __LINE__, radeon_bo_size(track->htile_bo), - size, nbx, nby); + dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", + __func__, __LINE__, radeon_bo_size(track->htile_bo), + size, nbx, nby); return -EINVAL; } } @@ -718,13 +723,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { - DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", - i, offset, - radeon_bo_size(track->vgt_strmout_bo[i])); + dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n", + i, offset, + radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } else { - dev_warn(p->dev, "No buffer for streamout %d\n", i); + dev_warn_once(p->dev, "No buffer for streamout %d\n", i); return -EINVAL; } } @@ -753,8 +758,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) (tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { - dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", - __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); + dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); return -EINVAL; } /* perform rewrite of CB_COLOR[0-7]_SIZE */ @@ -841,33 +846,33 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, /* check its a WAIT_REG_MEM */ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 || wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { - DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); + dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n"); return -EINVAL; } wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); /* bit 4 is reg (0) or mem (1) */ if (wait_reg_mem_info & 0x10) { - DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n"); + dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n"); return -EINVAL; } /* bit 8 is me (0) or pfp (1) */ if (wait_reg_mem_info & 0x100) { - DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n"); + dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n"); return -EINVAL; } /* waiting for value to be equal */ if ((wait_reg_mem_info & 0x7) != 0x3) { - DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); + dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n"); return -EINVAL; } if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) { - DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); + dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n"); return -EINVAL; } if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) { - DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); + dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n"); return -EINVAL; } @@ -884,9 +889,9 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = R600_CP_PACKET0_GET_REG(header); - crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id); + crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id); if (!crtc) { - DRM_ERROR("cannot find crtc %d\n", crtc_id); + dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id); return -ENOENT; } radeon_crtc = to_radeon_crtc(crtc); @@ -907,7 +912,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, 
ib[h_idx] = header; ib[h_idx + 4] = vline_status[crtc_id] >> 2; } else { - DRM_ERROR("unknown crtc reloc\n"); + dev_warn_once(p->dev, "unknown crtc reloc\n"); return -EINVAL; } return 0; @@ -923,8 +928,8 @@ static int r600_packet0_check(struct radeon_cs_parser *p, case AVIVO_D1MODE_VLINE_START_END: r = r600_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); return r; } break; @@ -972,7 +977,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) i = (reg >> 7); if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } m = 1 << ((reg >> 2) & 31); @@ -1013,8 +1018,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_VSTMP_RING_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1031,8 +1036,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) radeon_cs_packet_next_is_pkt3_nop(p)) { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_depth_info = radeon_get_ib_value(p, idx); @@ -1073,8 +1078,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case VGT_STRMOUT_BUFFER_BASE_3: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; @@ -1096,8 +1101,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CP_COHER_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "missing reloc for CP_COHER_BASE " - "0x%04X\n", reg); + dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1270,8 +1275,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 4; @@ -1285,8 +1290,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_DEPTH_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_offset = radeon_get_ib_value(p, idx) << 8; @@ -1298,8 +1303,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_HTILE_DATA_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8; @@ -1368,8 
+1373,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_ALU_CONST_CACHE_VS_15: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1377,8 +1382,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SX_MEMORY_EXPORT_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - dev_warn(p->dev, "bad SET_CONFIG_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1387,7 +1392,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; break; default: - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } return 0; @@ -1408,7 +1413,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, unsigned block_align, unsigned height_align, unsigned base_align, unsigned *l0_size, unsigned *mipmap_size) { - unsigned offset, i, level; + unsigned offset, i; unsigned width, height, depth, size; unsigned blocksize; unsigned nbx, nby; @@ -1420,7 +1425,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, w0 = r600_mip_minify(w0, 0); h0 = r600_mip_minify(h0, 0); d0 = r600_mip_minify(d0, 0); - for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { + for (i = 0, offset = 0; i < nlevels; i++) { width = r600_mip_minify(w0, i); nbx = r600_fmt_get_nblocksx(format, width); @@ -1543,43 +1548,43 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, llevel = 0; break; default: - dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); + dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } if (!r600_fmt_is_valid_texture(format, p->family)) { - dev_warn(p->dev, "%s:%d texture invalid format %d\n", - __func__, __LINE__, format); + dev_warn_once(p->dev, "%s:%d texture invalid format %d\n", + __func__, __LINE__, format); return -EINVAL; } if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { - dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", - __func__, __LINE__, G_038000_TILE_MODE(word0)); + dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n", + __func__, __LINE__, G_038000_TILE_MODE(word0)); return -EINVAL; } /* XXX check height as well... 
*/ if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", - __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); + dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", - __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); + dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", + __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(mip_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", - __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); + dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", + __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (blevel > llevel) { - dev_warn(p->dev, "texture blevel %d > llevel %d\n", - blevel, llevel); + dev_warn_once(p->dev, "texture blevel %d > llevel %d\n", + blevel, llevel); } if (is_array) { barray = G_038014_BASE_ARRAY(word5); @@ -1592,16 +1597,16 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ if ((l0_size + word2) > radeon_bo_size(texture)) { - dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", - w0, h0, pitch_align, height_align, - array_check.array_mode, format, word2, - l0_size, radeon_bo_size(texture)); - dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); + dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", + w0, h0, pitch_align, height_align, + array_check.array_mode, format, word2, + l0_size, radeon_bo_size(texture)); + dev_warn_once(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { - /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", + /*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ } return 0; @@ -1613,13 +1618,13 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) i = (reg >> 7); if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } m = 1 << ((reg >> 2) & 31); if (!(r600_reg_safe_bm[i] & m)) return true; - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return false; } @@ -1648,7 +1653,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 1) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1660,13 +1665,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return 0; if (pred_op > 2) { - DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); + dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } 
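r600_is_safe_reg() and r600_cs_check_reg() above index the same bitmap: registers are 32-bit and byte-addressed, so reg >> 2 is the register number, its low five bits select a bit inside a bitmap word, and reg >> 7 selects the word (128 bytes, i.e. 32 registers, per entry). A self-contained worked example of the indexing only (the offset is picked for illustration; whether it is actually whitelisted depends on the generated r600_reg_safe_bm table, which is not shown here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x28010;         /* DB_DEPTH_INFO, used as an arbitrary example */
	uint32_t word = reg >> 7;       /* index into r600_reg_safe_bm[] */
	uint32_t bit = (reg >> 2) & 31; /* bit within that word */

	printf("reg 0x%05x -> word %u, mask 0x%08x\n", reg, word, 1u << bit);
	return 0;
}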
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1681,20 +1686,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p, case PACKET3_START_3D_CMDBUF: if (p->family >= CHIP_RV770 || pkt->count) { - DRM_ERROR("bad START_3D\n"); + dev_warn_once(p->dev, "bad START_3D\n"); return -EINVAL; } break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { - DRM_ERROR("bad CONTEXT_CONTROL\n"); + dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n"); return -EINVAL; } break; case PACKET3_INDEX_TYPE: case PACKET3_NUM_INSTANCES: if (pkt->count) { - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n"); return -EINVAL; } break; @@ -1702,12 +1707,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, { uint64_t offset; if (pkt->count != 3) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } @@ -1720,37 +1725,37 @@ static int r600_packet3_check(struct radeon_cs_parser *p, r = r600_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { - DRM_ERROR("bad DRAW_INDEX_AUTO\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD_BE: case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { - DRM_ERROR("bad DRAW_INDEX_IMMD\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = r600_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ @@ -1759,7 +1764,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } @@ -1770,7 +1775,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); ib[idx+2] = upper_32_bits(offset) & 0xff; } else if (idx_value & 0x100) { - DRM_ERROR("cannot use PFP on REG wait\n"); + dev_warn_once(p->dev, "cannot use PFP on REG wait\n"); return -EINVAL; } break; @@ -1779,24 +1784,24 @@ static int r600_packet3_check(struct radeon_cs_parser *p, u32 command, size; u64 offset, tmp; if (pkt->count != 4) { - DRM_ERROR("bad CP DMA\n"); + dev_warn_once(p->dev, "bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); size = command & 0x1fffff; if (command & PACKET3_CP_DMA_CMD_SAS) { /* src address space is register */ - DRM_ERROR("CP DMA SAS not supported\n"); + dev_warn_once(p->dev, "CP DMA SAS not supported\n"); return -EINVAL; } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { - DRM_ERROR("CP DMA SAIC 
only supported for registers\n"); + dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad CP DMA SRC\n"); + dev_warn_once(p->dev, "bad CP DMA SRC\n"); return -EINVAL; } @@ -1806,8 +1811,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -1816,17 +1821,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } if (command & PACKET3_CP_DMA_CMD_DAS) { /* dst address space is register */ - DRM_ERROR("CP DMA DAS not supported\n"); + dev_warn_once(p->dev, "CP DMA DAS not supported\n"); return -EINVAL; } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { - DRM_ERROR("CP DMA DAIC only supported for registers\n"); + dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad CP DMA DST\n"); + dev_warn_once(p->dev, "bad CP DMA DST\n"); return -EINVAL; } @@ -1836,8 +1841,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } @@ -1848,7 +1853,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ @@ -1856,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, radeon_get_ib_value(p, idx + 2) != 0) { r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1864,7 +1869,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { @@ -1872,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } offset = reloc->gpu_offset + @@ -1888,12 +1893,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 4) { - DRM_ERROR("bad EVENT_WRITE_EOP\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } @@ -1911,7 +1916,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) 
{ - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { @@ -1927,7 +1932,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { @@ -1939,7 +1944,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_SET_RESOURCE: if (pkt->count % 7) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; @@ -1947,7 +1952,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 7); i++) { @@ -1959,7 +1964,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, /* tex base */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1973,7 +1978,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, /* tex mip base */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1994,15 +1999,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, /* vtx base */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1+(i*7)+0); size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { /* force size to size of the buffer */ - dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", - size + offset, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", + size + offset, radeon_bo_size(reloc->robj)); ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; } @@ -2015,7 +2020,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, case SQ_TEX_VTX_INVALID_TEXTURE: case SQ_TEX_VTX_INVALID_BUFFER: default: - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } } @@ -2027,7 +2032,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || (start_reg >= PACKET3_SET_ALU_CONST_END) || (end_reg >= PACKET3_SET_ALU_CONST_END)) { - DRM_ERROR("bad SET_ALU_CONST\n"); + dev_warn_once(p->dev, "bad SET_ALU_CONST\n"); return -EINVAL; } } @@ -2038,7 +2043,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { - DRM_ERROR("bad SET_BOOL_CONST\n"); + dev_warn_once(p->dev, "bad SET_BOOL_CONST\n"); return -EINVAL; } break; @@ -2048,7 +2053,7 @@ static int 
r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { - DRM_ERROR("bad SET_LOOP_CONST\n"); + dev_warn_once(p->dev, "bad SET_LOOP_CONST\n"); return -EINVAL; } break; @@ -2058,13 +2063,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { - DRM_ERROR("bad SET_CTL_CONST\n"); + dev_warn_once(p->dev, "bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; @@ -2072,22 +2077,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BASE_UPDATE: /* RS780 and RS880 also need this */ if (p->family < CHIP_RS780) { - DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); + dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n"); return -EINVAL; } if (pkt->count != 1) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n"); return -EINVAL; } if (idx_value > 3) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n"); return -EINVAL; } { @@ -2095,25 +2100,27 @@ static int r600_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n"); return -EINVAL; } if (reloc->robj != track->vgt_strmout_bo[idx_value]) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n"); return -EINVAL; } - offset = radeon_get_ib_value(p, idx+1) << 8; + offset = (u64)radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", - offset, track->vgt_strmout_bo_offset[idx_value]); + dev_warn_once(p->dev, + "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", + offset, track->vgt_strmout_bo_offset[idx_value]); return -EINVAL; } if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, + "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -2121,17 +2128,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_SURFACE_BASE_UPDATE: if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { - DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); + dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } if (pkt->count) { - DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); + dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid 
count)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. */ @@ -2139,14 +2146,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, + "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2158,14 +2166,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, + "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2178,23 +2187,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p, u64 offset; if (pkt->count != 3) { - DRM_ERROR("bad MEM_WRITE (invalid count)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { - DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2204,7 +2213,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } case PACKET3_COPY_DW: if (pkt->count != 4) { - DRM_ERROR("bad COPY_DW (invalid count)\n"); + dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { @@ -2212,14 +2221,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p, /* SRC is memory. 
*/ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad COPY_DW (missing src reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2236,14 +2245,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p, /* DST is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); if (r) { - DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2259,7 +2268,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, case PACKET3_NOP: break; default: - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; @@ -2306,7 +2315,7 @@ int r600_cs_parse(struct radeon_cs_parser *p) r = r600_packet3_check(p, &pkt); break; default: - DRM_ERROR("Unknown packet type %d !\n", pkt.type); + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); kfree(p->track); p->track = NULL; return -EINVAL; @@ -2346,13 +2355,13 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, *cs_reloc = NULL; if (p->chunk_relocs == NULL) { - DRM_ERROR("No relocation chunk !\n"); + dev_warn_once(p->dev, "No relocation chunk !\n"); return -EINVAL; } idx = p->dma_reloc_idx; if (idx >= p->nrelocs) { - DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", - idx, p->nrelocs); + dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d !\n", + idx, p->nrelocs); return -EINVAL; } *cs_reloc = &p->relocs[idx]; @@ -2385,8 +2394,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) do { if (p->idx >= ib_chunk->length_dw) { - DRM_ERROR("Can not parse packet at %d after CS end %d !\n", - p->idx, ib_chunk->length_dw); + dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n", + p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; @@ -2399,7 +2408,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_WRITE\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); return -EINVAL; } if (tiled) { @@ -2417,20 +2426,20 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { - 
DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } if (tiled) { @@ -2484,31 +2493,31 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) } } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA write dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_CONSTANT_FILL: if (p->family < CHIP_RV770) { - DRM_ERROR("Constant Fill is 7xx only !\n"); + dev_warn_once(p->dev, "Constant Fill is 7xx only !\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_WRITE\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -2519,7 +2528,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) p->idx += 1; break; default: - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); + dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunk_ib->length_dw); diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 64980a61d38a..81d58ef667dd 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -153,7 +153,7 @@ void r600_dpm_print_ps_status(struct radeon_device *rdev, u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 vblank_in_pixels; @@ -180,7 +180,7 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; u32 vrefresh = 0; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f3551ebaa2f0..9758f3a9df75 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -116,7 +116,7 @@ void r600_audio_update_hdmi(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, audio_work); - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct r600_audio_pin audio_status = r600_audio_status(rdev); struct drm_encoder *encoder; 
bool changed = false; @@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder) return result; } -/* - * write the audio workaround status to the hardware - */ -void r600_hdmi_audio_workaround(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - uint32_t offset = dig->afmt->offset; - bool hdmi_audio_workaround = false; /* FIXME */ - u32 value; - - if (!hdmi_audio_workaround || - r600_hdmi_is_audio_buffer_filled(encoder)) - value = 0; /* disable workaround */ - else - value = HDMI0_AUDIO_TEST_EN; /* enable workaround */ - WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, - value, ~HDMI0_AUDIO_TEST_EN); -} - void r600_hdmi_audio_set_dto(struct radeon_device *rdev, struct radeon_crtc *crtc, unsigned int clock) { diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 0999c8eaae94..527b9d19d730 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -75,11 +75,12 @@ #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_execbuf_util.h> +#include <drm/drm_exec.h> #include <drm/drm_gem.h> #include <drm/drm_audio_component.h> #include <drm/drm_suballoc.h> +#include <drm/drm_print.h> #include "radeon_family.h" #include "radeon_mode.h" @@ -394,9 +395,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); int radeon_fence_wait_next(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); -int radeon_fence_wait_any(struct radeon_device *rdev, - struct radeon_fence **fences, - bool intr); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); @@ -457,7 +455,8 @@ struct radeon_mman { struct radeon_bo_list { struct radeon_bo *robj; - struct ttm_validate_buffer tv; + struct list_head list; + bool shared; uint64_t gpu_offset; unsigned preferred_domains; unsigned allowed_domains; @@ -1030,6 +1029,7 @@ struct radeon_cs_parser { struct radeon_bo_list *vm_bos; struct list_head validated; unsigned dma_reloc_idx; + struct drm_exec exec; /* indices of various chunks */ struct radeon_cs_chunk *chunk_ib; struct radeon_cs_chunk *chunk_relocs; @@ -1043,7 +1043,6 @@ struct radeon_cs_parser { u32 cs_flags; u32 ring; s32 priority; - struct ww_acquire_ctx ticket; }; static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) @@ -2297,7 +2296,7 @@ typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t); struct radeon_device { struct device *dev; - struct drm_device *ddev; + struct drm_device ddev; struct pci_dev *pdev; #ifdef __alpha__ struct pci_controller *hose; @@ -2476,6 +2475,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index); void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); +static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev) +{ + return &rdev->ddev; +} + /* * Cast helper */ diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 603a78e41ba5..08f8ba4fd148 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ 
b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -405,11 +405,10 @@ static int radeon_atif_handler(struct radeon_device *rdev, if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { if ((rdev->flags & RADEON_IS_PX) && radeon_atpx_dgpu_req_power_for_displays()) { - pm_runtime_get_sync(rdev->ddev->dev); + pm_runtime_get_sync(rdev_to_drm(rdev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ - drm_helper_hpd_irq_event(rdev->ddev); - pm_runtime_mark_last_busy(rdev->ddev->dev); - pm_runtime_put_autosuspend(rdev->ddev->dev); + drm_helper_hpd_irq_event(rdev_to_drm(rdev)); + pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); } } /* TODO: check other events */ @@ -736,7 +735,7 @@ int radeon_acpi_init(struct radeon_device *rdev) struct radeon_encoder *target = NULL; /* Find the encoder controlling the brightness */ - list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list, + list_for_each_entry(tmp, &rdev_to_drm(rdev)->mode_config.encoder_list, head) { struct radeon_encoder *enc = to_radeon_encoder(tmp); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index a3d749e350f9..89d7b0e9e79f 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -161,7 +161,7 @@ struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev) static int radeon_agp_head_acquire(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct pci_dev *pdev = to_pci_dev(dev->dev); if (!rdev->agp) diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 1e00f6b99f94..9e697f10f9ca 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev); */ extern int r300_init(struct radeon_device *rdev); extern void r300_fini(struct radeon_device *rdev); +extern void r300_gpu_init(struct radeon_device *rdev); extern int r300_suspend(struct radeon_device *rdev); extern int r300_resume(struct radeon_device *rdev); extern int r300_asic_reset(struct radeon_device *rdev, bool hard); @@ -400,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t size); void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock); -void r600_hdmi_audio_workaround(struct drm_encoder *encoder); int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); u32 r600_get_xclk(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 10793a433bf5..81a0a91921b9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -187,7 +187,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); - rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); + rdev->i2c_bus[i] = radeon_i2c_create(rdev_to_drm(rdev), &i2c, stmp); } gpio = (ATOM_GPIO_I2C_ASSIGMENT *) ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); @@ -1716,27 +1716,25 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct case LCD_FAKE_EDID_PATCH_RECORD_TYPE: fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { - struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, 
(int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); - if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - - if (drm_edid_is_valid(edid)) { - rdev->mode_info.bios_hardcoded_edid = edid; - rdev->mode_info.bios_hardcoded_edid_size = edid_size; - } else - kfree(edid); - } + const struct drm_edid *edid; + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size); + if (drm_edid_valid(edid)) + rdev->mode_info.bios_hardcoded_edid = edid; + else + drm_edid_free(edid); + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? - struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 74753bb26d33..8d64ba18572e 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -196,7 +196,7 @@ static void radeon_audio_enable(struct radeon_device *rdev, return; if (rdev->mode_info.mode_config_initialized) { - list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) { if (radeon_encoder_is_digital(encoder)) { radeon_encoder = to_radeon_encoder(encoder); dig = radeon_encoder->enc_priv; @@ -303,6 +303,7 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset, static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct cea_sad *sads; int sad_count; @@ -310,7 +311,7 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) if (!connector) return; - sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); if (sad_count <= 0) @@ -326,6 +327,7 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); u8 *sadb = NULL; int sad_count; @@ -333,8 +335,7 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) if (!connector) return; - sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), - &sadb); + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); if (sad_count < 0) { DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); @@ -409,7 +410,7 @@ void radeon_audio_detect(struct 
drm_connector *connector, radeon_encoder->audio = rdev->audio.hdmi_funcs; } - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector->display_info.has_audio) { if (!dig->pin) dig->pin = radeon_audio_get_pin(encoder); radeon_audio_enable(rdev, dig->pin, 0xf); @@ -646,7 +647,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, if (!connector) return; - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector->display_info.has_audio) { radeon_audio_set_mute(encoder, true); radeon_audio_write_speaker_allocation(encoder); @@ -686,7 +687,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, if (!connector) return; - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector->display_info.has_audio) { radeon_audio_write_speaker_allocation(encoder); radeon_audio_write_sad_regs(encoder); radeon_audio_write_latency_fields(encoder, mode); @@ -759,19 +760,25 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port, if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized) return 0; - list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + const struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + encoder = connector_funcs->best_encoder(connector); + + if (!encoder) + continue; + if (!radeon_encoder_is_digital(encoder)) continue; radeon_encoder = to_radeon_encoder(encoder); dig = radeon_encoder->enc_priv; if (!dig->pin || dig->pin->id != port) continue; - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - continue; *enabled = true; + mutex_lock(&connector->eld_mutex); ret = drm_eld_size(connector->eld); memcpy(buf, connector->eld, min(max_bytes, ret)); + mutex_unlock(&connector->eld_mutex); break; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 6952b1273b0f..df8d7f56b028 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -370,27 +370,22 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { int edid_info, size; - struct edid *edid; + const struct drm_edid *edid; unsigned char *raw; - edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); + edid_info = combios_get_table_offset(rdev_to_drm(rdev), COMBIOS_HARDCODED_EDID_TABLE); if (!edid_info) return false; raw = rdev->bios + edid_info; size = EDID_LENGTH * (raw[0x7e] + 1); - edid = kmalloc(size, GFP_KERNEL); - if (edid == NULL) - return false; - - memcpy((unsigned char *)edid, raw, size); + edid = drm_edid_alloc(raw, size); - if (!drm_edid_is_valid(edid)) { - kfree(edid); + if (!drm_edid_valid(edid)) { + drm_edid_free(edid); return false; } rdev->mode_info.bios_hardcoded_edid = edid; - rdev->mode_info.bios_hardcoded_edid_size = size; return true; } @@ -398,18 +393,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) struct edid * radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { - struct edid *edid; - - if (rdev->mode_info.bios_hardcoded_edid) { - edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); - if (edid) { - memcpy((unsigned char *)edid, - (unsigned char *)rdev->mode_info.bios_hardcoded_edid, - rdev->mode_info.bios_hardcoded_edid_size); - return edid; - } - } - return NULL; + return 
drm_edid_duplicate(drm_edid_raw(rdev->mode_info.bios_hardcoded_edid)); } static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, @@ -642,7 +626,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct radeon_i2c_bus_rec i2c; u16 offset; u8 id, blocks, clk, data; @@ -670,7 +654,7 @@ static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct r void radeon_combios_i2c_init(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct radeon_i2c_bus_rec i2c; /* actual hw pads @@ -812,7 +796,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) bool radeon_combios_sideport_present(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); u16 igp_info; /* sideport is AMD only */ @@ -915,7 +899,7 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); uint16_t tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; @@ -2637,7 +2621,7 @@ static const char *thermal_controller_names[] = { void radeon_combios_get_power_modes(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); u16 offset, misc, misc2 = 0; u8 rev, tmp; int state_index = 0; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index b84b58926106..012d8b2295b8 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -109,7 +109,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: if (radeon_connector->use_digital) { - if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -117,7 +117,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: - if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -126,7 +126,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || - drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -150,7 +150,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) break; } - if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { /* hdmi deep color only implemented on DCE4+ */ if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) { DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. 
Using 8 bpc.\n", @@ -255,21 +255,6 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, return NULL; } -struct edid *radeon_connector_edid(struct drm_connector *connector) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_property_blob *edid_blob = connector->edid_blob_ptr; - - if (radeon_connector->edid) { - return radeon_connector->edid; - } else if (edid_blob) { - struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL); - if (edid) - radeon_connector->edid = edid; - } - return radeon_connector->edid; -} - static void radeon_connector_get_edid(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -520,6 +505,9 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn continue; mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); } } @@ -818,7 +806,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) } static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); @@ -887,10 +875,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -980,7 +966,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) } static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; @@ -1071,17 +1057,15 @@ radeon_vga_detect(struct drm_connector *connector, bool force) */ if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && - rdev->mode_info.bios_hardcoded_edid_size) { + rdev->mode_info.bios_hardcoded_edid) { ret = connector_status_connected; } radeon_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1128,7 +1112,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) } static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) return MODE_CLOCK_RANGE; @@ -1166,10 +1150,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1394,7 +1376,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) out: if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && - rdev->mode_info.bios_hardcoded_edid_size) { + rdev->mode_info.bios_hardcoded_edid) { radeon_connector->use_digital = true; 
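/*
 * Context sketch, an assumption drawn from the hunks that follow rather than
 * code taken verbatim from them: the radeon_connector_late_register() hook
 * added below defers drm_dp_aux_register() until drm_connector_register()
 * has created connector->kdev, while probe time only does drm_dp_aux_init().
 * The two phases look roughly like this (helper names are placeholders):
 */
static void example_aux_early_init(struct radeon_connector *radeon_connector)
{
	/* probe time: connector->kdev does not exist yet, so only init */
	drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
	radeon_connector->ddc_bus->has_aux = true;
}

static int example_aux_late_register(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	if (!radeon_connector->ddc_bus->has_aux)
		return 0;

	/* drm_connector_register() has created connector->kdev by now */
	radeon_connector->ddc_bus->aux.dev = connector->kdev;
	return drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
}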
ret = connector_status_connected; } @@ -1414,10 +1396,8 @@ out: } exit: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1459,7 +1439,7 @@ static void radeon_dvi_force(struct drm_connector *connector) } static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; @@ -1478,7 +1458,7 @@ static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connecto (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) return MODE_OK; - else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + else if (ASIC_IS_DCE6(rdev) && connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1726,16 +1706,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) } out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; @@ -1774,7 +1752,7 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return radeon_dp_mode_valid_helper(connector, mode); } else { - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (ASIC_IS_DCE6(rdev) && connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1788,6 +1766,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector return MODE_OK; } +static int +radeon_connector_late_register(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int r = 0; + + if (radeon_connector->ddc_bus->has_aux) { + radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; + r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); + } + + return r; +} + static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, @@ -1802,6 +1794,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_edp_connector_funcs = { @@ -1812,6 +1805,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { @@ -1822,6 +1816,7 @@ static const struct drm_connector_funcs 
radeon_lvds_bridge_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; void diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a6700d7278bf..3f9c0011244f 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -182,11 +182,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) } } - p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; - p->relocs[i].tv.num_shared = !r->write_domain; - - radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, - priority); + p->relocs[i].shared = !r->write_domain; + radeon_cs_buckets_add(&buckets, &p->relocs[i].list, priority); } radeon_cs_buckets_get_list(&buckets, &p->validated); @@ -197,7 +194,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (need_mmap_lock) mmap_read_lock(current->mm); - r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); + r = radeon_bo_list_validate(p->rdev, &p->exec, &p->validated, p->ring); if (need_mmap_lock) mmap_read_unlock(current->mm); @@ -253,12 +250,11 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p) struct radeon_bo_list *reloc; int r; - list_for_each_entry(reloc, &p->validated, tv.head) { + list_for_each_entry(reloc, &p->validated, list) { struct dma_resv *resv; resv = reloc->robj->tbo.base.resv; - r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, - reloc->tv.num_shared); + r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->shared); if (r) return r; } @@ -276,6 +272,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) s32 priority = 0; INIT_LIST_HEAD(&p->validated); + drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); if (!cs->num_chunks) { return 0; @@ -397,8 +394,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) static int cmp_size_smaller_first(void *priv, const struct list_head *a, const struct list_head *b) { - struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head); - struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); + struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, list); + struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, list); /* Sort A before B if A is smaller. */ if (la->robj->tbo.base.size > lb->robj->tbo.base.size) @@ -412,16 +409,17 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a, * radeon_cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. * @error: error number - * @backoff: indicator to backoff the reservation * * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. **/ -static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff) +static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; if (!error) { + struct radeon_bo_list *reloc; + /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. * This assures that the smallest buffers are added first @@ -433,15 +431,17 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo * per frame under memory pressure. 
*/ list_sort(NULL, &parser->validated, cmp_size_smaller_first); - - ttm_eu_fence_buffer_objects(&parser->ticket, - &parser->validated, - &parser->ib.fence->base); - } else if (backoff) { - ttm_eu_backoff_reservation(&parser->ticket, - &parser->validated); + list_for_each_entry(reloc, &parser->validated, list) { + dma_resv_add_fence(reloc->robj->tbo.base.resv, + &parser->ib.fence->base, + reloc->shared ? + DMA_RESV_USAGE_READ : + DMA_RESV_USAGE_WRITE); + } } + drm_exec_fini(&parser->exec); + if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { struct radeon_bo *bo = parser->relocs[i].robj; @@ -693,7 +693,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); - radeon_cs_parser_fini(&parser, r, false); + radeon_cs_parser_fini(&parser, r); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; @@ -707,7 +707,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } if (r) { - radeon_cs_parser_fini(&parser, r, false); + radeon_cs_parser_fini(&parser, r); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; @@ -724,7 +724,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; } out: - radeon_cs_parser_fini(&parser, r, true); + radeon_cs_parser_fini(&parser, r); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; @@ -834,7 +834,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p, ib = p->ib.ptr; idx = pkt->idx; for (i = 0; i <= (pkt->count + 1); i++, idx++) - DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); + dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]); } /** diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index afbb3a80c0c6..60afaa8e56b4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -26,7 +26,6 @@ * Jerome Glisse */ -#include <linux/console.h> #include <linux/efi.h> #include <linux/pci.h> #include <linux/pm_runtime.h> @@ -35,6 +34,7 @@ #include <linux/vgaarb.h> #include <drm/drm_cache.h> +#include <drm/drm_client_event.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> #include <drm/drm_file.h> @@ -529,7 +529,7 @@ int radeon_wb_init(struct radeon_device *rdev) * @mc: memory controller structure holding memory informations * @base: base address at which to put VRAM * - * Function will place try to place VRAM at base address provided + * Function will try to place VRAM at base address provided * as parameter (which is so far either PCI aperture address or * for IGP TOM base address). * @@ -554,15 +554,15 @@ int radeon_wb_init(struct radeon_device *rdev) * cover the whole aperture even if VRAM size is inferior to aperture size * Novell bug 204882 + along with lots of ubuntu ones * - * Note 3: when limiting vram it's safe to overwritte real_vram_size because + * Note 3: when limiting vram it's safe to overwrite real_vram_size because * we are not in case where real_vram_size is inferior to mc_vram_size (ie - * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu + * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu * ones) * * Note 4: IGP TOM addr should be the same as the aperture addr, we don't * explicitly check for that thought. * - * FIXME: when reducing VRAM size align new size on power of 2. 
+ * FIXME: when reducing VRAM size, align new size on power of 2. */ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) { @@ -593,7 +593,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 * @rdev: radeon device structure holding all necessary informations * @mc: memory controller structure holding memory informations * - * Function will place try to place GTT before or after VRAM. + * Function will try to place GTT before or after VRAM. * * If GTT size is bigger than space left then we ajust GTT size. * Thus function will never fails. @@ -760,7 +760,7 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev) if (rdev->is_atom_bios) atom_asic_init(rdev->mode_info.atom_context); else - radeon_combios_asic_init(rdev->ddev); + radeon_combios_asic_init(rdev_to_drm(rdev)); return true; } else { dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); @@ -980,7 +980,7 @@ int radeon_atombios_init(struct radeon_device *rdev) return -ENOMEM; rdev->mode_info.atom_card_info = atom_card_info; - atom_card_info->dev = rdev->ddev; + atom_card_info->dev = rdev_to_drm(rdev); atom_card_info->reg_read = cail_reg_read; atom_card_info->reg_write = cail_reg_write; /* needed for iio ops */ @@ -1005,7 +1005,7 @@ int radeon_atombios_init(struct radeon_device *rdev) mutex_init(&rdev->mode_info.atom_context->mutex); mutex_init(&rdev->mode_info.atom_context->scratch_mutex); - radeon_atom_initialize_bios_scratch_regs(rdev->ddev); + radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev)); atom_allocate_fb_scratch(rdev->mode_info.atom_context); return 0; } @@ -1049,7 +1049,7 @@ void radeon_atombios_fini(struct radeon_device *rdev) */ int radeon_combios_init(struct radeon_device *rdev) { - radeon_combios_initialize_bios_scratch_regs(rdev->ddev); + radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev)); return 0; } @@ -1285,9 +1285,6 @@ int radeon_device_init(struct radeon_device *rdev, bool runtime = false; rdev->shutdown = false; - rdev->dev = &pdev->dev; - rdev->ddev = ddev; - rdev->pdev = pdev; rdev->flags = flags; rdev->family = flags & RADEON_FAMILY_MASK; rdev->is_atom_bios = false; @@ -1545,7 +1542,7 @@ void radeon_device_fini(struct radeon_device *rdev) * Called at driver suspend. */ int radeon_suspend_kms(struct drm_device *dev, bool suspend, - bool fbcon, bool freeze) + bool notify_clients, bool freeze) { struct radeon_device *rdev; struct pci_dev *pdev; @@ -1637,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (fbcon) { - console_lock(); - radeon_fbdev_set_suspend(rdev, 1); - console_unlock(); - } + if (notify_clients) + drm_client_dev_suspend(dev); + return 0; } @@ -1652,7 +1647,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, * Returns 0 for success or an error on failure. * Called at driver resume. 
*/ -int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; @@ -1663,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { - console_lock(); - } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - if (fbcon) - console_unlock(); + if (pci_enable_device(pdev)) return -1; - } } /* resume AGP if in use */ radeon_agp_resume(rdev); @@ -1733,7 +1722,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ - if (fbcon) { + if (notify_clients) { drm_helper_resume_force_mode(dev); /* turn on display hw */ drm_modeset_lock_all(dev); @@ -1749,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); - console_unlock(); - } + if (notify_clients) + drm_client_dev_resume(dev); return 0; } @@ -1847,7 +1834,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) downgrade_write(&rdev->exclusive_lock); - drm_helper_resume_force_mode(rdev->ddev); + drm_helper_resume_force_mode(rdev_to_drm(rdev)); /* set the power state here in case we are a PX system or headless */ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5f1d24d3120c..35fb99bcd9a7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -302,13 +302,13 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev)) return; - spin_lock_irqsave(&rdev->ddev->event_lock, flags); + spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags); if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " "RADEON_FLIP_SUBMITTED(%d)\n", radeon_crtc->flip_status, RADEON_FLIP_SUBMITTED); - spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags); return; } @@ -334,7 +334,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) */ if (update_pending && (DRM_SCANOUTPOS_VALID & - radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, + radeon_get_crtc_scanoutpos(rdev_to_drm(rdev), crtc_id, GET_DISTANCE_TO_VBLANKSTART, &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && @@ -347,7 +347,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) */ update_pending = 0; } - spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags); if (!update_pending) radeon_crtc_handle_flip(rdev, crtc_id); } @@ -370,14 +370,14 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) if (radeon_crtc == NULL) return; - spin_lock_irqsave(&rdev->ddev->event_lock, flags); + spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags); work = radeon_crtc->flip_work; if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " "RADEON_FLIP_SUBMITTED(%d)\n", radeon_crtc->flip_status, 
RADEON_FLIP_SUBMITTED); - spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags); return; } @@ -389,7 +389,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) if (work->event) drm_crtc_send_vblank_event(&radeon_crtc->base, work->event); - spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags); drm_crtc_vblank_put(&radeon_crtc->base); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); @@ -408,7 +408,7 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_flip_work *work = container_of(__work, struct radeon_flip_work, flip_work); struct radeon_device *rdev = work->rdev; - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; @@ -644,8 +644,6 @@ radeon_crtc_set_config(struct drm_mode_set *set, if (crtc->enabled) active = true; - pm_runtime_mark_last_busy(dev->dev); - rdev = dev->dev_private; /* if we have active crtcs and we don't have a power ref, take the current one */ @@ -926,10 +924,10 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, unsigned *fb_div, unsigned *ref_div) { /* limit reference * post divider to a maximum */ - ref_div_max = max(min(100 / post_div, ref_div_max), 1u); + ref_div_max = clamp(100 / post_div, 1u, ref_div_max); /* get matching reference and feedback divider */ - *ref_div = min(max(den/post_div, 1u), ref_div_max); + *ref_div = clamp(den / post_div, 1u, ref_div_max); *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); /* limit fb divider to its maximum */ @@ -1297,12 +1295,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; fb->obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); if (ret) { fb->obj[0] = NULL; @@ -1314,6 +1313,7 @@ radeon_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * radeon_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; @@ -1340,7 +1340,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); + ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj); if (ret) { kfree(fb); drm_gem_object_put(obj); @@ -1401,7 +1401,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) if (rdev->is_atom_bios) { rdev->mode_info.coherent_mode_property = - drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1); + drm_property_create_range(rdev_to_drm(rdev), 0, "coherent", 0, 1); if (!rdev->mode_info.coherent_mode_property) return -ENOMEM; } @@ -1409,57 +1409,57 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) if (!ASIC_IS_AVIVO(rdev)) { sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); rdev->mode_info.tmds_pll_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "tmds_pll", radeon_tmds_pll_enum_list, sz); } rdev->mode_info.load_detect_property 
= - drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1); + drm_property_create_range(rdev_to_drm(rdev), 0, "load detection", 0, 1); if (!rdev->mode_info.load_detect_property) return -ENOMEM; - drm_mode_create_scaling_mode_property(rdev->ddev); + drm_mode_create_scaling_mode_property(rdev_to_drm(rdev)); sz = ARRAY_SIZE(radeon_tv_std_enum_list); rdev->mode_info.tv_std_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "tv standard", radeon_tv_std_enum_list, sz); sz = ARRAY_SIZE(radeon_underscan_enum_list); rdev->mode_info.underscan_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "underscan", radeon_underscan_enum_list, sz); rdev->mode_info.underscan_hborder_property = - drm_property_create_range(rdev->ddev, 0, + drm_property_create_range(rdev_to_drm(rdev), 0, "underscan hborder", 0, 128); if (!rdev->mode_info.underscan_hborder_property) return -ENOMEM; rdev->mode_info.underscan_vborder_property = - drm_property_create_range(rdev->ddev, 0, + drm_property_create_range(rdev_to_drm(rdev), 0, "underscan vborder", 0, 128); if (!rdev->mode_info.underscan_vborder_property) return -ENOMEM; sz = ARRAY_SIZE(radeon_audio_enum_list); rdev->mode_info.audio_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "audio", radeon_audio_enum_list, sz); sz = ARRAY_SIZE(radeon_dither_enum_list); rdev->mode_info.dither_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "dither", radeon_dither_enum_list, sz); sz = ARRAY_SIZE(radeon_output_csc_enum_list); rdev->mode_info.output_csc_property = - drm_property_create_enum(rdev->ddev, 0, + drm_property_create_enum(rdev_to_drm(rdev), 0, "output_csc", radeon_output_csc_enum_list, sz); @@ -1578,29 +1578,29 @@ int radeon_modeset_init(struct radeon_device *rdev) int i; int ret; - drm_mode_config_init(rdev->ddev); + drm_mode_config_init(rdev_to_drm(rdev)); rdev->mode_info.mode_config_initialized = true; - rdev->ddev->mode_config.funcs = &radeon_mode_funcs; + rdev_to_drm(rdev)->mode_config.funcs = &radeon_mode_funcs; if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600) - rdev->ddev->mode_config.async_page_flip = true; + rdev_to_drm(rdev)->mode_config.async_page_flip = true; if (ASIC_IS_DCE5(rdev)) { - rdev->ddev->mode_config.max_width = 16384; - rdev->ddev->mode_config.max_height = 16384; + rdev_to_drm(rdev)->mode_config.max_width = 16384; + rdev_to_drm(rdev)->mode_config.max_height = 16384; } else if (ASIC_IS_AVIVO(rdev)) { - rdev->ddev->mode_config.max_width = 8192; - rdev->ddev->mode_config.max_height = 8192; + rdev_to_drm(rdev)->mode_config.max_width = 8192; + rdev_to_drm(rdev)->mode_config.max_height = 8192; } else { - rdev->ddev->mode_config.max_width = 4096; - rdev->ddev->mode_config.max_height = 4096; + rdev_to_drm(rdev)->mode_config.max_width = 4096; + rdev_to_drm(rdev)->mode_config.max_height = 4096; } - rdev->ddev->mode_config.preferred_depth = 24; - rdev->ddev->mode_config.prefer_shadow = 1; + rdev_to_drm(rdev)->mode_config.preferred_depth = 24; + rdev_to_drm(rdev)->mode_config.prefer_shadow = 1; - rdev->ddev->mode_config.fb_modifiers_not_supported = true; + rdev_to_drm(rdev)->mode_config.fb_modifiers_not_supported = true; ret = radeon_modeset_create_props(rdev); if (ret) { @@ -1618,11 +1618,11 @@ int radeon_modeset_init(struct radeon_device *rdev) /* allocate crtcs */ for (i = 0; i < rdev->num_crtc; i++) { - radeon_crtc_init(rdev->ddev, i); + 
radeon_crtc_init(rdev_to_drm(rdev), i); } /* okay we should have all the bios connectors */ - ret = radeon_setup_enc_conn(rdev->ddev); + ret = radeon_setup_enc_conn(rdev_to_drm(rdev)); if (!ret) { return ret; } @@ -1639,7 +1639,7 @@ int radeon_modeset_init(struct radeon_device *rdev) /* setup afmt */ radeon_afmt_init(rdev); - drm_kms_helper_poll_init(rdev->ddev); + drm_kms_helper_poll_init(rdev_to_drm(rdev)); /* do pm late init */ ret = radeon_pm_late_init(rdev); @@ -1650,15 +1650,15 @@ int radeon_modeset_init(struct radeon_device *rdev) void radeon_modeset_fini(struct radeon_device *rdev) { if (rdev->mode_info.mode_config_initialized) { - drm_kms_helper_poll_fini(rdev->ddev); + drm_kms_helper_poll_fini(rdev_to_drm(rdev)); radeon_hpd_fini(rdev); - drm_helper_force_disable_all(rdev->ddev); + drm_helper_force_disable_all(rdev_to_drm(rdev)); radeon_afmt_fini(rdev); - drm_mode_config_cleanup(rdev->ddev); + drm_mode_config_cleanup(rdev_to_drm(rdev)); rdev->mode_info.mode_config_initialized = false; } - kfree(rdev->mode_info.bios_hardcoded_edid); + drm_edid_free(rdev->mode_info.bios_hardcoded_edid); /* free i2c buses */ radeon_i2c_fini(rdev); @@ -1722,7 +1722,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((radeon_encoder->underscan_type == UNDERSCAN_ON) || ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && - drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && + connector->display_info.is_hdmi && is_hdtv_mode(mode)))) { if (radeon_encoder->underscan_hborder != 0) radeon_crtc->h_border = radeon_encoder->underscan_hborder; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 7bf08164140e..87fd6255c114 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -29,7 +29,7 @@ * OTHER DEALINGS IN THE SOFTWARE. */ - +#include <linux/aperture.h> #include <linux/compat.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -37,9 +37,10 @@ #include <linux/mmu_notifier.h> #include <linux/pci.h> -#include <drm/drm_aperture.h> +#include <drm/clients/drm_client_setup.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> #include <drm/drm_pciids.h> @@ -109,9 +110,10 @@ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values * 2.50.0 - Allows unaligned shader loads on CIK. 
(needed by OpenGL) + * 2.51.0 - Add evergreen/cayman OpenGL 4.6 compatibility */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 50 +#define KMS_DRIVER_MINOR 51 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_no_wb; @@ -239,27 +241,76 @@ module_param_named(uvd, radeon_uvd, int, 0444); MODULE_PARM_DESC(vce, "vce enable/disable vce support (1 = enable, 0 = disable)"); module_param_named(vce, radeon_vce, int, 0444); -int radeon_si_support = 1; -MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)"); +int radeon_si_support = -1; +MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(si_support, radeon_si_support, int, 0444); -int radeon_cik_support = 1; -MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); +int radeon_cik_support = -1; +MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)"); module_param_named(cik_support, radeon_cik_support, int, 0444); -static struct pci_device_id pciidlist[] = { +static const struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; - MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver kms_driver; +static bool radeon_support_enabled(struct device *dev, + const enum radeon_family family) +{ + const char *gen; + int module_param = -1; + bool amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU); + bool support_by_default = true; + + switch (family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + gen = "SI"; + module_param = radeon_si_support; + amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_SI); + support_by_default = false; + break; + + case CHIP_BONAIRE: + case CHIP_HAWAII: + support_by_default = false; + fallthrough; + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + gen = "CIK"; + module_param = radeon_cik_support; + amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_CIK); + break; + + default: + /* All other chips are supported by radeon only */ + return true; + } + + if ((module_param == -1 && (support_by_default || !amdgpu_support_built)) || + module_param == 1) + return true; + + if (!module_param) + dev_info(dev, "%s support disabled by module param\n", gen); + + return false; +} + static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long flags = 0; - struct drm_device *dev; + struct drm_device *ddev; + struct radeon_device *rdev; + struct device *dev = &pdev->dev; + const struct drm_format_info *format; int ret; if (!ent) @@ -267,81 +318,59 @@ static int radeon_pci_probe(struct pci_dev *pdev, flags = ent->driver_data; - if (!radeon_si_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(&pdev->dev, - "SI support disabled by module param\n"); - return -ENODEV; - } - } - if (!radeon_cik_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(&pdev->dev, - "CIK support disabled by module param\n"); - return -ENODEV; - } - } + if (!radeon_support_enabled(dev, flags & RADEON_FAMILY_MASK)) + return -ENODEV; if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver); + ret = aperture_remove_conflicting_pci_devices(pdev, kms_driver.name); if (ret) return ret; - dev 
= drm_dev_alloc(&kms_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + rdev = devm_drm_dev_alloc(dev, &kms_driver, typeof(*rdev), ddev); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + + rdev->dev = dev; + rdev->pdev = pdev; + ddev = rdev_to_drm(rdev); + ddev->dev_private = rdev; ret = pci_enable_device(pdev); if (ret) - goto err_free; + return ret; - pci_set_drvdata(pdev, dev); + pci_set_drvdata(pdev, ddev); + + ret = radeon_driver_load_kms(ddev, flags); + if (ret) + goto err; - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(ddev, flags); if (ret) - goto err_agp; + goto err; - radeon_fbdev_setup(dev->dev_private); + if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_C8); + else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) + format = drm_format_info(DRM_FORMAT_RGB565); + else + format = NULL; + + drm_client_setup(ddev, format); return 0; -err_agp: +err: pci_disable_device(pdev); -err_free: - drm_dev_put(dev); return ret; } static void -radeon_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_put_dev(dev); -} - -static void radeon_pci_shutdown(struct pci_dev *pdev) { - /* if we are running in a VM, make sure the device - * torn down properly on reboot/shutdown - */ - if (radeon_device_is_virtual()) - radeon_pci_remove(pdev); - #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64) /* * Some adapters need to be suspended before a @@ -458,7 +487,6 @@ static int radeon_pmops_runtime_idle(struct device *dev) } } - pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ return 1; @@ -480,7 +508,6 @@ long radeon_drm_ioctl(struct file *filp, ret = drm_ioctl(filp, cmd, arg); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } @@ -520,6 +547,7 @@ static const struct file_operations radeon_driver_kms_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = radeon_kms_compat_ioctl, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct drm_ioctl_desc radeon_ioctls_kms[] = { @@ -569,7 +597,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = { static const struct drm_driver kms_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET, - .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, .unload = radeon_driver_unload_kms, @@ -581,9 +608,10 @@ static const struct drm_driver kms_driver = { .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, + RADEON_FBDEV_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, - .date = DRIVER_DATE, .major = KMS_DRIVER_MAJOR, .minor = KMS_DRIVER_MINOR, .patchlevel = KMS_DRIVER_PATCHLEVEL, @@ -593,7 +621,6 @@ static struct pci_driver radeon_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = radeon_pci_probe, - .remove = radeon_pci_remove, .shutdown = radeon_pci_shutdown, .driver.pm = &radeon_pm_ops, }; diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 02a65971d140..0f3dbffc492d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -43,7 +43,6 @@ #define DRIVER_NAME "radeon" #define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20080528" /* Interface history: * diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3de3dce9e89d..fafed331e0a0 100644 --- 
a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *clone_encoder; - uint32_t index_mask = 0; + uint32_t index_mask = drm_encoder_mask(encoder); int count; /* DIG routing gets problematic */ @@ -386,7 +386,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, case DRM_MODE_CONNECTOR_HDMIB: if (radeon_connector->use_digital) { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (ASIC_IS_DCE6(rdev) && connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else @@ -408,7 +408,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, return false; else { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { + if (ASIC_IS_DCE6(rdev) && connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index 02bf25759059..fd083aaa91bb 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj) } static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { - const struct drm_format_info *info; struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; @@ -67,7 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, int height = mode_cmd->height; u32 cpp; - info = drm_get_format_info(rdev->ddev, mode_cmd); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -148,15 +147,14 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user) struct radeon_device *rdev = fb_helper->dev->dev_private; int ret; - ret = pm_runtime_get_sync(rdev->ddev->dev); + ret = pm_runtime_get_sync(rdev_to_drm(rdev)->dev); if (ret < 0 && ret != -EACCES) goto err_pm_runtime_mark_last_busy; return 0; err_pm_runtime_mark_last_busy: - pm_runtime_mark_last_busy(rdev->ddev->dev); - pm_runtime_put_autosuspend(rdev->ddev->dev); + pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); return ret; } @@ -165,8 +163,7 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user) struct drm_fb_helper *fb_helper = info->par; struct radeon_device *rdev = fb_helper->dev->dev_private; - pm_runtime_mark_last_busy(rdev->ddev->dev); - pm_runtime_put_autosuspend(rdev->ddev->dev); + pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev); return 0; } @@ -185,8 +182,6 @@ static void radeon_fbdev_fb_destroy(struct fb_info *info) radeon_fbdev_destroy_pinned_object(gobj); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops radeon_fbdev_fb_ops = { @@ -198,16 +193,16 @@ static const struct fb_ops radeon_fbdev_fb_ops = { .fb_destroy = radeon_fbdev_fb_destroy, }; -/* - * Fbdev helpers and struct drm_fb_helper_funcs - */ +static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { +}; -static int 
radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; + const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; - struct fb_info *info; + struct fb_info *info = fb_helper->info; struct drm_gem_object *gobj; struct radeon_bo *rbo; struct drm_framebuffer *fb; @@ -224,7 +219,9 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj); + format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format, + mode_cmd.modifier[0]); + ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; @@ -236,22 +233,16 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, ret = -ENOMEM; goto err_radeon_fbdev_destroy_pinned_object; } - ret = radeon_framebuffer_init(rdev->ddev, fb, &mode_cmd, gobj); + ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto err_kfree; } /* setup helper */ + fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; fb_helper->fb = fb; - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_framebuffer_unregister_private; - } - info->fbops = &radeon_fbdev_fb_ops; /* radeon resume is fragile and needs a vt switch to help it along */ @@ -277,10 +268,6 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper, return 0; -err_drm_framebuffer_unregister_private: - fb_helper->fb = NULL; - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); err_kfree: kfree(fb); err_radeon_fbdev_destroy_pinned_object: @@ -288,119 +275,9 @@ err_radeon_fbdev_destroy_pinned_object: return ret; } -static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = { - .fb_probe = radeon_fbdev_fb_helper_fb_probe, -}; - -/* - * Fbdev client and struct drm_client_funcs - */ - -static void radeon_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = fb_helper->dev; - struct radeon_device *rdev = dev->dev_private; - - if (fb_helper->info) { - vga_switcheroo_client_fb_set(rdev->pdev, NULL); - drm_helper_force_disable_all(dev); - drm_fb_helper_unregister_info(fb_helper); - } else { - drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - } -} - -static int radeon_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - vga_switcheroo_process_delayed_switch(); - - return 0; -} - -static int radeon_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - struct radeon_device *rdev = dev->dev_private; - int ret; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err_drm_err; - - if (!drm_drv_uses_atomic_modeset(dev)) - 
drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto err_drm_fb_helper_fini; - - vga_switcheroo_client_fb_set(rdev->pdev, fb_helper->info); - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); -err_drm_err: - drm_err(dev, "Failed to setup radeon fbdev emulation (ret=%d)\n", ret); - return ret; -} - -static const struct drm_client_funcs radeon_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = radeon_fbdev_client_unregister, - .restore = radeon_fbdev_client_restore, - .hotplug = radeon_fbdev_client_hotplug, -}; - -void radeon_fbdev_setup(struct radeon_device *rdev) -{ - struct drm_fb_helper *fb_helper; - int bpp_sel = 32; - int ret; - - if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) - bpp_sel = 8; - else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024)) - bpp_sel = 16; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) - return; - drm_fb_helper_prepare(rdev->ddev, fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs); - - ret = drm_client_init(rdev->ddev, &fb_helper->client, "radeon-fbdev", - &radeon_fbdev_client_funcs); - if (ret) { - drm_err(rdev->ddev, "Failed to register client: %d\n", ret); - goto err_drm_client_init; - } - - drm_client_register(&fb_helper->client); - - return; - -err_drm_client_init: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); -} - -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ - if (rdev->ddev->fb_helper) - drm_fb_helper_set_suspend(rdev->ddev->fb_helper, state); -} - bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { - struct drm_fb_helper *fb_helper = rdev->ddev->fb_helper; + struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper; struct drm_gem_object *gobj; if (!fb_helper) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 4fb780d96f32..167d6f122b8e 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -150,7 +150,7 @@ int radeon_fence_emit(struct radeon_device *rdev, rdev->fence_context + ring, seq); radeon_fence_ring_emit(rdev, ring, *fence); - trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); + trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq); radeon_fence_schedule_check(rdev, ring); return 0; } @@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f) if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) return true; - if (down_read_trylock(&rdev->exclusive_lock)) { - radeon_fence_process(rdev, ring); - up_read(&rdev->exclusive_lock); - - if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) - return true; - } return false; } @@ -489,7 +482,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, if (!target_seq[i]) continue; - trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); + trace_radeon_fence_wait_begin(rdev_to_drm(rdev), i, target_seq[i]); radeon_irq_kms_sw_irq_get(rdev, i); } @@ -511,7 +504,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, continue; radeon_irq_kms_sw_irq_put(rdev, i); - trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); + trace_radeon_fence_wait_end(rdev_to_drm(rdev), i, target_seq[i]); } return r; @@ -575,48 +568,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) } /** - * radeon_fence_wait_any - wait for a fence to signal on any ring - * - * @rdev: radeon device pointer - * @fences: radeon fence 
object(s) - * @intr: use interruptable sleep - * - * Wait for any requested fence to signal (all asics). Fence - * array is indexed by ring id. @intr selects whether to use - * interruptable (true) or non-interruptable (false) sleep when - * waiting for the fences. Used by the suballocator. - * Returns 0 if any fence has passed, error for all other cases. - */ -int radeon_fence_wait_any(struct radeon_device *rdev, - struct radeon_fence **fences, - bool intr) -{ - uint64_t seq[RADEON_NUM_RINGS]; - unsigned int i, num_rings = 0; - long r; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - seq[i] = 0; - - if (!fences[i]) - continue; - - seq[i] = fences[i]->seq; - ++num_rings; - } - - /* nothing to wait for ? */ - if (num_rings == 0) - return -ENOENT; - - r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - - return 0; -} - -/** * radeon_fence_wait_next - wait for the next fence to signal * * @rdev: radeon device pointer @@ -840,7 +791,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); rdev->fence_drv[ring].initialized = true; - dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n", + dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx\n", ring, rdev->fence_drv[ring].gpu_addr); return 0; } @@ -995,7 +946,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops, void radeon_debugfs_fence_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("radeon_gpu_reset", 0444, root, rdev, &radeon_debugfs_gpu_reset_fops); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 4bb242437ff6..acd89a20f272 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -346,14 +346,14 @@ int radeon_gart_init(struct radeon_device *rdev) DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); /* Allocate pages table */ - rdev->gart.pages = vzalloc(array_size(sizeof(void *), - rdev->gart.num_cpu_pages)); + rdev->gart.pages = vcalloc(rdev->gart.num_cpu_pages, + sizeof(void *)); if (rdev->gart.pages == NULL) { radeon_gart_fini(rdev); return -ENOMEM; } - rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t), - rdev->gart.num_gpu_pages)); + rdev->gart.pages_entry = vmalloc_array(rdev->gart.num_gpu_pages, + sizeof(uint64_t)); if (rdev->gart.pages_entry == NULL) { radeon_gart_fini(rdev); return -ENOMEM; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index e66a230331ee..18ca1bcfd2f9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -const struct drm_gem_object_funcs radeon_gem_object_funcs; - static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -88,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { radeon_mn_unregister(robj); - radeon_bo_unref(&robj); + ttm_bo_fini(&robj->tbo); } } @@ -132,7 +130,6 @@ retry: return r; } *obj = &robj->tbo.base; - (*obj)->funcs = 
&radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); @@ -608,33 +605,40 @@ out: static void radeon_gem_va_update_vm(struct radeon_device *rdev, struct radeon_bo_va *bo_va) { - struct ttm_validate_buffer tv, *entry; - struct radeon_bo_list *vm_bos; - struct ww_acquire_ctx ticket; + struct radeon_bo_list *vm_bos, *entry; struct list_head list; + struct drm_exec exec; unsigned domain; int r; INIT_LIST_HEAD(&list); - tv.bo = &bo_va->bo->tbo; - tv.num_shared = 1; - list_add(&tv.head, &list); - vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); - if (r) - goto error_free; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + list_for_each_entry(entry, &list, list) { + r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base, + 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_cleanup; + } - list_for_each_entry(entry, &list, head) { - domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type); + r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_cleanup; + } + + list_for_each_entry(entry, &list, list) { + domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type); /* if anything is swapped out don't swap it in here, just abort and wait for the next CS */ if (domain == RADEON_GEM_DOMAIN_CPU) - goto error_unreserve; + goto error_cleanup; } mutex_lock(&bo_va->vm->mutex); @@ -648,10 +652,8 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, error_unlock: mutex_unlock(&bo_va->vm->mutex); -error_unreserve: - ttm_eu_backoff_reservation(&ticket, &list); - -error_free: +error_cleanup: + drm_exec_fini(&exec); kvfree(vm_bos); if (r && r != -ERESTARTSYS) @@ -899,7 +901,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info); void radeon_gem_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("radeon_gem_info", 0444, root, rdev, &radeon_debugfs_gem_info_fops); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 3d174390a8af..1f16619ed06e 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -1011,7 +1011,7 @@ void radeon_i2c_add(struct radeon_device *rdev, struct radeon_i2c_bus_rec *rec, const char *name) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); int i; for (i = 0; i < RADEON_MAX_I2C_BUS; i++) { diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 63d914f3414d..1aa41cc3f991 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -309,7 +309,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_sa_info); static void radeon_debugfs_sa_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("radeon_sa_info", 0444, root, rdev, &radeon_debugfs_sa_info_fops); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index c4dda908666c..9961251b44ba 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -80,7 +80,7 @@ static void 
radeon_hotplug_work_func(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, hotplug_work.work); - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; @@ -101,7 +101,7 @@ static void radeon_dp_work_func(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, dp_work); - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; @@ -197,7 +197,7 @@ static void radeon_driver_irq_uninstall_kms(struct drm_device *dev) static int radeon_irq_install(struct radeon_device *rdev, int irq) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); int ret; if (irq == IRQ_NOTCONNECTED) @@ -218,7 +218,7 @@ static int radeon_irq_install(struct radeon_device *rdev, int irq) static void radeon_irq_uninstall(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; + struct drm_device *dev = rdev_to_drm(rdev); struct pci_dev *pdev = to_pci_dev(dev->dev); radeon_driver_irq_uninstall_kms(dev); @@ -322,9 +322,9 @@ int radeon_irq_kms_init(struct radeon_device *rdev) spin_lock_init(&rdev->irq.lock); /* Disable vblank irqs aggressively for power-saving */ - rdev->ddev->vblank_disable_immediate = true; + rdev_to_drm(rdev)->vblank_disable_immediate = true; - r = drm_vblank_init(rdev->ddev, rdev->num_crtc); + r = drm_vblank_init(rdev_to_drm(rdev), rdev->num_crtc); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index a16590c6247f..7cbe02ffb193 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev) rdev->agp = NULL; done_free: - kfree(rdev); dev->dev_private = NULL; } @@ -104,15 +103,9 @@ done_free: int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct pci_dev *pdev = to_pci_dev(dev->dev); - struct radeon_device *rdev; + struct radeon_device *rdev = dev->dev_private; int r, acpi_status; - rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); - if (rdev == NULL) { - return -ENOMEM; - } - dev->dev_private = (void *)rdev; - #ifdef __alpha__ rdev->hose = pdev->sysdata; #endif @@ -176,7 +169,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); pm_runtime_allow(dev->dev); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } @@ -683,7 +675,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = fpriv; } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -693,7 +684,6 @@ err_fpriv: kfree(fpriv); err_suspend: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; } @@ -743,7 +733,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index c4350ac2b3d2..d1e8b9757a65 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ 
-136,9 +136,9 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) } if (rdev->is_atom_bios) - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } @@ -450,7 +450,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, } bd->props.brightness = radeon_legacy_backlight_get_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; + bd->props.power = BACKLIGHT_POWER_ON; backlight_update_status(bd); DRM_INFO("radeon legacy LVDS backlight initialized\n"); @@ -545,9 +545,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); if (rdev->is_atom_bios) - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } @@ -742,9 +742,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); if (rdev->is_atom_bios) - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } @@ -908,9 +908,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); if (rdev->is_atom_bios) - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } @@ -1113,9 +1113,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) } if (rdev->is_atom_bios) - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 546381a5c918..9e34da2cacef 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -38,7 +38,12 @@ #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +struct drm_fb_helper; +struct drm_fb_helper_surface_size; +struct drm_format_info; + struct edid; +struct drm_edid; struct radeon_bo; struct radeon_device; @@ -262,8 +267,7 @@ struct radeon_mode_info { /* Output CSC */ struct drm_property *output_csc_property; /* hardcoded DFP edid from BIOS */ - struct edid *bios_hardcoded_edid; - int bios_hardcoded_edid_size; + const struct drm_edid *bios_hardcoded_edid; /* firmware flags */ u16 firmware_flags; @@ -701,11 +705,9 @@ extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connecto extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); extern int radeon_get_monitor_bpc(struct drm_connector *connector); -extern struct edid *radeon_connector_edid(struct drm_connector *connector); - extern void radeon_connector_hotplug(struct drm_connector *connector); extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); extern void radeon_dp_set_link_config(struct drm_connector *connector, const struct drm_display_mode *mode); extern void radeon_dp_link_train(struct drm_encoder *encoder, @@ -889,6 +891,7 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *rfb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); @@ -937,14 +940,14 @@ void dce8_program_fmt(struct drm_encoder *encoder); /* fbdev layer */ #if defined(CONFIG_DRM_FBDEV_EMULATION) -void radeon_fbdev_setup(struct radeon_device *rdev); -void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); +int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = radeon_fbdev_driver_fbdev_probe bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); #else -static inline void radeon_fbdev_setup(struct radeon_device *rdev) -{ } -static inline void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -{ } +#define RADEON_FBDEV_DRIVER_OPS \ + .fbdev_probe = NULL static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { return false; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index a955f8a2f7fe..a0fc0801abb0 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -150,7 +150,8 @@ int radeon_bo_create(struct radeon_device *rdev, bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; - drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size); + drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size); + bo->tbo.base.funcs = &radeon_gem_object_funcs; bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); @@ -256,18 +257,15 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) if (bo == NULL) return NULL; - ttm_bo_get(&bo->tbo); + drm_gem_object_get(&bo->tbo.base); return bo; } void 
radeon_bo_unref(struct radeon_bo **bo) { - struct ttm_buffer_object *tbo; - if ((*bo) == NULL) return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); + drm_gem_object_put(&(*bo)->tbo.base); *bo = NULL; } @@ -466,23 +464,26 @@ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) } int radeon_bo_list_validate(struct radeon_device *rdev, - struct ww_acquire_ctx *ticket, + struct drm_exec *exec, struct list_head *head, int ring) { struct ttm_operation_ctx ctx = { true, false }; struct radeon_bo_list *lobj; - struct list_head duplicates; - int r; u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); + int r; - INIT_LIST_HEAD(&duplicates); - r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); - if (unlikely(r != 0)) { - return r; + drm_exec_until_all_locked(exec) { + list_for_each_entry(lobj, head, list) { + r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base, + 1); + drm_exec_retry_on_contention(exec); + if (unlikely(r && r != -EALREADY)) + return r; + } } - list_for_each_entry(lobj, head, tv.head) { + list_for_each_entry(lobj, head, list) { struct radeon_bo *bo = lobj->robj; if (!bo->tbo.pin_count) { u32 domain = lobj->preferred_domains; @@ -521,7 +522,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev, domain = lobj->allowed_domains; goto retry; } - ttm_eu_backoff_reservation(ticket, head); return r; } } @@ -529,11 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev, lobj->tiling_flags = bo->tiling_flags; } - list_for_each_entry(lobj, &duplicates, tv.head) { - lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); - lobj->tiling_flags = lobj->robj->tiling_flags; - } - return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 39cc87a59a9a..d7bbb52db546 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -152,7 +152,7 @@ extern void radeon_bo_force_delete(struct radeon_device *rdev); extern int radeon_bo_init(struct radeon_device *rdev); extern void radeon_bo_fini(struct radeon_device *rdev); extern int radeon_bo_list_validate(struct radeon_device *rdev, - struct ww_acquire_ctx *ticket, + struct drm_exec *exec, struct list_head *head, int ring); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, u32 tiling_flags, u32 pitch); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 2d9d9f46f243..a855a96dd2ea 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -282,7 +282,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) if (rdev->irq.installed) { i = 0; - drm_for_each_crtc(crtc, rdev->ddev) { + drm_for_each_crtc(crtc, rdev_to_drm(rdev)) { if (rdev->pm.active_crtcs & (1 << i)) { /* This can fail if a modeset is in progress */ if (drm_crtc_vblank_get(crtc) == 0) @@ -299,7 +299,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) if (rdev->irq.installed) { i = 0; - drm_for_each_crtc(crtc, rdev->ddev) { + drm_for_each_crtc(crtc, rdev_to_drm(rdev)) { if (rdev->pm.req_vblank & (1 << i)) { rdev->pm.req_vblank &= ~(1 << i); drm_crtc_vblank_put(crtc); @@ -671,7 +671,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); int temp; /* Can't get temperature when the card is off */ @@ -715,7 +715,7 @@ static ssize_t 
radeon_hwmon_show_sclk(struct device *dev, struct device_attribute *attr, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); u32 sclk = 0; /* Can't get clock frequency when the card is off */ @@ -740,7 +740,7 @@ static ssize_t radeon_hwmon_show_vddc(struct device *dev, struct device_attribute *attr, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); u16 vddc = 0; /* Can't get vddc when the card is off */ @@ -907,8 +907,7 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) static bool radeon_dpm_single_display(struct radeon_device *rdev) { - bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? - true : false; + bool single_display = rdev->pm.dpm.new_active_crtc_count < 2; /* check if the vblank period is too short to adjust the mclk */ if (single_display && rdev->asic->dpm.vblank_too_short) { @@ -1692,7 +1691,7 @@ void radeon_pm_fini(struct radeon_device *rdev) static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; @@ -1765,7 +1764,7 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) { - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_connector *radeon_connector; @@ -1826,7 +1825,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, + vbl_status = radeon_get_crtc_scanoutpos(rdev_to_drm(rdev), crtc, USE_REAL_VBLANKSTART, &vpos, &hpos, NULL, NULL, @@ -1918,7 +1917,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused) { struct radeon_device *rdev = m->private; - struct drm_device *ddev = rdev->ddev; + struct drm_device *ddev = rdev_to_drm(rdev); if ((rdev->flags & RADEON_IS_PX) && (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { @@ -1955,7 +1954,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_pm_info); static void radeon_debugfs_pm_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("radeon_pm_info", 0444, root, rdev, &radeon_debugfs_pm_info_fops); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 8d1d458286a8..581ae20c46e4 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -550,7 +550,7 @@ static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_r { #if defined(CONFIG_DEBUG_FS) const char *ring_name = radeon_debugfs_ring_idx_to_name(ring->idx); - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; if (ring_name) debugfs_create_file(ring_name, 0444, root, ring, diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index c9fef9b61ced..818554e60537 100644 --- 
a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -455,7 +455,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ringC, 64); if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); + DRM_ERROR("Failed to lock ring C %p\n", ringC); goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); @@ -481,7 +481,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ringC, 64); if (r) { - DRM_ERROR("Failed to lock ring B %p\n", ringC); + DRM_ERROR("Failed to lock ring C %p\n", ringC); goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 5c65b6dfb99a..695ac32f7535 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -219,8 +219,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { radeon_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->resource); - ttm_bo_assign_mem(bo, new_mem); + ttm_bo_move_null(bo, new_mem); goto out; } if (rdev->ring[radeon_copy_ring_index(rdev)].ready && @@ -682,10 +681,12 @@ int radeon_ttm_init(struct radeon_device *rdev) /* No others user of address space so set it to 0 */ r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, - rdev->ddev->anon_inode->i_mapping, - rdev->ddev->vma_offset_manager, - rdev->need_swiotlb, - dma_addressing_limited(&rdev->pdev->dev)); + rdev_to_drm(rdev)->anon_inode->i_mapping, + rdev_to_drm(rdev)->vma_offset_manager, + (rdev->need_swiotlb ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (dma_addressing_limited(&rdev->pdev->dev) ? 
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; @@ -890,7 +891,7 @@ static const struct file_operations radeon_ttm_gtt_fops = { static void radeon_ttm_debugfs_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct drm_minor *minor = rdev->ddev->primary; + struct drm_minor *minor = rdev_to_drm(rdev)->primary; struct dentry *root = minor->debugfs_root; debugfs_create_file("radeon_vram", 0444, root, rdev, diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 058a1c8451b2..ded5747a58d1 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -961,7 +961,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, unsigned optimal_score = ~0; /* loop through vco from low to high */ - vco_min = max(max(vco_min, vclk), dclk); + vco_min = max3(vco_min, vclk, dclk); for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) { uint64_t fb_div = (uint64_t)vco_freq * fb_factor; diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index d1871af967d4..bdbc1bbe8a9b 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -86,7 +86,7 @@ int radeon_vce_init(struct radeon_device *rdev) r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); if (r) { - dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n", + dev_err(rdev->dev, "radeon_vce: can't load firmware \"%s\"\n", fw_name); return r; } @@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev) rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); - /* we can only work with this fw version for now */ + /* we can only work with these fw versions for now */ if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) && (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) && (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8)))) @@ -281,7 +281,7 @@ static void radeon_vce_idle_work_handler(struct work_struct *work) * * @rdev: radeon_device pointer * - * Make sure VCE is powerd up when we want to use it + * Make sure VCE is powered up when we want to use it */ void radeon_vce_note_usage(struct radeon_device *rdev) { @@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; bool destroyed = false, created = false, allocated = false; - uint32_t tmp, handle = 0; + uint32_t tmp = 0, handle = 0; uint32_t *size = &tmp; int i, r = 0; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index c38b4d5d6a14..21a5340aefdf 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -142,10 +142,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, list[0].robj = vm->page_directory; list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM; list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; - list[0].tv.bo = &vm->page_directory->tbo; - list[0].tv.num_shared = 1; + list[0].shared = true; list[0].tiling_flags = 0; - list_add(&list[0].tv.head, head); + list_add(&list[0].list, head); for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { if (!vm->page_tables[i].bo) @@ -154,10 +153,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, list[idx].robj = vm->page_tables[i].bo; list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; - list[idx].tv.bo = &list[idx].robj->tbo; - list[idx].tv.num_shared = 1; + 
 		list[idx].shared = true;
 		list[idx].tiling_flags = 0;
-		list_add(&list[idx++].tv.head, head);
+		list_add(&list[idx++].list, head);
 	}
 
 	return list;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d4d1501e6576..13cd0a688a65 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
 
 static void rs400_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: is this correct ? */
-	r420_pipes_init(rdev);
+	/* Earlier code called r420_pipes_init() and then
+	 * rs400_mc_wait_for_idle(rdev). The problem is that, with the
+	 * r420 pipe initialization method, at least on a Mobility
+	 * Radeon Xpress 200M RC410 card that takes this code path,
+	 * num_gb_pipes ends up as 3 while the card seems to have only
+	 * one pipe.
+	 *
+	 * The problems showed up as HyperZ glitches, see:
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=110897
+	 *
+	 * Delegating initialization to the r300 code works and results
+	 * in correct pipe numbers. The rs400 cards are said to be r300
+	 * class rather than r400 class cards.
+	 */
+	r300_gpu_init(rdev);
+
 	if (rs400_mc_wait_for_idle(rdev)) {
 		pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
 			RREG32(RADEON_MC_STATUS));
@@ -379,7 +393,7 @@ DEFINE_SHOW_ATTRIBUTE(rs400_debugfs_gart_info);
 static void rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	struct dentry *root = rdev->ddev->primary->debugfs_root;
+	struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
 
 	debugfs_create_file("rs400_gart_info", 0444, root, rdev,
 			    &rs400_debugfs_gart_info_fops);
@@ -474,7 +488,7 @@ int rs400_resume(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* post */
-	radeon_combios_asic_init(rdev->ddev);
+	radeon_combios_asic_init(rdev_to_drm(rdev));
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
 	/* Initialize surface registers */
@@ -552,7 +566,7 @@ int rs400_init(struct radeon_device *rdev)
 		return -EINVAL;
 
 	/* Initialize clocks */
-	radeon_get_clock_info(rdev->ddev);
+	radeon_get_clock_info(rdev_to_drm(rdev));
 	/* initialize memory controller */
 	rs400_mc_init(rdev);
 	/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5c162778899b..88c8e91ea651 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -321,7 +321,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
 
 void rs600_pm_prepare(struct radeon_device *rdev)
 {
-	struct drm_device *ddev = rdev->ddev;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	u32 tmp;
@@ -339,7 +339,7 @@ void rs600_pm_prepare(struct radeon_device *rdev)
 
 void rs600_pm_finish(struct radeon_device *rdev)
 {
-	struct drm_device *ddev = rdev->ddev;
+	struct drm_device *ddev = rdev_to_drm(rdev);
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	u32 tmp;
@@ -408,7 +408,7 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
 
 void rs600_hpd_init(struct radeon_device *rdev)
 {
-	struct drm_device *dev = rdev->ddev;
+	struct drm_device *dev = rdev_to_drm(rdev);
 	struct drm_connector *connector;
 	unsigned enable = 0;
 
@@ -435,7 +435,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
 
 void rs600_hpd_fini(struct radeon_device *rdev)
 {
-	struct drm_device *dev = rdev->ddev;
+	struct drm_device *dev = rdev_to_drm(rdev);
 	struct drm_connector *connector;
 	unsigned disable = 0;
 
@@ -797,7 +797,7 @@ int 
rs600_irq_process(struct radeon_device *rdev) /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); + drm_handle_vblank(rdev_to_drm(rdev), 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -806,7 +806,7 @@ int rs600_irq_process(struct radeon_device *rdev) } if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); + drm_handle_vblank(rdev_to_drm(rdev), 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } @@ -1133,7 +1133,7 @@ int rs600_init(struct radeon_device *rdev) return -EINVAL; /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize memory controller */ rs600_mc_init(rdev); r100_debugfs_rbbm_init(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 14fb0819b8c1..016eb4992803 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -845,7 +845,7 @@ int rs690_init(struct radeon_device *rdev) return -EINVAL; /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize memory controller */ rs690_mc_init(rdev); rv515_debugfs(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index bbc6ccabf788..1b4dfb645585 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -255,7 +255,7 @@ DEFINE_SHOW_ATTRIBUTE(rv515_debugfs_ga_info); void rv515_debugfs(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - struct dentry *root = rdev->ddev->primary->debugfs_root; + struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; debugfs_create_file("rv515_pipes_info", 0444, root, rdev, &rv515_debugfs_pipes_info_fops); @@ -636,7 +636,7 @@ int rv515_init(struct radeon_device *rdev) if (radeon_boot_test_post_card(rdev) == false) return -EINVAL; /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9ce12fa3c356..7d4b0bf59109 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1935,7 +1935,7 @@ int rv770_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); + radeon_get_clock_info(rdev_to_drm(rdev)); /* Fence driver */ radeon_fence_driver_init(rdev); /* initialize AGP */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 15759c8ca5b7..26197aceb001 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6198,7 +6198,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) if (wptr & RB_OVERFLOW) { wptr &= ~RB_OVERFLOW; - /* When a ring buffer overflow happen start parsing interrupt + /* When a ring buffer overflow happens, start parsing interrupts * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. 
 		 */
@@ -6277,7 +6277,7 @@ restart_ih:
 			event_name = "vblank";
 
 			if (rdev->irq.crtc_vblank_int[crtc_idx]) {
-				drm_handle_vblank(rdev->ddev, crtc_idx);
+				drm_handle_vblank(rdev_to_drm(rdev), crtc_idx);
 				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 			}
@@ -6839,7 +6839,7 @@ int si_init(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 	/* Initialize clocks */
-	radeon_get_clock_info(rdev->ddev);
+	radeon_get_clock_info(rdev_to_drm(rdev));
 	/* Fence driver */
 	radeon_fence_driver_init(rdev);
 
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 65a911ddd509..f9267b026f8d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1109,7 +1109,7 @@
 #define	MIN_POWER_SHIFT			0
 #define	MAX_POWER(x)			((x) << 16)
 #define	MAX_POWER_MASK			(0x3fff << 16)
-#define	MAX_POWER_SHIFT			0
+#define	MAX_POWER_SHIFT			16
 #define	SQ_POWER_THROTTLE2		0x8e5c
 #define	MAX_POWER_DELTA(x)		((x) << 0)
 #define	MAX_POWER_DELTA_MASK		(0x3fff << 0)
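
For reference, the radeon_gem.c and radeon_object.c hunks above replace the old ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() scheme with the drm_exec API. A minimal sketch of that locking pattern follows; it is illustrative only (example_lock_two_objects() and the two-object setup are invented for the example, not radeon code, and error handling is reduced to the essentials):

/* Illustrative sketch only -- shows the drm_exec pattern adopted above. */
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int example_lock_two_objects(struct drm_gem_object *a,
				    struct drm_gem_object *b)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* Reserve each object with room for one fence slot. */
		r = drm_exec_prepare_obj(&exec, a, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;

		r = drm_exec_prepare_obj(&exec, b, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}

	/*
	 * Both objects are now locked and stay locked together; do the
	 * actual work (validation, page-table update, ...) here.
	 */

out:
	drm_exec_fini(&exec);	/* unlocks everything taken above */
	return r;
}

The point of the pattern, visible in the converted radeon code as well, is that drm_exec_retry_on_contention() restarts the drm_exec_until_all_locked() block when a contended lock forces a back-off, and drm_exec_fini() drops every reservation taken so far, so no separate backoff path has to be maintained.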

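Similarly, the radeon_drv.c and radeon_kms.c hunks replace the drm_dev_alloc()/.load()/drm_dev_put() lifecycle with devm_drm_dev_alloc(), which embeds the struct drm_device in the driver's own device structure and ties its lifetime to the PCI device, and then hand fbdev emulation to the generic client via drm_client_setup(). A rough sketch of the allocation side of that pattern, with invented "example_*" names and with the hardware/modeset bring-up a real driver needs omitted, might look like this:

/* Illustrative sketch only -- not radeon code. */
#include <linux/err.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>

struct example_device {
	struct drm_device ddev;		/* embedded, not a pointer */
	void __iomem *mmio;
};

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.name = "example",
};

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	struct example_device *edev;

	/* Allocates example_device with the DRM device embedded at .ddev
	 * and ties both to pdev->dev; no drm_dev_put() or .remove() path
	 * is needed for the allocation itself.
	 */
	edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_device, ddev);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	pci_set_drvdata(pdev, &edev->ddev);

	return drm_dev_register(&edev->ddev, 0);
}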