Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_display.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 365
 1 file changed, 297 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 503f89a766c3..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -33,11 +33,14 @@
 #include "soc15_common.h"
 #include "gc/gc_11_0_0_offset.h"
 #include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
 #include <asm/div64.h>

 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
@@ -98,7 +101,7 @@ static void amdgpu_display_flip_callback(struct dma_fence *f,
 static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
                                              struct dma_fence **f)
 {
-        struct dma_fence *fence= *f;
+        struct dma_fence *fence = *f;

         if (fence == NULL)
                 return false;
@@ -124,7 +127,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
         struct drm_crtc *crtc = &amdgpu_crtc->base;
         unsigned long flags;
-        unsigned i;
+        unsigned int i;
         int vpos, hpos;

         for (i = 0; i < work->shared_count; ++i)
@@ -201,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
         u64 tiling_flags;
         int i, r;

-        work = kzalloc(sizeof *work, GFP_KERNEL);
+        work = kzalloc(sizeof(*work), GFP_KERNEL);
         if (work == NULL)
                 return -ENOMEM;
@@ -231,6 +234,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
         }

         if (!adev->enable_virtual_display) {
+                new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                 r = amdgpu_bo_pin(new_abo,
                                   amdgpu_display_supported_domains(adev, new_abo->flags));
                 if (unlikely(r != 0)) {
@@ -328,22 +332,19 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
                 if (crtc->enabled)
                         active = true;

-        pm_runtime_mark_last_busy(dev->dev);
-
         adev = drm_to_adev(dev);
         /* if we have active crtcs and we don't have a power ref,
-           take the current one */
+         * take the current one
+         */
         if (active && !adev->have_disp_power_ref) {
                 adev->have_disp_power_ref = true;
                 return ret;
         }
-        /* if we have no active crtcs, then drop the power ref
-           we got before */
-        if (!active && adev->have_disp_power_ref) {
-                pm_runtime_put_autosuspend(dev->dev);
+        /* if we have no active crtcs, then go to
+         * drop the power ref we got before
+         */
+        if (!active && adev->have_disp_power_ref)
                 adev->have_disp_power_ref = false;
-        }
-
 out:
         /* drop the power reference we got coming in here */
         pm_runtime_put_autosuspend(dev->dev);
@@ -507,11 +508,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
         if (amdgpu_connector->router.ddc_valid)
                 amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

-        if (use_aux) {
+        if (use_aux)
                 ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
-        } else {
+        else
                 ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
-        }

         if (ret != 2)
                 /* Couldn't find an accessible DDC on this connector */
@@ -520,20 +520,40 @@
          * EDID header starts with:
          * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
          * Only the first 6 bytes must be valid as
-         * drm_edid_block_valid() can fix the last 2 bytes */
+         * drm_edid_block_valid() can fix the last 2 bytes
+         */
         if (drm_edid_header_is_valid(buf) < 6) {
                 /* Couldn't find an accessible EDID on this
-                 * connector */
+                 * connector
+                 */
                 return false;
         }
         return true;
 }

+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+                          unsigned int flags, unsigned int color,
+                          struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+        if (file)
+                return -ENOSYS;
+
+        return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+                                         num_clips);
+}
+
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
         .destroy = drm_gem_fb_destroy,
         .create_handle = drm_gem_fb_create_handle,
 };

+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+        .destroy = drm_gem_fb_destroy,
+        .create_handle = drm_gem_fb_create_handle,
+        .dirty = amdgpu_dirtyfb
+};
+
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                                           uint64_t bo_flags)
 {
@@ -634,6 +654,10 @@ amdgpu_lookup_format_info(u32 format, uint64_t modifier)
         if (!IS_AMD_FMT_MOD(modifier))
                 return NULL;

+        if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX9 ||
+            AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12)
+                return NULL;
+
         if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
                 return lookup_format_info(dcc_retile_formats,
                                           ARRAY_SIZE(dcc_retile_formats),
@@ -698,6 +722,30 @@ extract_render_dcc_offset(struct amdgpu_device *adev,
         return 0;
 }

+static int convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer *afb)
+{
+        u64 modifier = 0;
+        int swizzle_mode = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE);
+
+        if (!swizzle_mode) {
+                modifier = DRM_FORMAT_MOD_LINEAR;
+        } else {
+                int max_comp_block =
+                        AMDGPU_TILING_GET(afb->tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+
+                modifier =
+                        AMD_FMT_MOD |
+                        AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
+                        AMD_FMT_MOD_SET(TILE, swizzle_mode) |
+                        AMD_FMT_MOD_SET(DCC, afb->gfx12_dcc) |
+                        AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block);
+        }
+
+        afb->base.modifier = modifier;
+        afb->base.flags |= DRM_MODE_FB_MODIFIERS;
+        return 0;
+}
+
 static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
 {
         struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
@@ -743,11 +791,13 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
                         return -EINVAL;
                 }

-                if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
+                if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
                         version = AMD_FMT_MOD_TILE_VER_GFX11;
-                else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
+                else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+                         IP_VERSION(10, 3, 0))
                         version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
-                else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+                else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+                         IP_VERSION(10, 0, 0))
                         version = AMD_FMT_MOD_TILE_VER_GFX10;
                 else
                         version = AMD_FMT_MOD_TILE_VER_GFX9;
@@ -756,13 +806,15 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
                 case 0: /* Z microtiling */
                         return -EINVAL;
                 case 1: /* S microtiling */
-                        if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
+                        if (amdgpu_ip_version(adev, GC_HWIP, 0) <
+                            IP_VERSION(11, 0, 0)) {
                                 if (!has_xor)
                                         version = AMD_FMT_MOD_TILE_VER_GFX9;
                         }
                         break;
                 case 2:
-                        if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
+                        if (amdgpu_ip_version(adev, GC_HWIP, 0) <
+                            IP_VERSION(11, 0, 0)) {
                                 if (!has_xor && afb->base.format->cpp[0] != 4)
                                         version = AMD_FMT_MOD_TILE_VER_GFX9;
                         }
@@ -815,10 +867,12 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
                         u64 render_dcc_offset;

                         /* Enable constant encode on RAVEN2 and later. */
-                        bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN ||
-                                                    (adev->asic_type == CHIP_RAVEN &&
-                                                     adev->external_rev_id >= 0x81)) &&
-                                                   adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0);
+                        bool dcc_constant_encode =
+                                (adev->asic_type > CHIP_RAVEN ||
+                                 (adev->asic_type == CHIP_RAVEN &&
+                                  adev->external_rev_id >= 0x81)) &&
+                                amdgpu_ip_version(adev, GC_HWIP, 0) <
+                                        IP_VERSION(11, 0, 0);
                         int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
                                               dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
@@ -855,7 +909,9 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
                 if (adev->family >= AMDGPU_FAMILY_NV) {
                         int extra_pipe = 0;

-                        if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
+                        if ((amdgpu_ip_version(adev, GC_HWIP,
+                                                0) >=
+                             IP_VERSION(10, 3, 0)) &&
                             pipes == packers && pipes > 1)
                                 extra_pipe = 1;
@@ -889,8 +945,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
 {
         u64 micro_tile_mode;

-        /* Zero swizzle mode means linear */
-        if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+        if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
                 return 0;

         micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
@@ -1014,6 +1069,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
                         block_width = 256 / format_info->cpp[i];
                         block_height = 1;
                         block_size_log2 = 8;
+                } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
+                        int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+                        switch (swizzle) {
+                        case AMD_FMT_MOD_TILE_GFX12_256B_2D:
+                                block_size_log2 = 8;
+                                break;
+                        case AMD_FMT_MOD_TILE_GFX12_4K_2D:
+                                block_size_log2 = 12;
+                                break;
+                        case AMD_FMT_MOD_TILE_GFX12_64K_2D:
+                                block_size_log2 = 16;
+                                break;
+                        case AMD_FMT_MOD_TILE_GFX12_256K_2D:
+                                block_size_log2 = 18;
+                                break;
+                        default:
+                                drm_dbg_kms(rfb->base.dev,
+                                            "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
+                                return -EINVAL;
+                        }
+
+                        get_block_dimensions(block_size_log2, format_info->cpp[i],
+                                             &block_width, &block_height);
                 } else {
                         int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
@@ -1049,7 +1128,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
                         return ret;
         }

-        if (AMD_FMT_MOD_GET(DCC, modifier)) {
+        if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
+            AMD_FMT_MOD_GET(DCC, modifier)) {
                 if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
                         block_size_log2 = get_dcc_block_size(modifier, false, false);
                         get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
@@ -1079,7 +1159,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
 }

 static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
-                                      uint64_t *tiling_flags, bool *tmz_surface)
+                                      uint64_t *tiling_flags, bool *tmz_surface,
+                                      bool *gfx12_dcc)
 {
         struct amdgpu_bo *rbo;
         int r;
@@ -1087,6 +1168,7 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
         if (!amdgpu_fb) {
                 *tiling_flags = 0;
                 *tmz_surface = false;
+                *gfx12_dcc = false;
                 return 0;
         }

@@ -1100,11 +1182,9 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
                 return r;
         }

-        if (tiling_flags)
-                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
-
-        if (tmz_surface)
-                *tmz_surface = amdgpu_bo_encrypted(rbo);
+        amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+        *tmz_surface = amdgpu_bo_encrypted(rbo);
+        *gfx12_dcc = rbo->flags & AMDGPU_GEM_CREATE_GFX12_DCC;

         amdgpu_bo_unreserve(rbo);

@@ -1114,13 +1194,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
 static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
                                                  struct amdgpu_framebuffer *rfb,
                                                  struct drm_file *file_priv,
+                                                 const struct drm_format_info *info,
                                                  const struct drm_mode_fb_cmd2 *mode_cmd,
                                                  struct drm_gem_object *obj)
 {
         int ret;

         rfb->base.obj[0] = obj;
-        drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+        drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);

         /* Verify that the modifier is supported. */
         if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
                                       mode_cmd->modifier[0])) {
@@ -1136,7 +1217,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
         if (ret)
                 goto err;

-        ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+        if (drm_drv_uses_atomic_modeset(dev))
+                ret = drm_framebuffer_init(dev, &rfb->base,
+                                           &amdgpu_fb_funcs_atomic);
+        else
+                ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
         if (ret)
                 goto err;
@@ -1169,7 +1254,8 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
                 }
         }

-        ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
+        ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface,
+                                         &rfb->gfx12_dcc);
         if (ret)
                 return ret;
@@ -1183,7 +1269,11 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,

         if (!dev->mode_config.fb_modifiers_not_supported &&
             !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
-                ret = convert_tiling_flags_to_modifier(rfb);
+                if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0))
+                        ret = convert_tiling_flags_to_modifier_gfx12(rfb);
+                else
+                        ret = convert_tiling_flags_to_modifier(rfb);
+
                 if (ret) {
                         drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
                                     rfb->tiling_flags);
@@ -1206,6 +1296,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
 struct drm_framebuffer *
 amdgpu_display_user_framebuffer_create(struct drm_device *dev,
                                        struct drm_file *file_priv,
+                                       const struct drm_format_info *info,
                                        const struct drm_mode_fb_cmd2 *mode_cmd)
 {
         struct amdgpu_framebuffer *amdgpu_fb;
@@ -1216,15 +1307,17 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,

         obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
         if (obj == NULL) {
-                drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
-                            "can't create framebuffer\n", mode_cmd->handles[0]);
+                drm_dbg_kms(dev,
+                            "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
+                            mode_cmd->handles[0]);
+
                 return ERR_PTR(-ENOENT);
         }

         /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
         bo = gem_to_amdgpu_bo(obj);
         domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
-        if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+        if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
                 drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
                 drm_gem_object_put(obj);
                 return ERR_PTR(-EINVAL);
@@ -1237,7 +1330,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
         }

         ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
-                                                    mode_cmd, obj);
+                                                    info, mode_cmd, obj);
         if (ret) {
                 kfree(amdgpu_fb);
                 drm_gem_object_put(obj);
@@ -1252,24 +1345,82 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
         .fb_create = amdgpu_display_user_framebuffer_create,
 };

-static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
-{        { UNDERSCAN_OFF, "off" },
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
+        { UNDERSCAN_OFF, "off" },
         { UNDERSCAN_ON, "on" },
         { UNDERSCAN_AUTO, "auto" },
 };

-static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
-{        { AMDGPU_AUDIO_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
+        { AMDGPU_AUDIO_DISABLE, "off" },
         { AMDGPU_AUDIO_ENABLE, "on" },
         { AMDGPU_AUDIO_AUTO, "auto" },
 };

 /* XXX support different dither options? spatial, temporal, both, etc. */
-static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
-{        { AMDGPU_FMT_DITHER_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
+        { AMDGPU_FMT_DITHER_DISABLE, "off" },
         { AMDGPU_FMT_DITHER_ENABLE, "on" },
 };

+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property is used for the compositor to
+ * directly control the adaptive backlight modulation power savings feature
+ * that is part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property is by default set to 'sysfs' to allow the sysfs file 'panel_power_savings'
+ * to be able to control it.
+ * If set to 'off' the compositor will ensure it stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' will control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+        const struct drm_prop_enum_list props[] = {
+                { ABM_SYSFS_CONTROL, "sysfs" },
+                { ABM_LEVEL_OFF, "off" },
+                { ABM_LEVEL_MIN, "min" },
+                { ABM_LEVEL_BIAS_MIN, "bias min" },
+                { ABM_LEVEL_BIAS_MAX, "bias max" },
+                { ABM_LEVEL_MAX, "max" },
+        };
+        struct drm_property *prop;
+        int i;
+
+        if (!adev->dc_enabled)
+                return 0;
+
+        prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+                                   "adaptive backlight modulation",
+                                   6);
+        if (!prop)
+                return -ENOMEM;
+
+        for (i = 0; i < ARRAY_SIZE(props); i++) {
+                int ret;
+
+                ret = drm_property_add_enum(prop, props[i].type,
+                                            props[i].name);
+
+                if (ret) {
+                        drm_property_destroy(adev_to_drm(adev), prop);
+
+                        return ret;
+                }
+        }
+
+        adev->mode_info.abm_level_property = prop;
+
+        return 0;
+}
+
 int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 {
         int sz;
@@ -1316,15 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
                         "dither",
                         amdgpu_dither_enum_list, sz);

-        if (adev->dc_enabled) {
-                adev->mode_info.abm_level_property =
-                        drm_property_create_range(adev_to_drm(adev), 0,
-                                                  "abm level", 0, 4);
-                if (!adev->mode_info.abm_level_property)
-                        return -ENOMEM;
-        }
-
-        return 0;
+        return amdgpu_display_setup_abm_prop(adev);
 }

 void amdgpu_display_update_priority(struct amdgpu_device *adev)
@@ -1391,7 +1534,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
         if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
             ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
              ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
-              connector->display_info.is_hdmi &&
+              connector && connector->display_info.is_hdmi &&
               amdgpu_display_is_hdtv_mode(mode)))) {
                 if (amdgpu_encoder->underscan_hborder != 0)
                         amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
@@ -1410,6 +1553,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
         }
         if (amdgpu_crtc->rmx_type != RMX_OFF) {
                 fixed20_12 a, b;
+
                 a.full = dfixed_const(src_v);
                 b.full = dfixed_const(dst_v);
                 amdgpu_crtc->vsc.full = dfixed_div(a, b);
@@ -1429,7 +1573,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * For driver internal use only also supports these flags:
 *
 * USE_REAL_VBLANKSTART to use the real start of vblank instead
@@ -1496,8 +1640,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
                 ret |= DRM_SCANOUTPOS_ACCURATE;
                 vbl_start = vbl & 0x1fff;
                 vbl_end = (vbl >> 16) & 0x1fff;
-        }
-        else {
+        } else {
                 /* No: Fake something reasonable which gives at least ok results. */
                 vbl_start = mode->crtc_vdisplay;
                 vbl_end = 0;
@@ -1505,8 +1648,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,

         /* Called from driver internal vblank counter query code? */
         if (flags & GET_DISTANCE_TO_VBLANKSTART) {
-            /* Caller wants distance from real vbl_start in *hpos */
-            *hpos = *vpos - vbl_start;
+                /* Caller wants distance from real vbl_start in *hpos */
+                *hpos = *vpos - vbl_start;
         }

         /* Fudge vblank to start a few scanlines earlier to handle the
@@ -1528,7 +1671,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,

         /* In vblank? */
         if (in_vbl)
-            ret |= DRM_SCANOUTPOS_IN_VBLANK;
+                ret |= DRM_SCANOUTPOS_IN_VBLANK;

         /* Called from driver internal vblank counter query code? */
         if (flags & GET_DISTANCE_TO_VBLANKSTART) {
@@ -1618,6 +1761,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
         struct drm_connector_list_iter iter;
         int r;

+        drm_kms_helper_poll_disable(dev);
+
         /* turn off display hw */
         drm_modeset_lock_all(dev);
         drm_connector_list_iter_begin(dev, &iter);
@@ -1634,6 +1779,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
                 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
                         r = amdgpu_bo_reserve(aobj, true);
                         if (r == 0) {
                                 amdgpu_bo_unpin(aobj);
@@ -1641,9 +1787,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
                         }
                 }

-                if (fb == NULL || fb->obj[0] == NULL) {
+                if (!fb || !fb->obj[0])
                         continue;
-                }
+
                 robj = gem_to_amdgpu_bo(fb->obj[0]);
                 if (!amdgpu_display_robj_is_fb(adev, robj)) {
                         r = amdgpu_bo_reserve(robj, true);
@@ -1670,8 +1816,10 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
                 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
                         r = amdgpu_bo_reserve(aobj, true);
                         if (r == 0) {
+                                aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                                 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
                                 if (r != 0)
                                         dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
@@ -1694,6 +1842,87 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)

         drm_modeset_unlock_all(dev);

+        drm_kms_helper_poll_enable(dev);
+
         return 0;
 }

+/* panic_bo is set in amdgpu_dm_plane_get_scanout_buffer() and only used in amdgpu_dm_set_pixel()
+ * they are called from the panic handler, and protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO to write each pixel to the GPU VRAM,
+ * This is a simplified version of amdgpu_device_mm_access()
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+                                     unsigned int x,
+                                     unsigned int y,
+                                     u32 color)
+{
+        struct amdgpu_res_cursor cursor;
+        unsigned long offset;
+        struct amdgpu_bo *abo = panic_abo;
+        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+        uint32_t tmp;
+
+        offset = x * 4 + y * sb->pitch[0];
+        amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+        tmp = cursor.start >> 31;
+        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+        if (tmp != 0xffffffff)
+                WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+        WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+                                      struct drm_scanout_buffer *sb)
+{
+        struct amdgpu_bo *abo;
+        struct drm_framebuffer *fb = plane->state->fb;
+
+        if (!fb)
+                return -EINVAL;
+
+        DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+        abo = gem_to_amdgpu_bo(fb->obj[0]);
+        if (!abo)
+                return -EINVAL;
+
+        sb->width = fb->width;
+        sb->height = fb->height;
+        /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+        sb->format = drm_format_info(fb->format->format);
+        if (!sb->format)
+                return -EINVAL;
+
+        sb->pitch[0] = fb->pitches[0];
+
+        if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+                if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+                        drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+                        return -EINVAL;
+                }
+                /* Only handle 32bits format, to simplify mmio access */
+                if (fb->format->cpp[0] != 4) {
+                        drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+                        return -EINVAL;
+                }
+                sb->set_pixel = amdgpu_display_set_pixel;
+                panic_abo = abo;
+                return 0;
+        }
+        if (!abo->kmap.virtual &&
+            ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+                drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+                return -ENOMEM;
+        }
+        if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+                iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+        else
+                iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+        return 0;
+}