Diffstat (limited to 'drivers/gpu/drm/i915/soc/intel_dram.c')
 drivers/gpu/drm/i915/soc/intel_dram.c | 823 ++++++++++++++++++++++++++++++++++
 1 file changed, 823 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
new file mode 100644
index 000000000000..3e588762709a
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -0,0 +1,823 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "../display/intel_display_core.h" /* FIXME */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_dram.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+#include "intel_uncore.h"
+#include "vlv_iosf_sb.h"
+
+struct dram_dimm_info {
+ u16 size;
+ u8 width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
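+/* Expands to a designated initializer, e.g. DRAM_TYPE_STR(DDR4) -> [INTEL_DRAM_DDR4] = "DDR4" */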
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR2),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915)
+{
+ return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ?
+ INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2;
+}
+
+static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ return 533333;
+ case CLKCFG_MEM_667:
+ return 666667;
+ case CLKCFG_MEM_800:
+ return 800000;
+ }
+
+ return 0;
+}
+
+static unsigned int ilk_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u16 ddrpll;
+
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ return 800000;
+ case 0x10:
+ return 1066667;
+ case 0x14:
+ return 1333333;
+ case 0x18:
+ return 1600000;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ return 0;
+ }
+}
+
+static unsigned int chv_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_CCK));
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ return 2000000;
+ default:
+ return 1600000;
+ }
+}
+
+static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
+
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ return 800000;
+ case 2:
+ return 1066667;
+ case 3:
+ return 1333333;
+ }
+
+ return 0;
+}
+
+unsigned int intel_mem_freq(struct drm_i915_private *i915)
+{
+ if (IS_PINEVIEW(i915))
+ return pnv_mem_freq(i915);
+ else if (GRAPHICS_VER(i915) == 5)
+ return ilk_mem_freq(i915);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_mem_freq(i915);
+ else if (IS_VALLEYVIEW(i915))
+ return vlv_mem_freq(i915);
+ else
+ return 0;
+}
+
+static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
+{
+ u32 fsb;
+
+ /*
+ * Note that this only reads the state of the FSB
+ * straps, not the actual FSB frequency. Some BIOSen
+ * let you configure each independently. Ideally we'd
+ * read out the actual FSB frequency but sadly we
+ * don't know which registers have that information,
+ * and all the relevant docs have gone to bit heaven :(
+ */
+ fsb = intel_uncore_read(&i915->uncore, CLKCFG) & CLKCFG_FSB_MASK;
+
+ if (IS_PINEVIEW(i915) || IS_MOBILE(i915)) {
+ switch (fsb) {
+ case CLKCFG_FSB_400:
+ return 400000;
+ case CLKCFG_FSB_533:
+ return 533333;
+ case CLKCFG_FSB_667:
+ return 666667;
+ case CLKCFG_FSB_800:
+ return 800000;
+ case CLKCFG_FSB_1067:
+ return 1066667;
+ case CLKCFG_FSB_1333:
+ return 1333333;
+ default:
+ MISSING_CASE(fsb);
+ return 1333333;
+ }
+ } else {
+ switch (fsb) {
+ case CLKCFG_FSB_400_ALT:
+ return 400000;
+ case CLKCFG_FSB_533:
+ return 533333;
+ case CLKCFG_FSB_667:
+ return 666667;
+ case CLKCFG_FSB_800:
+ return 800000;
+ case CLKCFG_FSB_1067_ALT:
+ return 1066667;
+ case CLKCFG_FSB_1333_ALT:
+ return 1333333;
+ case CLKCFG_FSB_1600_ALT:
+ return 1600000;
+ default:
+ MISSING_CASE(fsb);
+ return 1333333;
+ }
+ }
+}
+
+static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
+{
+ u16 fsb;
+
+ fsb = intel_uncore_read16(&dev_priv->uncore, CSIPLL0) & 0x3ff;
+
+ switch (fsb) {
+ case 0x00c:
+ return 3200000;
+ case 0x00e:
+ return 3733333;
+ case 0x010:
+ return 4266667;
+ case 0x012:
+ return 4800000;
+ case 0x014:
+ return 5333333;
+ case 0x016:
+ return 5866667;
+ case 0x018:
+ return 6400000;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", fsb);
+ return 0;
+ }
+}
+
+unsigned int intel_fsb_freq(struct drm_i915_private *i915)
+{
+ if (GRAPHICS_VER(i915) == 5)
+ return ilk_fsb_freq(i915);
+ else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4)
+ return i9xx_fsb_freq(i915);
+ else
+ return 0;
+}
+
+static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ dram_info->fsb_freq = intel_fsb_freq(i915);
+ if (dram_info->fsb_freq)
+ drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
+
+ dram_info->mem_freq = intel_mem_freq(i915);
+ if (dram_info->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
+
+ if (IS_PINEVIEW(i915))
+ dram_info->type = pnv_dram_type(i915);
+
+ return 0;
+}
+
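+/*
+ * Each rank drives the full 64-bit channel, so a DIMM built from xW
+ * devices has ranks * 64 / W devices (e.g. a dual-rank x8 DIMM has
+ * 2 * 64 / 8 = 16 devices).
+ */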
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total Gb for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return (val & SKL_DRAM_SIZE_MASK) * 8;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total Gb for the whole DIMM */
+static int icl_get_dimm_size(u16 val)
+{
+ return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
+}
+
+static int icl_get_dimm_width(u16 val)
+{
+ if (icl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & ICL_DRAM_WIDTH_MASK) {
+ case ICL_DRAM_WIDTH_X8:
+ case ICL_DRAM_WIDTH_X16:
+ case ICL_DRAM_WIDTH_X32:
+ val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int icl_get_dimm_ranks(u16 val)
+{
+ if (icl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
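+/*
+ * dimm->size is the total Gb of the DIMM, so dividing by the device count
+ * gives the per-device density; e.g. a dual-rank x8 DIMM made of 16Gb
+ * devices is 16 * 16Gb = 256Gb total and still counts as a 16Gb+ DIMM.
+ */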
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total Gb to Gb per DRAM device */
+ return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) >= 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (GRAPHICS_VER(i915) >= 11) {
+ dimm->size = icl_get_dimm_size(val);
+ dimm->width = icl_get_dimm_width(val);
+ dimm->ranks = icl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ str_yes_no(skl_is_16gb_dimm(dimm)));
+}
+
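+/*
+ * The per-channel MAD DIMM register packs DIMM L in the low 16 bits and
+ * DIMM S in the high 16 bits; decode both halves separately.
+ */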
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
+ channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
+
+ return 0;
+}
+
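+/*
+ * The configuration is symmetric when both channels are identical and,
+ * if a DIMM S is present, it matches DIMM L as well.
+ */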
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ /* Assume 16Gb+ DIMMs are present until proven otherwise */
+ dram_info->has_16gb_dimms = true;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->has_16gb_dimms = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ str_yes_no(dram_info->symmetric_memory));
+
+ drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n",
+ str_yes_no(dram_info->has_16gb_dimms));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+
+ ret = skl_dram_get_channels_info(i915, dram_info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * Gb to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
+}
+
+static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ u32 val;
+ u8 valid_ranks = 0;
+ int i;
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks);
+
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
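+/*
+ * The pcode query returns a packed dword: bits 3:0 DRAM type (the encoding
+ * differs between gen11 and gen12), bits 7:4 number of populated channels,
+ * bits 11:8 number of QGV points, bits 13:12 number of PSF GV points.
+ */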
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct dram_info *dram_info)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ if (ret)
+ return ret;
+
+ if (GRAPHICS_VER(dev_priv) == 12) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -EINVAL;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -EINVAL;
+ }
+ }
+
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
+ dram_info->num_psf_gv_points = (val & 0x3000) >> 12;
+
+ return 0;
+}
+
+static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ int ret;
+
+ ret = skl_dram_get_channels_info(i915, dram_info);
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915, dram_info);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ return icl_pcode_read_mem_global_info(i915, dram_info);
+}
+
+static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ struct intel_display *display = i915->display;
+ u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
+
+ switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 8:
+ drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+ dram_info->type = INTEL_DRAM_GDDR;
+ break;
+ case 9:
+ drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
+ dram_info->type = INTEL_DRAM_GDDR_ECC;
+ break;
+ default:
+ MISSING_CASE(val);
+ return -EINVAL;
+ }
+
+ dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
+ dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
+ /* PSF GV points not supported in D14+ */
+
+ if (DISPLAY_VER(display) >= 35)
+ dram_info->ecc_impacting_de_bw = REG_FIELD_GET(XE3P_ECC_IMPACTING_DE, val);
+
+ return 0;
+}
+
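+/*
+ * The platform checks below go from newest to oldest, so the first matching
+ * branch picks the most specific detection path for the device.
+ */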
+int intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct intel_display *display = i915->display;
+ struct dram_info *dram_info;
+ int ret;
+
+ if (IS_DG2(i915) || !intel_display_device_present(display))
+ return 0;
+
+ dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
+ if (!dram_info)
+ return -ENOMEM;
+
+ i915->dram_info = dram_info;
+
+ if (DISPLAY_VER(display) >= 14)
+ ret = xelpdp_get_dram_info(i915, dram_info);
+ else if (GRAPHICS_VER(i915) >= 12)
+ ret = gen12_get_dram_info(i915, dram_info);
+ else if (GRAPHICS_VER(i915) >= 11)
+ ret = gen11_get_dram_info(i915, dram_info);
+ else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
+ ret = bxt_get_dram_info(i915, dram_info);
+ else if (GRAPHICS_VER(i915) >= 9)
+ ret = skl_get_dram_info(i915, dram_info);
+ else
+ ret = i915_get_dram_info(i915, dram_info);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
+ drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
+
+ /* TODO: Do we want to abort probe on dram detection failures? */
+ if (ret)
+ return 0;
+
+ return 0;
+}
+
+/*
+ * Returns NULL for platforms that don't have dram info. Avoid overzealous NULL
+ * checks; instead, prefer not dereferencing the pointer at all on platforms
+ * that shouldn't be looking at dram info, so that accidental and incorrect
+ * uses are caught.
+ */
+const struct dram_info *intel_dram_info(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return i915->dram_info;
+}
+
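+/*
+ * eDRAM size in MB is banks * ways * sets as encoded in the capability
+ * register, e.g. 8 banks with ways index 1 (8 ways) and sets index 0
+ * (1 set) gives 8 * 8 * 1 = 64MB.
+ */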
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
+ return;
+
+ edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not present
+ * before gen9, so always report 128MB.
+ */
+ if (GRAPHICS_VER(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}