author    Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>  2016-11-14 13:50:20 -0800
committer Ville Syrjälä <ville.syrjala@linux.intel.com>  2016-12-05 16:23:02 +0200
commit    fd81c44eba9ca1e78d0601f37b5d7819df522aa7 (patch)
tree      80aa52844bc5c5c1d74edcf59ba8db9bfe05dec2 /drivers/gpu/drm/i915/intel_dp.c
parent    3c30c7f7b09a0eec58cb43f96705877f66975057 (diff)
drm/i915: Fix DP link rate math
We store DP link rates as link clock frequencies in kHz, just like all
other clock values. But DP link rates in the DP spec are expressed in
Gbps/lane, which seems to have led to some confusion.

E.g., for HBR2:

	Max. data rate = 5.4 Gbps/lane x 4 lanes x 8/10 x 1/8 = 2160000 kBps

where 8/10 accounts for the channel encoding and 1/8 converts bits to
Bytes. Using the link clock frequency, as we do:

	Max. data rate = 540000 kHz * 4 lanes = 2160000 kSymbols/s

Because each symbol carries 8 bits of data, this is 2160000 kBps, and
there is no need to account for channel encoding here. But currently we
compute:

	540000 kHz * 4 lanes * (8/10) = 1728000 kBps

Similarly, while computing the required link bandwidth for a mode, there
is a mysterious 1/10 term. This should simply be:

	pixel_clock kHz * (bpp/8)

to give the final result in kBps.

v2: Changed to DIV_ROUND_UP() and comment changes (Ville)

Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1479160220-17794-1-git-send-email-dhinakaran.pandiyan@intel.com
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
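[Editor's note] To make the arithmetic above easy to check, here is a minimal standalone C sketch, not part of the patch; the DIV_ROUND_UP definition mirrors the kernel macro, and the 1080p pixel clock and 24 bpp are illustrative assumptions:

	#include <stdio.h>

	/* Same rounding helper the patch uses from the kernel headers. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int link_clock = 540000;	/* HBR2 LS_Clk in kHz */
		int lanes = 4;

		/* Fixed math: one Byte of data per symbol per lane per LS_Clk */
		int max_rate = link_clock * lanes;		/* 2160000 kBps */

		/* Old math: the 8/10 channel encoding factor wrongly applied
		 * on top of a symbol clock that already excludes it
		 */
		int old_rate = link_clock * lanes * 8 / 10;	/* 1728000 kBps */

		/* Required bandwidth: pixel_clock kHz * bpp / 8, rounded up */
		int pixel_clock = 148500;	/* e.g. 1080p60, in kHz */
		int bpp = 24;
		int required = DIV_ROUND_UP(pixel_clock * bpp, 8); /* 445500 kBps */

		printf("max=%d old=%d required=%d kBps\n",
		       max_rate, old_rate, required);
		return 0;
	}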
Diffstat (limited to 'drivers/gpu/drm/i915/intel_dp.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  35
1 file changed, 15 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f2420cbe06a..53fc1b3f6770 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -161,33 +161,23 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
return min(source_max, sink_max);
}
-/*
- * The units on the numbers in the next two are... bizarre. Examples will
- * make it clearer; this one parallels an example in the eDP spec.
- *
- * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
- *
- * 270000 * 1 * 8 / 10 == 216000
- *
- * The actual data capacity of that configuration is 2.16Gbit/s, so the
- * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
- * or equivalently, kilopixels per second - so for 1680x1050R it'd be
- * 119000. At 18bpp that's 2142000 kilobits per second.
- *
- * Thus the strange-looking division by 10 in intel_dp_link_required, to
- * get the result in decakilobits instead of kilobits.
- */
-
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
- return (pixel_clock * bpp + 9) / 10;
+ /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
+ return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
- return (max_link_clock * max_lanes * 8) / 10;
+	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
+	 * link rate that is generally expressed in Gbps. Since 8 bits of data
+	 * are transmitted every LS_Clk per lane, there is no need to account
+	 * for the channel encoding that is done in the PHY layer here.
+	 */
+
+ return max_link_clock * max_lanes;
}
static int
@@ -3573,7 +3563,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (val == 0)
break;
- /* Value read is in kHz while drm clock is saved in deca-kHz */
+		/* Value read multiplied by 200kHz gives the per-lane
+		 * link rate in kHz. The source rates are, however,
+		 * stored in terms of LS_Clk kHz. The full conversion
+		 * back to symbols is
+		 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+		 */
intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
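[Editor's note] As a worked example of the sink-rate conversion in the hunk above, here is a small standalone C check, not from the patch; the DPCD table value 27000 is an assumption derived from HBR2's 5.4 Gbps per-lane bit rate and the 200 kHz table unit:

	#include <assert.h>

	int main(void)
	{
		/* Hypothetical DPCD link-rate table entry for HBR2:
		 * 5.4 Gbps per lane / 200 kHz unit = 27000
		 */
		int val = 27000;

		/* Per-lane bit rate: 27000 * 200 kHz = 5400000 kHz (5.4 GHz) */
		int bit_rate_khz = val * 200;

		/* (val * 200kHz) * (8/10 ch. encoding) * (1/8 bit to Byte)
		 * reduces to val * 20, i.e. the LS_Clk value the driver
		 * stores: 540000 kHz
		 */
		int ls_clk_khz = (val * 200) / 10;

		assert(bit_rate_khz == 5400000);
		assert(ls_clk_khz == 540000);
		return 0;
	}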