drm/i915: Switch to LTTPR non-transparent mode link training

The DP Standard's recommendation is to use the LTTPR non-transparent
mode link training if LTTPRs are detected, so let's do this.

Besides power saving, the advantages of non-transparent mode are that
the maximum number of LTTPRs can only be used in this mode (the limit
is 5-8 in transparent mode) and that it provides a way to narrow down
the cause of a link training failure to a given link segment.
Non-transparent mode is probably also the mode that has been tested the
most by the industry.

The changes in this patchset:
- Pass the DP PHY that is currently link trained to all LT helpers, so
  that these can access the correct LTTPR/DPRX DPCD registers.
- During LT take into account the LTTPR common link rate/lane count
  limits and the per-LTTPR-PHY vswing/pre-emph limits.
- Switch to LTTPR non-transparent LT mode and train each link segment
  according to the sequence in DP Standard v2.0 (complete CR/EQ for
  each segment before continuing with the next segment); a short sketch
  of this sequence follows below.
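
A rough sketch of the resulting sequence (simplified from the
intel_dp_link_train_all_phys() helper added by this patch; logging and
the idle-pattern handling are omitted, and the wrapper name is only
illustrative):

  static bool lt_all_phys_sketch(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state,
                                 int lttpr_count)
  {
          int i;

          intel_dp_prepare_link_train(intel_dp, crtc_state);

          /* Train each LTTPR segment, starting with the one closest to the source. */
          for (i = lttpr_count - 1; i >= 0; i--) {
                  enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
                  bool ok = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);

                  /* Clear this LTTPR's training pattern before moving to the next segment. */
                  intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
                  if (!ok)
                          return false;
          }

          /* Finally train the last segment, which ends at the DPRX (the sink). */
          return intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
  }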

v2:
- Switch to non-transparent mode during connector detection, which is
  required before reading the per-PHY LTTPR capabilities.
- Move the DP_PHY_LTTPR() macro to drm_dp_helper.h (Ville)
- Use the new drm_dp_dpcd_read_phy_link_status() instead of adding the
  same logic to intel_dp_get_link_status(). (Ville)
- Make intel_dp_lttpr_phy_caps() return a pointer to the whole array
  instead of a pointer to its first element. (Ville)
- Add the intel_dp_phy_is_downstream_of_source() helper. (Ville)
- Add a code comment about the disable->enable quirk of
  non-transparent mode.
- Add the intel_dp_training_pattern_set_reg() helper (see the sketch
  after this list).
- Fix checkpatch/sparse warns.
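
For reference, a minimal sketch of the per-PHY DPCD handling referred
to above (mirroring the intel_dp_training_pattern_set_reg() helper and
the drm_dp_dpcd_read_phy_link_status() call sites in the diff below;
the wrapper names are only illustrative):

  /* Select the TRAINING_PATTERN_SET register of the PHY being trained. */
  static int lt_pattern_set_reg_sketch(enum drm_dp_phy dp_phy)
  {
          return dp_phy == DP_PHY_DPRX ?
                  DP_TRAINING_PATTERN_SET :
                  DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
  }

  /* Read the link status registers of the given PHY (DPRX or an LTTPR). */
  static bool lt_read_phy_link_status_sketch(struct drm_dp_aux *aux,
                                             enum drm_dp_phy dp_phy,
                                             u8 link_status[DP_LINK_STATUS_SIZE])
  {
          return drm_dp_dpcd_read_phy_link_status(aux, dp_phy,
                                                  link_status) >= 0;
  }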

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201007170917.1764556-7-imre.deak@intel.com
Imre Deak 2020-10-07 20:09:17 +03:00
parent 7b2a4ab8b0
commit b30edfd8d0
5 changed files with 321 additions and 73 deletions


@@ -1303,6 +1303,7 @@ struct intel_dp {
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
u8 fec_capable;
/* source rates */
int num_source_rates;


@@ -160,6 +160,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
162000, 270000, 540000, 810000
};
int i, max_rate;
int max_lttpr_rate;
if (drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
@@ -173,6 +174,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
}
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
max_rate = min(max_rate, max_lttpr_rate);
for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
if (dp_rates[i] > max_rate)
@@ -218,6 +222,10 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
int source_max = dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
if (lttpr_max)
sink_max = min(sink_max, lttpr_max);
return min3(source_max, sink_max, fia_max);
}
@@ -4179,17 +4187,6 @@ static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
chv_phy_post_pll_disable(encoder, old_crtc_state);
}
/*
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -5592,13 +5589,15 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
&intel_dp->compliance.test_data.phytest;
u8 link_status[DP_LINK_STATUS_SIZE];
if (!intel_dp_get_link_status(intel_dp, link_status)) {
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
link_status) < 0) {
DRM_DEBUG_KMS("failed to get link status\n");
return;
}
/* retrieve vswing & pre-emphasis setting */
intel_dp_get_adjust_train(intel_dp, crtc_state, link_status);
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
link_status);
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
@@ -5756,7 +5755,8 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (intel_psr_enabled(intel_dp))
return false;
if (!intel_dp_get_link_status(intel_dp, link_status))
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
link_status) < 0)
return false;
/*


@@ -102,8 +102,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);


@@ -34,6 +34,63 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
{
int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
* Pretend no LTTPRs in case of LTTPR detection error, or
* if too many (>8) LTTPRs are detected. This translates to link
* training in transparent mode.
*/
return count <= 0 ? 0 : count;
}
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
char *buf, size_t buf_size)
{
if (dp_phy == DP_PHY_DPRX)
snprintf(buf, buf_size, "DPRX");
else
snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
return buf;
}
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
char phy_name[10];
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"failed to read the PHY caps for %s\n",
phy_name);
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"%s PHY capabilities: %*ph\n",
phy_name,
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
@@ -64,24 +121,64 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
* intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
* @intel_dp: Intel DP struct
*
* Read the LTTPR common capabilities and switch to transparent link training
* mode.
* Read the LTTPR common capabilities, switch to non-transparent link training
* mode if any is detected and read the PHY capabilities for all detected
* LTTPRs. In case of an LTTPR detection error or if the number of
* LTTPRs is more than is supported (8), fall back to the no-LTTPR,
* transparent mode link training.
*
* Returns:
* >0 if LTTPRs were detected and the non-transparent LT mode was set
* 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
* detection failure and the transparent LT mode was set
*/
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
{
int lttpr_count;
bool ret;
int i;
if (intel_dp_is_edp(intel_dp))
return 0;
intel_dp_read_lttpr_common_caps(intel_dp);
ret = intel_dp_read_lttpr_common_caps(intel_dp);
/*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
* non-transparent mode.
* non-transparent mode and the disable->enable non-transparent mode
* sequence.
*/
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
return 0;
if (!ret)
return 0;
lttpr_count = intel_dp_lttpr_count(intel_dp);
/*
* In case of an unsupported number of LTTPRs or a failure to switch to
* non-transparent mode, fall back to transparent link training mode,
* still taking into account any LTTPR common link rate/lane count limits.
*/
if (lttpr_count == 0)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
intel_dp_reset_lttpr_count(intel_dp);
return 0;
}
for (i = 0; i < lttpr_count; i++)
intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));
return lttpr_count;
}
static u8 dp_voltage_max(u8 preemph)
{
@@ -98,12 +195,91 @@ static u8 dp_voltage_max(u8 preemph)
}
}
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
return DP_TRAIN_PRE_EMPH_LEVEL_3;
else
return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int lttpr_count = intel_dp_lttpr_count(intel_dp);
drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage_max;
/*
* Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
* the DPRX_PHY we train.
*/
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
else
voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
drm_WARN_ON_ONCE(&i915->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
return voltage_max;
}
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 preemph_max;
/*
* Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
* the DPRX_PHY we train.
*/
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
preemph_max = intel_dp->preemph_max(intel_dp);
else
preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
drm_WARN_ON_ONCE(&i915->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
return preemph_max;
}
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 v = 0;
u8 p = 0;
int lane;
@@ -115,21 +291,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
}
preemph_max = intel_dp->preemph_max(intel_dp);
drm_WARN_ON_ONCE(&i915->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
v = min(v, dp_voltage_max(p));
voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
drm_WARN_ON_ONCE(&i915->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
@@ -137,11 +305,21 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
intel_dp->train_set[lane] = v | p;
}
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
return dp_phy == DP_PHY_DPRX ?
DP_TRAINING_PATTERN_SET :
DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
u8 buf[sizeof(intel_dp->train_set) + 1];
int len;
@@ -153,29 +331,33 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
len = crtc_state->lane_count + 1;
return drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len) == len;
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, crtc_state);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_train_pat);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
int reg = dp_phy == DP_PHY_DPRX ?
DP_TRAINING_LANE0_SET :
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
intel_dp_set_signal_levels(intel_dp, crtc_state);
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
return ret == crtc_state->lane_count;
@@ -240,10 +422,23 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
return true;
}
/* Perform the link training clock recovery phase using training pattern 1. */
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
else
drm_dp_lttpr_link_train_clock_recovery_delay();
}
/*
* Perform the link training clock recovery phase on the given DP PHY using
* training pattern 1.
*/
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
@@ -251,7 +446,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
bool max_vswing_reached = false;
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state,
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
drm_err(&i915->drm, "failed to enable link training\n");
@@ -275,9 +470,10 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
u8 link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm, "failed to get link status\n");
return false;
}
@@ -301,8 +497,9 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, crtc_state, link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state)) {
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"failed to update link training\n");
return false;
@@ -329,7 +526,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
@@ -338,9 +536,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* for all downstream devices that support HBR3. There are no known eDP
* panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
* specification.
* LTTPRs must support TPS4.
*/
source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
return DP_TRAINING_PATTERN_4;
} else if (crtc_state->port_clock == 810000) {
@@ -357,7 +557,8 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* all sinks follow the spec.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
return DP_TRAINING_PATTERN_3;
} else if (crtc_state->port_clock >= 540000) {
@@ -372,13 +573,28 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
return DP_TRAINING_PATTERN_2;
}
static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
}
}
/*
* Perform the link training channel equalization phase using one of training
* pattern 2, 3 or 4 depending on the source and sink capabilities.
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
* sink capabilities.
*/
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int tries;
@@ -386,22 +602,23 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state);
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
if (training_pattern != DP_TRAINING_PATTERN_4)
training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, crtc_state,
if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
training_pattern)) {
drm_err(&i915->drm, "failed to start channel equalization\n");
return false;
}
for (tries = 0; tries < 5; tries++) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_training_channel_equalization_delay(intel_dp,
dp_phy);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
"failed to get link status\n");
break;
@@ -426,8 +643,9 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, crtc_state, link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state)) {
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"failed to update link training\n");
break;
@ -441,17 +659,16 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
"Channel equalization failed 5 times\n");
}
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
return channel_eq;
}
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp)
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
u8 val = DP_TRAINING_PATTERN_DISABLE;
return drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, &val, 1) == 1;
return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}
/**
@@ -478,33 +695,34 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
intel_dp_program_link_training_pattern(intel_dp,
crtc_state,
DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
}
static bool
intel_dp_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
intel_dp_link_train_phy(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
char phy_name[10];
bool ret = false;
intel_dp_prepare_link_train(intel_dp, crtc_state);
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state))
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
goto out;
if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state))
if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
goto out;
ret = true;
out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d",
"[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
intel_connector->base.base.id,
intel_connector->base.name,
ret ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
crtc_state->port_clock, crtc_state->lane_count,
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
return ret;
}
@@ -528,6 +746,36 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
schedule_work(&intel_connector->modeset_retry_work);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int lttpr_count)
{
bool ret = true;
int i;
intel_dp_prepare_link_train(intel_dp, crtc_state);
for (i = lttpr_count - 1; i >= 0; i--) {
enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
if (!ret)
break;
}
if (ret)
intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
return ret;
}
/**
* intel_dp_start_link_train - start link training
* @intel_dp: DP struct
@@ -545,8 +793,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
*/
intel_dp_lttpr_init(intel_dp);
int lttpr_count = intel_dp_lttpr_init(intel_dp);
if (!intel_dp_link_train(intel_dp, crtc_state))
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}


@@ -15,6 +15,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp);
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);