drm/display: dp-mst-topology: use new DPCD access helpers

Switch drm_dp_mst_topology.c to the new set of DPCD read / write helpers.
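
The main functional difference is the return convention: the legacy
drm_dp_dpcd_read() / drm_dp_dpcd_write() helpers return the number of bytes
transferred, so callers have to compare against the requested length, while
the new *_data and *_byte helpers return 0 on success or a negative error
code. A minimal before/after sketch of the caller pattern (illustrative only,
mirroring the drm_dp_read_mst_cap() hunk below):

	u8 mstm_cap;

	/* before: readb returns the number of bytes read, caller checks the count */
	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return DRM_DP_SST;

	/* after: read_byte returns 0 on success or a negative error code */
	if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
		return DRM_DP_SST;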

Reviewed-by: Lyude Paul <lyude@redhat.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-5-e80ff89593df@oss.qualcomm.com
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>

@@ -2201,15 +2201,12 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
 						     mstb->port_parent,
 						     DP_GUID, sizeof(buf), buf);
 		} else {
-			ret = drm_dp_dpcd_write(mstb->mgr->aux,
-						DP_GUID, buf, sizeof(buf));
+			ret = drm_dp_dpcd_write_data(mstb->mgr->aux,
+						     DP_GUID, buf, sizeof(buf));
 		}
 	}
 
-	if (ret < 16 && ret > 0)
-		return -EPROTO;
-
-	return ret == 16 ? 0 : ret;
+	return ret;
 }
 
 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2741,13 @@ static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
 	do {
 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
 
-		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
-					&msg[offset],
-					tosend);
-		if (ret != tosend) {
-			if (ret == -EIO && retries < 5) {
-				retries++;
-				goto retry;
-			}
+		ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+					     &msg[offset],
+					     tosend);
+		if (ret == -EIO && retries < 5) {
+			retries++;
+			goto retry;
+		} else if (ret < 0) {
 			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
 
 			return -EIO;
@@ -3624,7 +3620,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
 	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
 		return DRM_DP_SST;
 
-	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+	if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
 		return DRM_DP_SST;
 
 	if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3675,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 		mgr->mst_primary = mstb;
 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
 
-		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-					 DP_MST_EN |
-					 DP_UP_REQ_EN |
-					 DP_UPSTREAM_IS_SRC);
+		ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+					     DP_MST_EN |
+					     DP_UP_REQ_EN |
+					     DP_UPSTREAM_IS_SRC);
 		if (ret < 0)
 			goto out_unlock;
 
@@ -3697,7 +3693,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 		mstb = mgr->mst_primary;
 		mgr->mst_primary = NULL;
 		/* this can fail if the device is gone */
-		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+		drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
 		ret = 0;
 		mgr->payload_id_table_cleared = false;
 
@@ -3763,8 +3759,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
 {
 	mutex_lock(&mgr->lock);
-	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
+	drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+			       DP_MST_EN | DP_UPSTREAM_IS_SRC);
 	mutex_unlock(&mgr->lock);
 	flush_work(&mgr->up_req_work);
 	flush_work(&mgr->work);
@@ -3813,18 +3809,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
 		goto out_fail;
 	}
 
-	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-				 DP_MST_EN |
-				 DP_UP_REQ_EN |
-				 DP_UPSTREAM_IS_SRC);
+	ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+				     DP_MST_EN |
+				     DP_UP_REQ_EN |
+				     DP_UPSTREAM_IS_SRC);
 	if (ret < 0) {
 		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
 		goto out_fail;
 	}
 
 	/* Some hubs forget their guids after they resume */
-	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
-	if (ret != sizeof(buf)) {
+	ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+	if (ret < 0) {
 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
 		goto out_fail;
 	}
@@ -3883,8 +3879,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
 	*mstb = NULL;
 
 	len = min(mgr->max_dpcd_transaction_bytes, 16);
-	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
-	if (ret != len) {
+	ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+	if (ret < 0) {
 		drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
 		return false;
 	}
@@ -3922,9 +3918,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
 	curreply = len;
 	while (replylen > 0) {
 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
-		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
-				       replyblock, len);
-		if (ret != len) {
+		ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+					    replyblock, len);
+		if (ret < 0) {
 			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
 				    len, ret);
 			return false;
@@ -4873,9 +4869,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 	int i;
 
 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
-		if (drm_dp_dpcd_read(mgr->aux,
-				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
-				     &buf[i], 16) != 16)
+		if (drm_dp_dpcd_read_data(mgr->aux,
+					  DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+					  &buf[i], 16) < 0)
 			return false;
 	}
 
 	return true;
@@ -4964,23 +4960,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 		}
 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
 
-		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
-		if (ret != 2) {
+		ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+		if (ret < 0) {
 			seq_printf(m, "faux/mst read failed\n");
 			goto out;
 		}
 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
 
-		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
-		if (ret != 1) {
+		ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+		if (ret < 0) {
 			seq_printf(m, "mst ctrl read failed\n");
 			goto out;
 		}
 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
 
 		/* dump the standard OUI branch header */
-		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
-		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+		ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+					    DP_BRANCH_OUI_HEADER_SIZE);
+		if (ret < 0) {
 			seq_printf(m, "branch oui read failed\n");
 			goto out;
 		}
@@ -6104,14 +6101,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	/* DP-to-DP peer device */
 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
-		if (drm_dp_dpcd_read(&port->aux,
-				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+		if (drm_dp_dpcd_read_data(&port->aux,
+					  DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
 			return NULL;
-		if (drm_dp_dpcd_read(&port->aux,
-				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+		if (drm_dp_dpcd_read_data(&port->aux,
+					  DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
 			return NULL;
-		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
-				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+		if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+					  DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
 			return NULL;
 
 		/* Enpoint decompression with DP-to-DP peer device */
@@ -6149,8 +6146,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
 		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
 
-		if (drm_dp_dpcd_read(immediate_upstream_aux,
-				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+		if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+					  DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
 			return NULL;
 
 		if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6172,11 +6169,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
 	 */
-	if (drm_dp_dpcd_read(&port->aux,
-	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+	if (drm_dp_dpcd_read_data(&port->aux,
+				  DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
 		return NULL;
-	if (drm_dp_dpcd_read(&port->aux,
-	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+	if (drm_dp_dpcd_read_data(&port->aux,
+				  DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
 		return NULL;
 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
 	    (endpoint_fec & DP_FEC_CAPABLE))