drm fixes for 5.5-rc7
Merge tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Back from LCA2020. Fixes weren't too busy last week; things seem to
  have quietened down appropriately: some amdgpu and i915 fixes, then a
  core mst fix and one fix each for virtio-gpu and rockchip:

  core mst:
   - serialize down messages and clear time slots on unplug

  amdgpu:
   - Update golden settings for renoir
   - eDP fix

  i915:
   - uAPI fix: Remove dash and colon from PMU names to comply with
     tools/perf
   - Fix for an include file that was only indirectly included
   - Two fixes to make sure VMAs are marked active for error capture

  virtio:
   - maintain the object reservation lock when submitting commands

  rockchip:
   - increase the link rate variable size to accommodate higher rates"

* tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Reorder detect_edp_sink_caps before link settings read.
  drm/amdgpu: update goldensetting for renoir
  drm/dp_mst: Have DP_Tx send one msg at a time
  drm/dp_mst: clear time slots for ports invalid
  drm/i915/pmu: Do not use colons or dashes in PMU names
  drm/rockchip: fix integer type used for storing dp data rate
  drm/i915/gt: Mark ring->vma as active while pinned
  drm/i915/gt: Mark context->state vma as active while pinned
  drm/i915/gt: Skip trying to unbind in restore_ggtt_mappings
  drm/i915: Add missing include file <linux/math64.h>
  drm/virtio: add missing virtio_gpu_array_lock_resv call
commit 244dc26890
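As background for the drm_dp_mst_topology.c hunks below: the core MST fix serializes sideband down-requests with an is_waiting_for_dwn_reply flag, set under mgr->qlock when a request goes out and cleared when its reply (or an error path) is handled, so drm_dp_queue_down_tx() and drm_dp_tx_work() only push the next queued message once the previous one has been answered. The following is a minimal, hypothetical user-space model of that gating pattern, not the driver code; the struct, queue handling and pthread plumbing are illustrative stand-ins.

/*
 * Toy model of the "one down-request in flight" gating added by the
 * dp_mst hunks below.  Illustrative only: the real driver works on
 * struct drm_dp_mst_topology_mgr and its tx_msg_downq/qlock members.
 * Build with: cc -pthread gating.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct msg {
	const char *name;
	struct msg *next;
};

struct mgr {
	pthread_mutex_t qlock;          /* protects downq and the flag */
	struct msg *downq;              /* FIFO of queued down-requests */
	bool is_waiting_for_dwn_reply;  /* a request is on the wire */
};

/* Send the queue head, but only if no reply is still outstanding. */
static void process_single_down_tx(struct mgr *mgr)
{
	struct msg *m = mgr->downq;

	if (!m || mgr->is_waiting_for_dwn_reply)
		return;                 /* serialized: wait for the reply */

	mgr->downq = m->next;
	mgr->is_waiting_for_dwn_reply = true;
	printf("tx: %s\n", m->name);
}

static void queue_down_tx(struct mgr *mgr, struct msg *m)
{
	struct msg **it;

	pthread_mutex_lock(&mgr->qlock);
	m->next = NULL;
	for (it = &mgr->downq; *it; it = &(*it)->next)
		;                       /* append to the tail */
	*it = m;
	process_single_down_tx(mgr);
	pthread_mutex_unlock(&mgr->qlock);
}

/* Like the down_rep handler: clear the flag, then kick the queue again. */
static void handle_down_reply(struct mgr *mgr)
{
	pthread_mutex_lock(&mgr->qlock);
	mgr->is_waiting_for_dwn_reply = false;
	process_single_down_tx(mgr);
	pthread_mutex_unlock(&mgr->qlock);
}

int main(void)
{
	struct mgr mgr = { .qlock = PTHREAD_MUTEX_INITIALIZER };
	struct msg a = { .name = "LINK_ADDRESS" };
	struct msg b = { .name = "ENUM_PATH_RESOURCES" };

	queue_down_tx(&mgr, &a);  /* sent immediately */
	queue_down_tx(&mgr, &b);  /* only queued: a reply is outstanding */
	handle_down_reply(&mgr);  /* reply for 'a' arrives, 'b' goes out */
	handle_down_reply(&mgr);  /* reply for 'b' */
	return 0;
}

In the actual patch the same flag is also cleared on the error paths (the timeout in drm_dp_mst_wait_tx_reply() and the clear_down_rep_recv label) so that a lost reply cannot wedge the queue.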
@@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
-	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
@@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
 	}
 
 	case SIGNAL_TYPE_EDP: {
-		read_current_link_settings_on_detect(link);
 		detect_edp_sink_caps(link);
+		read_current_link_settings_on_detect(link);
 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
 		sink_caps.signal = SIGNAL_TYPE_EDP;
 		break;
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1201,7 @@ out:
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2318,7 +2321,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
 	struct drm_dp_mst_port *port;
-	int old_ddps, ret;
+	int old_ddps, old_input, ret, i;
 	u8 new_pdt;
 	bool dowork = false, create_connector = false;
 
@@ -2349,6 +2352,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	}
 
 	old_ddps = port->ddps;
+	old_input = port->input;
 	port->input = conn_stat->input_port;
 	port->mcs = conn_stat->message_capability_status;
 	port->ldps = conn_stat->legacy_device_plug_status;
@@ -2373,6 +2377,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 		dowork = false;
 	}
 
+	if (!old_input && old_ddps != port->ddps && !port->ddps) {
+		for (i = 0; i < mgr->max_payloads; i++) {
+			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+			struct drm_dp_mst_port *port_validated;
+
+			if (!vcpi)
+				continue;
+
+			port_validated =
+				container_of(vcpi, struct drm_dp_mst_port, vcpi);
+			port_validated =
+				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+			if (!port_validated) {
+				mutex_lock(&mgr->payload_lock);
+				vcpi->num_slots = 0;
+				mutex_unlock(&mgr->payload_lock);
+			} else {
+				drm_dp_mst_topology_put_port(port_validated);
+			}
+		}
+	}
+
 	if (port->connector)
 		drm_modeset_unlock(&mgr->base.lock);
 	else if (create_connector)
@@ -2718,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2760,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3678,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[slot] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3687,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
 	drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
 	return 0;
@@ -4497,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma)
 	if (err)
 		return err;
 
+	err = i915_active_acquire(&vma->active);
+	if (err)
+		goto err_unpin;
+
 	/*
 	 * And mark it as a globally pinned object to let the shrinker know
 	 * it cannot reclaim the object until we release it.
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma)
 	vma->obj->mm.dirty = true;
 
 	return 0;
+
+err_unpin:
+	i915_vma_unpin(vma);
+	return err;
 }
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
 	i915_vma_make_shrinkable(vma);
+	i915_active_release(&vma->active);
 	__i915_vma_unpin(vma);
 }
 
+static int __ring_active(struct intel_ring *ring)
+{
+	int err;
+
+	err = i915_active_acquire(&ring->vma->active);
+	if (err)
+		return err;
+
+	err = intel_ring_pin(ring);
+	if (err)
+		goto err_active;
+
+	return 0;
+
+err_active:
+	i915_active_release(&ring->vma->active);
+	return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+	intel_ring_unpin(ring);
+	i915_active_release(&ring->vma->active);
+}
+
 __i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
@@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active)
 	__context_unpin_state(ce->state);
 
 	intel_timeline_unpin(ce->timeline);
-	intel_ring_unpin(ce->ring);
+	__ring_retire(ce->ring);
 
 	intel_context_put(ce);
 }
@@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active)
 
 	intel_context_get(ce);
 
-	err = intel_ring_pin(ce->ring);
+	err = __ring_active(ce->ring);
 	if (err)
 		goto err_put;
 
@@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active)
 err_timeline:
 	intel_timeline_unpin(ce->timeline);
 err_ring:
-	intel_ring_unpin(ce->ring);
+	__ring_retire(ce->ring);
 err_put:
 	intel_context_put(ce);
 	return err;
@@ -3304,7 +3304,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 
 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-	struct i915_vma *vma, *vn;
+	struct i915_vma *vma;
 	bool flush = false;
 	int open;
 
@@ -3319,15 +3319,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	open = atomic_xchg(&ggtt->vm.open, 0);
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
 			continue;
 
-		if (!__i915_vma_unbind(vma))
-			continue;
-
 		clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
@@ -1074,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pmu->timer.function = i915_sample;
 
-	if (!is_igp(i915))
+	if (!is_igp(i915)) {
 		pmu->name = kasprintf(GFP_KERNEL,
-				      "i915-%s",
+				      "i915_%s",
 				      dev_name(i915->drm.dev));
-	else
+		if (pmu->name) {
+			/* tools/perf reserves colons as special. */
+			strreplace((char *)pmu->name, ':', '_');
+		}
+	} else {
 		pmu->name = "i915";
+	}
 	if (!pmu->name)
 		goto err;
 
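The PMU name built above becomes the perf event source name (the directory under /sys/bus/event_source/devices/ that perf's event parser refers to), which is why dashes and colons have to go. A small hypothetical user-space check of the resulting string; the kernel itself uses kasprintf() plus strreplace(), and the PCI device name below is just an example value of what dev_name() might report:

/*
 * Hypothetical illustration of the renaming done in the hunk above.
 * replace_chars() stands in for the kernel's strreplace().
 * Build with: cc pmu_name.c
 */
#include <stdio.h>

static void replace_chars(char *s, char old, char new)
{
	for (; *s; s++)
		if (*s == old)
			*s = new;
}

int main(void)
{
	const char *dev = "0000:00:02.0";   /* example PCI device name */
	char name[64];

	/* new scheme: "i915_%s", then every ':' becomes '_' */
	snprintf(name, sizeof(name), "i915_%s", dev);
	replace_chars(name, ':', '_');
	printf("%s\n", name);               /* prints i915_0000_00_02.0 */
	return 0;
}

On integrated GPUs the name stays plain "i915", as the else branch in the hunk shows.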
@@ -25,6 +25,7 @@
 #ifndef __I915_SELFTESTS_RANDOM_H__
 #define __I915_SELFTESTS_RANDOM_H__
 
+#include <linux/math64.h>
 #include <linux/random.h>
 
 #include "../i915_selftest.h"
@@ -95,7 +95,7 @@ struct cdn_dp_device {
 	struct cdn_dp_port *port[MAX_PHY];
 	u8 ports;
 	u8 max_lanes;
-	u8 max_rate;
+	unsigned int max_rate;
 	u8 lanes;
 	int active_port;
 
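For context on the type change above: the DRM DP helpers commonly pass link rates around in kHz (162000, 270000, 540000 for RBR/HBR/HBR2), so storing one in a u8 silently truncates it. A quick illustration using standard DP rate values (assumed here for illustration, not values read from this driver):

/* Truncation demo: a u8 cannot hold a DP link rate expressed in kHz. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int rate = 540000;    /* HBR2, 5.4 Gbps per lane, in kHz */
	uint8_t as_u8 = (uint8_t)rate; /* 540000 % 256 == 96 */

	printf("unsigned int: %u, u8: %u\n", rate, as_u8);
	return 0;
}

Widening max_rate to unsigned int matches the int/kHz convention used by drm_dp_max_link_rate()-style helpers.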
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		if (!objs)
 			return;
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, 0,
 			 plane->state->crtc_w,
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
 	 * &drm_dp_sideband_msg_tx.state once they are queued
 	 */
 	struct mutex qlock;
+
+	/**
+	 * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+	 */
+	bool is_waiting_for_dwn_reply;
+
 	/**
 	 * @tx_msg_downq: List of pending down replies.
 	 */