// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *
to_vc4_ctm_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
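
/*
 * A worked example of the S31.32 -> S0.9 mapping done below (illustrative
 * only, not from the hardware documentation): 0.5 in S31.32 is
 * 0x0000000080000000; shifting the fraction right by 23 bits keeps its 9
 * most significant bits, 0x100, i.e. 256/512 = 0.5 in S0.9. Any value with
 * a non-zero integer part saturates to 0x1ff since S0.9 has no integer bits.
 */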

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
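
/*
 * Note on the three accessors below: vc4_hvs_get_new_global_state() and
 * vc4_hvs_get_old_global_state() only look up a private object state that
 * is already part of the atomic commit, while vc4_hvs_get_global_state()
 * goes through drm_atomic_get_private_obj_state(), which takes the private
 * object's lock and duplicates its state into the commit when needed.
 */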

static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}
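
/*
 * The BCM2711 ("vc5") variant below has more outputs to route: it switches
 * on vc4_crtc->data->hvs_output and reprograms the DSP2, DSP3, DSP4 and
 * DSP5 muxes rather than only the FIFO2 -> DSP3 route handled above.
 */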

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}
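
/*
 * vc4_atomic_commit_tail() pairs with vc4_atomic_commit_setup() further
 * down: setup stores a reference to the drm_crtc_commit of every FIFO
 * affected by a new state, and the next commit's tail waits on the pending
 * commits found in the old HVS state before touching the hardware, so
 * commits using the same FIFO effectively complete in submission order.
 */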

static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->is_vc5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = max_t(unsigned long,
						500000000, state_rate);

		drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		clk_set_min_rate(hvs->core_clk, core_rate);
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->is_vc5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->is_vc5) {
		drm_dbg(dev, "Running the core clock at %lu Hz\n",
			new_hvs_state->core_clock_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}
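
/*
 * For scale (illustrative arithmetic only, not the driver's per-plane load
 * formula): a single 1920x1080 ARGB8888 plane refreshed at 60 Hz already
 * reads roughly 1920 * 1080 * 4 * 60 ~= 498 MB/s, i.e. about a third of the
 * ~1.5 GB/s memory-bus budget checked above.
 */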

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
2020-11-05 13:56:53 +00:00
|
|
|
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
|
|
|
|
struct drm_atomic_state *state)
|
2018-04-20 12:25:44 +00:00
|
|
|
{
|
drm/vc4: kms: Store the unassigned channel list in the state
If a CRTC is enabled but not active, and that we're then doing a page
flip on another CRTC, drm_atomic_get_crtc_state will bring the first
CRTC state into the global state, and will make us wait for its vblank
as well, even though that might never occur.
Instead of creating the list of the free channels each time atomic_check
is called, and calling drm_atomic_get_crtc_state to retrieve the
allocated channels, let's create a private state object in the main
atomic state, and use it to store the available channels.
Since vc4 has a semaphore (with a value of 1, so a lock) in its commit
implementation to serialize all the commits, even the nonblocking ones, we
are free from the use-after-free race if two subsequent commits are not ran
in their submission order.
Fixes: 87ebcd42fb7b ("drm/vc4: crtc: Assign output to channel automatically")
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Tested-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
Reviewed-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201120144245.398711-2-maxime@cerno.tech
2020-11-20 14:42:44 +00:00
|
|
|
struct vc4_hvs_state *hvs_new_state;
|
2020-09-23 08:40:32 +00:00
|
|
|
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
2020-09-03 08:00:46 +00:00
|
|
|
struct drm_crtc *crtc;
|
2020-12-04 15:11:36 +00:00
|
|
|
unsigned int unassigned_channels = 0;
|
2020-11-05 13:56:53 +00:00
|
|
|
unsigned int i;

        hvs_new_state = vc4_hvs_get_global_state(state);
        if (IS_ERR(hvs_new_state))
                return PTR_ERR(hvs_new_state);

        for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
                if (!hvs_new_state->fifo_state[i].in_use)
                        unassigned_channels |= BIT(i);
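
        /*
         * Worked example (hypothetical state, for illustration only): if
         * FIFO 1 is already in use by a running CRTC while FIFOs 0 and 2
         * are free, the loop above leaves unassigned_channels ==
         * BIT(0) | BIT(2) == 0x5, so only channels 0 and 2 can be handed
         * out by the assignment loop below.
         */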

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct vc4_crtc_state *old_vc4_crtc_state =
                        to_vc4_crtc_state(old_crtc_state);
                struct vc4_crtc_state *new_vc4_crtc_state =
                        to_vc4_crtc_state(new_crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
                unsigned int matching_channels;
                unsigned int channel;

                drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

                /* Nothing to do here, let's skip it */
                if (old_crtc_state->enable == new_crtc_state->enable) {
                        if (new_crtc_state->enable)
                                drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
                                        crtc->name, new_vc4_crtc_state->assigned_channel);
                        else
                                drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

                        continue;
                }
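
                /*
                 * A plain page flip, for instance, leaves ->enable unchanged
                 * in both the old and new state, so it is skipped by the
                 * branch above: the CRTC keeps whatever channel it already
                 * owns and the HVS muxing is left untouched.
                 */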

                /* Muxing will need to be modified, mark it as such */
                new_vc4_crtc_state->update_muxing = true;

                /* If we're disabling our CRTC, we put back our channel */
                if (!new_crtc_state->enable) {
                        channel = old_vc4_crtc_state->assigned_channel;

                        drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
                                crtc->name, channel);

                        hvs_new_state->fifo_state[channel].in_use = false;
                        new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
                        continue;
                }

                /*
                 * The problem we have to solve here is that we have
                 * up to 7 encoders, connected to up to 6 CRTCs.
                 *
                 * Those CRTCs, depending on the instance, can be
                 * routed to 1, 2 or 3 HVS FIFOs, and we need to
                 * change the muxing between FIFOs and outputs in
                 * the HVS accordingly.
                 *
                 * It would be pretty hard to come up with an
                 * algorithm that would generically solve
                 * this. However, the current routing trees we support
                 * allow us to simplify the problem a bit.
                 *
                 * Indeed, with the currently supported layouts, if we
                 * try to assign the FIFOs in ascending CRTC index
                 * order, we can't fall into the situation where an
                 * earlier CRTC that had multiple routes is assigned
                 * one that was the only option for a later CRTC.
                 *
                 * If the layout changes and doesn't give us that in
                 * the future, we will need to have something smarter,
                 * but it works so far.
                 */
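                /*
                 * Worked example with hypothetical routing masks (the real
                 * per-CRTC masks come from vc4_crtc->data->hvs_available_channels):
                 * if CRTC 0 may use channels {0, 1, 2} and CRTC 1 may only
                 * use channel {1}, assigning in ascending order gives CRTC 0
                 * ffs(0b111) - 1 = 0, which still leaves channel 1 free for
                 * CRTC 1.
                 */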
                matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
                if (!matching_channels)
                        return -EINVAL;

                channel = ffs(matching_channels) - 1;

                drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
                new_vc4_crtc_state->assigned_channel = channel;
                unassigned_channels &= ~BIT(channel);
                hvs_new_state->fifo_state[channel].in_use = true;
        }

        return 0;
}

static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
        struct drm_private_state *priv_state;
        struct vc4_hvs_state *hvs_new_state;
        struct vc4_load_tracker_state *load_state;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        unsigned int num_outputs;
        unsigned long pixel_rate;
        unsigned long cob_rate;
        unsigned int i;

        priv_state = drm_atomic_get_private_obj_state(state,
                                                      &vc4->load_tracker);
        if (IS_ERR(priv_state))
                return PTR_ERR(priv_state);

        load_state = to_vc4_load_tracker_state(priv_state);

        hvs_new_state = vc4_hvs_get_global_state(state);
        if (IS_ERR(hvs_new_state))
                return PTR_ERR(hvs_new_state);

        for_each_oldnew_crtc_in_state(state, crtc,
                                      old_crtc_state,
                                      new_crtc_state,
                                      i) {
                if (old_crtc_state->active) {
                        struct vc4_crtc_state *old_vc4_state =
                                to_vc4_crtc_state(old_crtc_state);
                        unsigned int channel = old_vc4_state->assigned_channel;

                        hvs_new_state->fifo_state[channel].fifo_load = 0;
                }

                if (new_crtc_state->active) {
                        struct vc4_crtc_state *new_vc4_state =
                                to_vc4_crtc_state(new_crtc_state);
                        unsigned int channel = new_vc4_state->assigned_channel;

                        hvs_new_state->fifo_state[channel].fifo_load =
                                new_vc4_state->hvs_load;
                }
        }
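
        /*
         * After this loop, each in-use FIFO carries the load of the CRTC
         * that owns it: a CRTC going inactive has cleared its FIFO's load
         * above, while one staying active has refreshed it with the new
         * state's hvs_load.
         */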

        cob_rate = 0;
        num_outputs = 0;
        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                if (!hvs_new_state->fifo_state[i].in_use)
                        continue;

                num_outputs++;
                cob_rate += hvs_new_state->fifo_state[i].fifo_load;
        }

        pixel_rate = load_state->hvs_load;
        if (num_outputs > 1) {
                pixel_rate = (pixel_rate * 40) / 100;
        } else {
                pixel_rate = (pixel_rate * 60) / 100;
        }

        hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
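
        /*
         * Worked example with made-up numbers: with two active outputs and
         * a tracked hvs_load of 500 MHz, pixel_rate = 500 * 40 / 100 =
         * 200 MHz; if the per-FIFO loads sum to cob_rate = 300 MHz, the
         * requested core_clock_rate is max(300, 200) = 300 MHz.
         */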

        return 0;
}
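
/*
 * The checks below build on each other: vc4_pv_muxing_atomic_check() runs
 * first so that every enabled CRTC in the state ends up with an
 * assigned_channel, drm_atomic_helper_check() and
 * vc4_load_tracker_atomic_check() then fill in the plane and load-tracker
 * state, and vc4_core_clock_atomic_check() finally turns the per-FIFO loads
 * into a core clock rate request.
 */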

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        int ret;

        ret = vc4_pv_muxing_atomic_check(dev, state);
        if (ret)
                return ret;

        ret = vc4_ctm_atomic_check(dev, state);
        if (ret < 0)
                return ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        ret = vc4_load_tracker_atomic_check(state);
        if (ret)
                return ret;

        return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
        .atomic_commit_setup    = vc4_atomic_commit_setup,
        .atomic_commit_tail     = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .atomic_check = vc4_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
        .fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
        .atomic_check = vc4_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
        .fb_create = drm_gem_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        /*
         * The limits enforced by the load tracker aren't relevant for
         * the BCM2711, but the load tracker computations are used for
         * the core clock rate calculation.
         */
        if (!vc4->is_vc5) {
                /* Start with the load tracker enabled. Can be
                 * disabled through the debugfs load_tracker file.
                 */
                vc4->load_tracker_enabled = true;
        }

        /* Set support for vblank irq fast disable, before drm_vblank_init() */
        dev->vblank_disable_immediate = true;

        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        if (vc4->is_vc5) {
                dev->mode_config.max_width = 7680;
                dev->mode_config.max_height = 7680;
        } else {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        }

        dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
        dev->mode_config.helper_private = &vc4_mode_config_helpers;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;

        ret = vc4_ctm_obj_init(vc4);
        if (ret)
                return ret;

        ret = vc4_load_tracker_obj_init(vc4);
        if (ret)
                return ret;

        ret = vc4_hvs_channels_obj_init(vc4);
        if (ret)
                return ret;

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        return 0;
}