Merge branch 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel into drm-next

This series of patches fixes minor bugs around how the driver sub-components are
bound and how planes are z-ordered.
The main part is about atomic support: using more of the atomic helpers allows us
to simplify the code (~300 lines removed) and to get a better match between
the DRM concepts (planes and CRTCs) and the hardware split.

[airlied: fixed up conflict in atomic code]

* 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel:
  drm/sti: atomic crtc/plane update
  drm/sti: rename files and functions
  drm/sti: code clean up
  drm/sti: fix dynamic z-ordering
  drm: sti: fix sub-components bind
Committed by Dave Airlie on 2015-08-14 10:14:23 +10:00 (commit e1474e7bdf).
27 changed files with 1385 additions and 1697 deletions.
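For context (this is not part of the series itself): the conversion boils down to giving each hardware block per-plane atomic hooks and delegating the legacy plane entry points to the generic atomic helpers, which is what the sti_cursor and sti_gdp changes below do. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver-specific parts and only the drm_* calls being real kernel API of the v4.2-era tree this series targets:

/*
 * Minimal sketch of the atomic plane pattern; foo_* names are placeholders.
 */
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *oldstate)
{
	struct drm_plane_state *state = plane->state;

	/* Program the hardware from state->fb and state->crtc_x/y/w/h. */
}

static void foo_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *oldstate)
{
	/* Stop fetching/compositing this plane. */
}

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_update = foo_plane_atomic_update,
	.atomic_disable = foo_plane_atomic_disable,
};

/* Legacy entry points are serviced entirely by the atomic helpers. */
static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int foo_plane_register(struct drm_device *dev, struct drm_plane *plane,
			      unsigned long possible_crtcs,
			      const uint32_t *formats, uint32_t nb_formats,
			      enum drm_plane_type type)
{
	int ret;

	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &foo_plane_funcs, formats, nb_formats,
				       type);
	if (ret)
		return ret;

	drm_plane_helper_add(plane, &foo_plane_helper_funcs);
	return 0;
}

The sti_cursor and sti_gdp code in the diffs below follows this shape, with the hardware programming done in atomic_update and the mixer enable/depth bookkeeping deferred to the CRTC's atomic_flush.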

@ -52,10 +52,9 @@ STMicroelectronics stih4xx platforms
See ../reset/reset.txt for details.
- reset-names: names of the resets listed in resets property in the same
order.
- ranges: to allow probing of subdevices
- sti-hdmi: hdmi output block
must be a child of sti-tvout
must be a child of sti-display-subsystem
Required properties:
- compatible: "st,stih<chip>-hdmi";
- reg: Physical base address of the IP registers and length of memory mapped region.
@ -72,7 +71,7 @@ STMicroelectronics stih4xx platforms
sti-hda:
Required properties:
must be a child of sti-tvout
must be a child of sti-display-subsystem
- compatible: "st,stih<chip>-hda"
- reg: Physical base address of the IP registers and length of memory mapped region.
- reg-names: names of the mapped memory regions listed in regs property in
@ -85,7 +84,7 @@ sti-hda:
sti-dvo:
Required properties:
must be a child of sti-tvout
must be a child of sti-display-subsystem
- compatible: "st,stih<chip>-dvo"
- reg: Physical base address of the IP registers and length of memory mapped region.
- reg-names: names of the mapped memory regions listed in regs property in
@ -195,7 +194,7 @@ Example:
reg-names = "tvout-reg", "hda-reg", "syscfg";
reset-names = "tvout";
resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
ranges;
};
sti-hdmi@fe85c000 {
compatible = "st,stih416-hdmi";
@ -227,7 +226,6 @@ Example:
pinctrl-0 = <&pinctrl_dvo>;
sti,panel = <&panel_dvo>;
};
};
sti-hqvdp@9c000000 {
compatible = "st,stih407-hqvdp";

@ -1,12 +1,11 @@
sticompositor-y := \
sti_layer.o \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
sti_cursor.o \
sti_compositor.o \
sti_drm_crtc.o \
sti_drm_plane.o
sti_crtc.o \
sti_plane.o
stihdmi-y := sti_hdmi.o \
sti_hdmi_tx3g0c55phy.o \
@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
sticompositor.o \
sti_hqvdp.o \
stidvo.o \
sti_drm_drv.o
sti_drv.o

@ -14,10 +14,12 @@
#include <drm/drmP.h>
#include "sti_compositor.h"
#include "sti_drm_crtc.h"
#include "sti_drm_drv.h"
#include "sti_drm_plane.h"
#include "sti_crtc.h"
#include "sti_cursor.h"
#include "sti_drv.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
/*
@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
{STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
},
};
static int sti_compositor_init_subdev(struct sti_compositor *compo,
struct sti_compositor_subdev_descriptor *desc,
unsigned int array_size)
static int sti_compositor_bind(struct device *dev,
struct device *master,
void *data)
{
unsigned int i, mixer_id = 0, layer_id = 0;
struct sti_compositor *compo = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
struct sti_private *dev_priv = drm_dev->dev_private;
struct drm_plane *cursor = NULL;
struct drm_plane *primary = NULL;
struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
unsigned int array_size = compo->data.nb_subdev;
dev_priv->compo = compo;
/* Register mixer subdev and video subdev first */
for (i = 0; i < array_size; i++) {
switch (desc[i].type) {
case STI_VID_SUBDEV:
compo->vid[vid_id++] =
sti_vid_create(compo->dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
case STI_VID_SUBDEV:
case STI_CURSOR_SUBDEV:
compo->layer[layer_id++] =
sti_layer_create(compo->dev, desc[i].id,
compo->regs + desc[i].offset);
/* Nothing to do, wait for the second round */
break;
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
}
}
compo->nb_mixers = mixer_id;
compo->nb_layers = layer_id;
return 0;
}
static int sti_compositor_bind(struct device *dev, struct device *master,
void *data)
{
struct sti_compositor *compo = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i, crtc = 0, plane = 0;
struct sti_drm_private *dev_priv = drm_dev->dev_private;
struct drm_plane *cursor = NULL;
struct drm_plane *primary = NULL;
dev_priv->compo = compo;
for (i = 0; i < compo->nb_layers; i++) {
if (compo->layer[i]) {
enum sti_layer_desc desc = compo->layer[i]->desc;
enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
/* Register the other subdevs, create crtc and planes */
for (i = 0; i < array_size; i++) {
enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
if (crtc < compo->nb_mixers)
if (crtc_id < mixer_id)
plane_type = DRM_PLANE_TYPE_PRIMARY;
switch (type) {
case STI_CUR:
cursor = sti_drm_plane_init(drm_dev,
compo->layer[i],
1, DRM_PLANE_TYPE_CURSOR);
switch (desc[i].type) {
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
case STI_VID_SUBDEV:
/* Nothing to do, already done at the first round */
break;
case STI_GDP:
case STI_VID:
primary = sti_drm_plane_init(drm_dev,
compo->layer[i],
(1 << compo->nb_mixers) - 1,
case STI_CURSOR_SUBDEV:
cursor = sti_cursor_create(drm_dev, compo->dev,
desc[i].id,
compo->regs + desc[i].offset,
1);
if (!cursor) {
DRM_ERROR("Can't create CURSOR plane\n");
break;
}
break;
case STI_GPD_SUBDEV:
primary = sti_gdp_create(drm_dev, compo->dev,
desc[i].id,
compo->regs + desc[i].offset,
(1 << mixer_id) - 1,
plane_type);
plane++;
if (!primary) {
DRM_ERROR("Can't create GDP plane\n");
break;
case STI_BCK:
case STI_VDP:
}
break;
default:
DRM_ERROR("Unknown subdev compoment type\n");
return 1;
}
/* The first planes are reserved for primary planes*/
if (crtc < compo->nb_mixers && primary) {
sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
if (crtc_id < mixer_id && primary) {
sti_crtc_init(drm_dev, compo->mixer[crtc_id],
primary, cursor);
crtc++;
crtc_id++;
cursor = NULL;
primary = NULL;
}
}
}
drm_vblank_init(drm_dev, crtc);
drm_vblank_init(drm_dev, crtc_id);
/* Allow usage of vblank without having to call drm_irq_install */
drm_dev->irq_enabled = 1;
DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
crtc, plane);
DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
return 0;
}
@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *vtg_np;
struct sti_compositor *compo;
struct resource *res;
int err;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
if (!compo) {
@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
return -ENOMEM;
}
compo->dev = dev;
compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
/* populate data structure depending on compatibility */
BUG_ON(!of_match_node(compositor_of_match, np)->data);
@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
/* Initialize compositor subdevices */
err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
compo->data.nb_subdev);
if (err)
return err;
platform_set_drvdata(pdev, compo);
return component_add(&pdev->dev, &sti_compositor_ops);

@ -12,13 +12,13 @@
#include <linux/clk.h>
#include <linux/kernel.h>
#include "sti_layer.h"
#include "sti_mixer.h"
#include "sti_plane.h"
#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
#define STI_MAX_LAYER 8
#define STI_MAX_MIXER 2
#define STI_MAX_VID 1
enum sti_compositor_subdev_type {
STI_MIXER_MAIN_SUBDEV,
@ -59,11 +59,9 @@ struct sti_compositor_data {
* @rst_main: reset control of the main path
* @rst_aux: reset control of the aux path
* @mixer: array of mixers
* @vid: array of vids
* @vtg_main: vtg for main data path
* @vtg_aux: vtg for auxillary data path
* @layer: array of layers
* @nb_mixers: number of mixers for this compositor
* @nb_layers: number of layers (GDP,VID,...) for this compositor
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@ -77,11 +75,9 @@ struct sti_compositor {
struct reset_control *rst_main;
struct reset_control *rst_aux;
struct sti_mixer *mixer[STI_MAX_MIXER];
struct sti_vid *vid[STI_MAX_VID];
struct sti_vtg *vtg_main;
struct sti_vtg *vtg_aux;
struct sti_layer *layer[STI_MAX_LAYER];
int nb_mixers;
int nb_layers;
struct notifier_block vtg_vblank_nb;
};

@ -15,22 +15,20 @@
#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_drm_drv.h"
#include "sti_drm_crtc.h"
#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_vid.h"
#include "sti_vtg.h"
static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
DRM_DEBUG_KMS("\n");
}
static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
mixer->enabled = true;
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_READY;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@ -41,36 +39,19 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
sti_mixer_clear_all_layers(mixer);
}
static void sti_drm_crtc_commit(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
struct sti_layer *layer;
if ((!mixer || !compo)) {
DRM_ERROR("Can not find mixer or compositor)\n");
return;
}
/* get GDP which is reserved to the CRTC FB */
layer = to_sti_layer(crtc->primary);
if (layer)
sti_layer_commit(layer);
else
DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
/* Enable layer on mixer */
if (sti_mixer_set_layer_status(mixer, layer, true))
DRM_ERROR("Can not enable layer at mixer\n");
drm_crtc_vblank_on(crtc);
}
static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
static void sti_crtc_disabling(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_DISABLING;
}
static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@ -79,7 +60,7 @@ static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
}
static int
sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
res = sti_mixer_active_video_area(mixer, &crtc->mode);
if (res) {
DRM_ERROR("Can not set active video area\n");
DRM_ERROR("Can't set active video area\n");
return -EINVAL;
}
return res;
}
static void sti_drm_crtc_disable(struct drm_crtc *crtc)
static void sti_crtc_disable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
if (!mixer->enabled)
return;
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
/* Disable Background */
@ -154,17 +132,17 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
mixer->enabled = false;
mixer->status = STI_MIXER_DISABLED;
}
static void
sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
sti_drm_crtc_prepare(crtc);
sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
sti_crtc_enable(crtc);
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
static void sti_drm_atomic_begin(struct drm_crtc *crtc,
static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@ -179,31 +157,93 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc,
}
}
static void sti_drm_atomic_flush(struct drm_crtc *crtc,
static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_device *drm_dev = crtc->dev;
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
struct drm_plane *p;
DRM_DEBUG_DRIVER("\n");
/* perform plane actions */
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
switch (plane->status) {
case STI_PLANE_UPDATED:
/* update planes tag as updated */
DRM_DEBUG_DRIVER("update plane %s\n",
sti_plane_to_str(plane));
if (sti_mixer_set_plane_depth(mixer, plane)) {
DRM_ERROR("Cannot set plane %s depth\n",
sti_plane_to_str(plane));
break;
}
if (sti_mixer_set_plane_status(mixer, plane, true)) {
DRM_ERROR("Cannot enable plane %s at mixer\n",
sti_plane_to_str(plane));
break;
}
/* if plane is HQVDP_0 then commit the vid[0] */
if (plane->desc == STI_HQVDP_0)
sti_vid_commit(compo->vid[0], p->state);
plane->status = STI_PLANE_READY;
break;
case STI_PLANE_DISABLING:
/* disabling sequence for planes tag as disabling */
DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
sti_plane_to_str(plane));
if (sti_mixer_set_plane_status(mixer, plane, false)) {
DRM_ERROR("Cannot disable plane %s at mixer\n",
sti_plane_to_str(plane));
continue;
}
if (plane->desc == STI_CURSOR)
/* tag plane status for disabled */
plane->status = STI_PLANE_DISABLED;
else
/* tag plane status for flushing */
plane->status = STI_PLANE_FLUSHING;
/* if plane is HQVDP_0 then disable the vid[0] */
if (plane->desc == STI_HQVDP_0)
sti_vid_disable(compo->vid[0]);
break;
default:
/* Other status case are not handled */
break;
}
}
}
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.dpms = sti_drm_crtc_dpms,
.prepare = sti_drm_crtc_prepare,
.commit = sti_drm_crtc_commit,
.mode_fixup = sti_drm_crtc_mode_fixup,
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
.mode_fixup = sti_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_drm_crtc_mode_set_nofb,
.mode_set_nofb = sti_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
.disable = sti_drm_crtc_disable,
.atomic_begin = sti_drm_atomic_begin,
.atomic_flush = sti_drm_atomic_flush,
.atomic_begin = sti_crtc_atomic_begin,
.atomic_flush = sti_crtc_atomic_flush,
};
static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
static void sti_crtc_destroy(struct drm_crtc *crtc)
{
DRM_DEBUG_KMS("\n");
drm_crtc_cleanup(crtc);
}
static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
static int sti_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
@ -211,7 +251,7 @@ static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
return 0;
}
int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct drm_device *drm_dev;
@ -219,7 +259,7 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
container_of(nb, struct sti_compositor, vtg_vblank_nb);
int *crtc = data;
unsigned long flags;
struct sti_drm_private *priv;
struct sti_private *priv;
drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
priv = drm_dev->dev_private;
@ -241,15 +281,32 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
struct drm_plane *p;
/* Disable mixer only if all overlay planes (GDP and VDP)
* are disabled */
list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
if (plane->status != STI_PLANE_DISABLED)
return 0;
}
sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
}
return 0;
}
int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
struct sti_drm_private *dev_priv = dev->dev_private;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
DRM_DEBUG_DRIVER("\n");
if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
vtg_vblank_nb, crtc)) {
@ -259,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
return 0;
}
EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
EXPORT_SYMBOL(sti_crtc_enable_vblank);
void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
{
struct sti_drm_private *priv = dev->dev_private;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
@ -275,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
/* free the resources of the pending requests */
if (compo->mixer[crtc]->pending_event) {
drm_vblank_put(dev, crtc);
drm_vblank_put(drm_dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
}
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
EXPORT_SYMBOL(sti_crtc_disable_vblank);
static struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = sti_drm_crtc_destroy,
.set_property = sti_drm_crtc_set_property,
.destroy = sti_crtc_destroy,
.set_property = sti_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
bool sti_crtc_is_main(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@ -300,9 +357,9 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
EXPORT_SYMBOL(sti_drm_crtc_is_main);
EXPORT_SYMBOL(sti_crtc_is_main);
int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor)
{
struct drm_crtc *crtc = &mixer->drm_crtc;
@ -311,7 +368,7 @@ int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
&sti_crtc_funcs);
if (res) {
DRM_ERROR("Can not initialze CRTC\n");
DRM_ERROR("Can't initialze CRTC\n");
return -EINVAL;
}

@ -0,0 +1,22 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_CRTC_H_
#define _STI_CRTC_H_
#include <drm/drmP.h>
struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
#endif

@ -7,8 +7,14 @@
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
#include "sti_layer.h"
#include "sti_plane.h"
#include "sti_vtg.h"
/* Registers */
@ -42,7 +48,9 @@ struct dma_pixmap {
/**
* STI Cursor structure
*
* @layer: layer structure
* @sti_plane: sti_plane structure
* @dev: driver device
* @regs: cursor registers
* @width: cursor width
* @height: cursor height
* @clut: color look up table
@ -50,7 +58,9 @@ struct dma_pixmap {
* @pixmap: pixmap dma buffer (clut8-format cursor)
*/
struct sti_cursor {
struct sti_layer layer;
struct sti_plane plane;
struct device *dev;
void __iomem *regs;
unsigned int width;
unsigned int height;
unsigned short *clut;
@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
DRM_FORMAT_ARGB8888,
};
#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
return cursor_supported_formats;
}
static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
{
return ARRAY_SIZE(cursor_supported_formats);
}
static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
{
struct sti_cursor *cursor = to_sti_cursor(layer);
u32 *src = layer->vaddr;
u8 *dst = cursor->pixmap.base;
unsigned int i, j;
u32 a, r, g, b;
@ -96,101 +94,8 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
}
}
static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
static void sti_cursor_init(struct sti_cursor *cursor)
{
struct sti_cursor *cursor = to_sti_cursor(layer);
struct drm_display_mode *mode = layer->mode;
u32 y, x;
u32 val;
DRM_DEBUG_DRIVER("\n");
dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
if (layer->src_w < STI_CURS_MIN_SIZE ||
layer->src_h < STI_CURS_MIN_SIZE ||
layer->src_w > STI_CURS_MAX_SIZE ||
layer->src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
layer->src_w, layer->src_h);
return -EINVAL;
}
/* If the cursor size has changed, re-allocated the pixmap */
if (!cursor->pixmap.base ||
(cursor->width != layer->src_w) ||
(cursor->height != layer->src_h)) {
cursor->width = layer->src_w;
cursor->height = layer->src_h;
if (cursor->pixmap.base)
dma_free_writecombine(layer->dev,
cursor->pixmap.size,
cursor->pixmap.base,
cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
cursor->pixmap.size,
&cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
return -ENOMEM;
}
}
/* Convert ARGB8888 to CLUT8 */
sti_cursor_argb8888_to_clut8(layer);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
x = sti_vtg_get_pixel_number(*mode, 0);
val = y << 16 | x;
writel(val, layer->regs + CUR_AWS);
y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
val = y << 16 | x;
writel(val, layer->regs + CUR_AWE);
if (first_prepare) {
/* Set and fetch CLUT */
writel(cursor->clut_paddr, layer->regs + CUR_CML);
writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
}
return 0;
}
static int sti_cursor_commit_layer(struct sti_layer *layer)
{
struct sti_cursor *cursor = to_sti_cursor(layer);
struct drm_display_mode *mode = layer->mode;
u32 ydo, xdo;
dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
/* Set memory location, size, and position */
writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
writel(cursor->width, layer->regs + CUR_PMP);
writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
return 0;
}
static int sti_cursor_disable_layer(struct sti_layer *layer)
{
return 0;
}
static void sti_cursor_init(struct sti_layer *layer)
{
struct sti_cursor *cursor = to_sti_cursor(layer);
unsigned short *base = cursor->clut;
unsigned int a, r, g, b;
@ -205,18 +110,139 @@ static void sti_cursor_init(struct sti_layer *layer)
(b * 5);
}
static const struct sti_layer_funcs cursor_ops = {
.get_formats = sti_cursor_get_formats,
.get_nb_formats = sti_cursor_get_nb_formats,
.init = sti_cursor_init,
.prepare = sti_cursor_prepare_layer,
.commit = sti_cursor_commit_layer,
.disable = sti_cursor_disable_layer,
static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
struct drm_display_mode *mode = &crtc->mode;
int dst_x = state->crtc_x;
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
int src_w = state->src_w >> 16;
int src_h = state->src_h >> 16;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct drm_gem_cma_object *cma_obj;
u32 y, x;
u32 val;
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
dev_dbg(cursor->dev, "%s %s\n", __func__,
sti_plane_to_str(plane));
if (src_w < STI_CURS_MIN_SIZE ||
src_h < STI_CURS_MIN_SIZE ||
src_w > STI_CURS_MAX_SIZE ||
src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
src_w, src_h);
return;
}
/* If the cursor size has changed, re-allocated the pixmap */
if (!cursor->pixmap.base ||
(cursor->width != src_w) ||
(cursor->height != src_h)) {
cursor->width = src_w;
cursor->height = src_h;
if (cursor->pixmap.base)
dma_free_writecombine(cursor->dev,
cursor->pixmap.size,
cursor->pixmap.base,
cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
cursor->pixmap.size,
&cursor->pixmap.paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
return;
}
}
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
return;
}
/* Convert ARGB8888 to CLUT8 */
sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
x = sti_vtg_get_pixel_number(*mode, 0);
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWS);
y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWE);
if (first_prepare) {
/* Set and fetch CLUT */
writel(cursor->clut_paddr, cursor->regs + CUR_CML);
writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
}
/* Set memory location, size, and position */
writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
writel(cursor->width, cursor->regs + CUR_PMP);
writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
y = sti_vtg_get_line_number(*mode, dst_y);
x = sti_vtg_get_pixel_number(*mode, dst_y);
writel((y << 16) | x, cursor->regs + CUR_VPO);
plane->status = STI_PLANE_UPDATED;
}
static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_update = sti_cursor_atomic_update,
.atomic_disable = sti_cursor_atomic_disable,
};
struct sti_layer *sti_cursor_create(struct device *dev)
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs)
{
struct sti_cursor *cursor;
size_t size;
int res;
cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
if (!cursor) {
@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
}
/* Allocate clut buffer */
cursor->clut = dma_alloc_writecombine(dev,
0x100 * sizeof(unsigned short),
&cursor->clut_paddr,
size = 0x100 * sizeof(unsigned short);
cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
goto err_clut;
}
cursor->dev = dev;
cursor->regs = baseaddr;
cursor->plane.desc = desc;
cursor->plane.status = STI_PLANE_DISABLED;
sti_cursor_init(cursor);
res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
possible_crtcs,
&sti_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
DRM_PLANE_TYPE_CURSOR);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err_plane;
}
drm_plane_helper_add(&cursor->plane.drm_plane,
&sti_cursor_helpers_funcs);
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
return &cursor->plane.drm_plane;
err_plane:
dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
err_clut:
devm_kfree(dev, cursor);
return NULL;
}
cursor->layer.ops = &cursor_ops;
return (struct sti_layer *)cursor;
}

@ -7,6 +7,9 @@
#ifndef _STI_CURSOR_H_
#define _STI_CURSOR_H_
struct sti_layer *sti_cursor_create(struct device *dev);
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs);
#endif

@ -1,22 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_DRM_CRTC_H_
#define _STI_DRM_CRTC_H_
#include <drm/drmP.h>
struct sti_mixer;
int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
#endif

@ -1,251 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_drm_drv.h"
#include "sti_drm_plane.h"
#include "sti_vtg.h"
enum sti_layer_desc sti_layer_default_zorder[] = {
STI_GDP_0,
STI_VID_0,
STI_GDP_1,
STI_VID_1,
STI_GDP_2,
STI_GDP_3,
};
/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
static int
sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct sti_layer *layer = to_sti_layer(plane);
struct sti_mixer *mixer = to_sti_mixer(crtc);
int res;
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
plane->base.id, sti_layer_to_str(layer));
DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
res = sti_mixer_set_layer_depth(mixer, layer);
if (res) {
DRM_ERROR("Can not set layer depth\n");
return res;
}
/* src_x are in 16.16 format. */
res = sti_layer_prepare(layer, crtc, fb,
&crtc->mode, mixer->id,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x >> 16, src_y >> 16,
src_w >> 16, src_h >> 16);
if (res) {
DRM_ERROR("Layer prepare failed\n");
return res;
}
res = sti_layer_commit(layer);
if (res) {
DRM_ERROR("Layer commit failed\n");
return res;
}
res = sti_mixer_set_layer_status(mixer, layer, true);
if (res) {
DRM_ERROR("Can not enable layer at mixer\n");
return res;
}
return 0;
}
static int sti_drm_disable_plane(struct drm_plane *plane)
{
struct sti_layer *layer;
struct sti_mixer *mixer;
int lay_res, mix_res;
if (!plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
return 0;
}
layer = to_sti_layer(plane);
mixer = to_sti_mixer(plane->crtc);
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
plane->crtc->base.id, sti_mixer_to_str(mixer),
plane->base.id, sti_layer_to_str(layer));
/* Disable layer at mixer level */
mix_res = sti_mixer_set_layer_status(mixer, layer, false);
if (mix_res)
DRM_ERROR("Can not disable layer at mixer\n");
/* Wait a while to be sure that a Vsync event is received */
msleep(WAIT_NEXT_VSYNC_MS);
/* Then disable layer itself */
lay_res = sti_layer_disable(layer);
if (lay_res)
DRM_ERROR("Layer disable failed\n");
if (lay_res || mix_res)
return -EINVAL;
return 0;
}
static void sti_drm_plane_destroy(struct drm_plane *plane)
{
DRM_DEBUG_DRIVER("\n");
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
}
static int sti_drm_plane_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct sti_drm_private *private = dev->dev_private;
struct sti_layer *layer = to_sti_layer(plane);
DRM_DEBUG_DRIVER("\n");
if (property == private->plane_zorder_property) {
layer->zorder = val;
return 0;
}
return -EINVAL;
}
static struct drm_plane_funcs sti_drm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_drm_plane_destroy,
.set_property = sti_drm_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *new_state)
{
return 0;
}
static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *old_fb)
{
}
static int sti_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
return 0;
}
static void sti_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *oldstate)
{
struct drm_plane_state *state = plane->state;
sti_drm_update_plane(plane, state->crtc, state->fb,
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
}
static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *oldstate)
{
sti_drm_disable_plane(plane);
}
static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
.prepare_fb = sti_drm_plane_prepare_fb,
.cleanup_fb = sti_drm_plane_cleanup_fb,
.atomic_check = sti_drm_plane_atomic_check,
.atomic_update = sti_drm_plane_atomic_update,
.atomic_disable = sti_drm_plane_atomic_disable,
};
static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
uint64_t default_val)
{
struct drm_device *dev = plane->dev;
struct sti_drm_private *private = dev->dev_private;
struct drm_property *prop;
struct sti_layer *layer = to_sti_layer(plane);
prop = private->plane_zorder_property;
if (!prop) {
prop = drm_property_create_range(dev, 0, "zpos", 0,
GAM_MIXER_NB_DEPTH_LEVEL - 1);
if (!prop)
return;
private->plane_zorder_property = prop;
}
drm_object_attach_property(&plane->base, prop, default_val);
layer->zorder = default_val;
}
struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
struct sti_layer *layer,
unsigned int possible_crtcs,
enum drm_plane_type type)
{
int err, i;
uint64_t default_zorder = 0;
err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
&sti_drm_plane_funcs,
sti_layer_get_formats(layer),
sti_layer_get_nb_formats(layer), type);
if (err) {
DRM_ERROR("Failed to initialize plane\n");
return NULL;
}
drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
if (sti_layer_default_zorder[i] == layer->desc)
break;
default_zorder = i + 1;
if (type == DRM_PLANE_TYPE_OVERLAY)
sti_drm_plane_attach_zorder_property(&layer->plane,
default_zorder);
DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
layer->plane.base.id,
sti_layer_to_str(layer), default_zorder);
return &layer->plane;
}
EXPORT_SYMBOL(sti_drm_plane_init);

@ -1,18 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_DRM_PLANE_H_
#define _STI_DRM_PLANE_H_
#include <drm/drmP.h>
struct sti_layer;
struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
struct sti_layer *layer,
unsigned int possible_crtcs,
enum drm_plane_type type);
#endif

@ -18,8 +18,8 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "sti_drm_drv.h"
#include "sti_drm_crtc.h"
#include "sti_crtc.h"
#include "sti_drv.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
@ -30,14 +30,14 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
static void sti_drm_atomic_schedule(struct sti_drm_private *private,
static void sti_atomic_schedule(struct sti_private *private,
struct drm_atomic_state *state)
{
private->commit.state = state;
schedule_work(&private->commit.work);
}
static void sti_drm_atomic_complete(struct sti_drm_private *private,
static void sti_atomic_complete(struct sti_private *private,
struct drm_atomic_state *state)
{
struct drm_device *drm = private->drm_dev;
@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
drm_atomic_state_free(state);
}
static void sti_drm_atomic_work(struct work_struct *work)
static void sti_atomic_work(struct work_struct *work)
{
struct sti_drm_private *private = container_of(work,
struct sti_drm_private, commit.work);
struct sti_private *private = container_of(work,
struct sti_private, commit.work);
sti_drm_atomic_complete(private, private->commit.state);
sti_atomic_complete(private, private->commit.state);
}
static int sti_drm_atomic_commit(struct drm_device *drm,
static int sti_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool async)
{
struct sti_drm_private *private = drm->dev_private;
struct sti_private *private = drm->dev_private;
int err;
err = drm_atomic_helper_prepare_planes(drm, state);
@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(drm, state);
if (async)
sti_drm_atomic_schedule(private, state);
sti_atomic_schedule(private, state);
else
sti_drm_atomic_complete(private, state);
sti_atomic_complete(private, state);
mutex_unlock(&private->commit.lock);
return 0;
}
static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
static struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = sti_drm_atomic_commit,
.atomic_commit = sti_atomic_commit,
};
static void sti_drm_mode_config_init(struct drm_device *dev)
static void sti_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
dev->mode_config.max_height = STI_MAX_FB_WIDTH;
dev->mode_config.funcs = &sti_drm_mode_config_funcs;
dev->mode_config.funcs = &sti_mode_config_funcs;
}
static int sti_drm_load(struct drm_device *dev, unsigned long flags)
static int sti_load(struct drm_device *dev, unsigned long flags)
{
struct sti_drm_private *private;
struct sti_private *private;
int ret;
private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private) {
DRM_ERROR("Failed to allocate private\n");
return -ENOMEM;
@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
private->drm_dev = dev;
mutex_init(&private->commit.lock);
INIT_WORK(&private->commit.work, sti_drm_atomic_work);
INIT_WORK(&private->commit.work, sti_atomic_work);
drm_mode_config_init(dev);
drm_kms_helper_poll_init(dev);
sti_drm_mode_config_init(dev);
sti_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
if (ret) {
@ -168,7 +168,7 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
return 0;
}
static const struct file_operations sti_drm_driver_fops = {
static const struct file_operations sti_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = drm_gem_cma_mmap,
@ -181,7 +181,7 @@ static const struct file_operations sti_drm_driver_fops = {
.release = drm_release,
};
static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj,
int flags)
{
@ -190,24 +190,24 @@ static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
return drm_gem_prime_export(dev, obj, flags);
}
static struct drm_driver sti_drm_driver = {
static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME,
.load = sti_drm_load,
.load = sti_load,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.fops = &sti_drm_driver_fops,
.fops = &sti_driver_fops,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = sti_drm_crtc_enable_vblank,
.disable_vblank = sti_drm_crtc_disable_vblank,
.enable_vblank = sti_crtc_enable_vblank,
.disable_vblank = sti_crtc_disable_vblank,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = sti_drm_gem_prime_export,
.gem_prime_export = sti_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
static int sti_drm_bind(struct device *dev)
static int sti_bind(struct device *dev)
{
return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
return drm_platform_init(&sti_driver, to_platform_device(dev));
}
static void sti_drm_unbind(struct device *dev)
static void sti_unbind(struct device *dev)
{
drm_put_dev(dev_get_drvdata(dev));
}
static const struct component_master_ops sti_drm_ops = {
.bind = sti_drm_bind,
.unbind = sti_drm_unbind,
static const struct component_master_ops sti_ops = {
.bind = sti_bind,
.unbind = sti_unbind,
};
static int sti_drm_master_probe(struct platform_device *pdev)
static int sti_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->parent->of_node;
struct device_node *node = dev->of_node;
struct device_node *child_np;
struct component_match *match = NULL;
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
of_platform_populate(node, NULL, NULL, dev);
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, child_np);
}
return component_master_add_with_match(dev, &sti_drm_ops, match);
return component_master_add_with_match(dev, &sti_ops, match);
}
static int sti_drm_master_remove(struct platform_device *pdev)
static int sti_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sti_drm_ops);
return 0;
}
static struct platform_driver sti_drm_master_driver = {
.probe = sti_drm_master_probe,
.remove = sti_drm_master_remove,
.driver = {
.name = DRIVER_NAME "__master",
},
};
static int sti_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct platform_device *master;
of_platform_populate(node, NULL, NULL, dev);
platform_driver_register(&sti_drm_master_driver);
master = platform_device_register_resndata(dev,
DRIVER_NAME "__master", -1,
NULL, 0, NULL, 0);
if (IS_ERR(master))
return PTR_ERR(master);
platform_set_drvdata(pdev, master);
return 0;
}
static int sti_drm_platform_remove(struct platform_device *pdev)
{
struct platform_device *master = platform_get_drvdata(pdev);
component_master_del(&pdev->dev, &sti_ops);
of_platform_depopulate(&pdev->dev);
platform_device_unregister(master);
platform_driver_unregister(&sti_drm_master_driver);
return 0;
}
static const struct of_device_id sti_drm_dt_ids[] = {
static const struct of_device_id sti_dt_ids[] = {
{ .compatible = "st,sti-display-subsystem", },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
MODULE_DEVICE_TABLE(of, sti_dt_ids);
static struct platform_driver sti_drm_platform_driver = {
.probe = sti_drm_platform_probe,
.remove = sti_drm_platform_remove,
static struct platform_driver sti_platform_driver = {
.probe = sti_platform_probe,
.remove = sti_platform_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = sti_drm_dt_ids,
.of_match_table = sti_dt_ids,
},
};
module_platform_driver(sti_drm_platform_driver);
module_platform_driver(sti_platform_driver);
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");

@ -4,8 +4,8 @@
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_DRM_DRV_H_
#define _STI_DRM_DRV_H_
#ifndef _STI_DRV_H_
#define _STI_DRV_H_
#include <drm/drmP.h>
@ -20,7 +20,7 @@ struct sti_tvout;
* @plane_zorder_property: z-order property for CRTC planes
* @drm_dev: drm device
*/
struct sti_drm_private {
struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;

@ -9,9 +9,12 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_layer.h"
#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH BIT(6)
@ -85,7 +88,9 @@ struct sti_gdp_node_list {
/**
* STI GDP structure
*
* @layer: layer structure
* @sti_plane: sti_plane structure
* @dev: driver device
* @regs: gdp registers
* @clk_pix: pixel clock for the current gdp
* @clk_main_parent: gdp parent clock if main path used
* @clk_aux_parent: gdp parent clock if aux path used
@ -94,7 +99,9 @@ struct sti_gdp_node_list {
* @node_list: array of node list
*/
struct sti_gdp {
struct sti_layer layer;
struct sti_plane plane;
struct device *dev;
void __iomem *regs;
struct clk *clk_pix;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
@ -103,7 +110,7 @@ struct sti_gdp {
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_C8,
};
static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
{
return gdp_supported_formats;
}
static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
{
return ARRAY_SIZE(gdp_supported_formats);
}
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
/**
* sti_gdp_get_free_nodes
* @layer: gdp layer
* @gdp: gdp pointer
*
* Look for a GDP node list that is not currently read by the HW.
*
* RETURNS:
* Pointer to the free GDP node list
*/
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
/* in hazardious cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_layer_to_str(layer), hw_nvn);
sti_plane_to_str(&gdp->plane), hw_nvn);
end:
return &gdp->node_list[0];
@ -207,7 +203,7 @@ end:
/**
* sti_gdp_get_current_nodes
* @layer: GDP layer
* @gdp: gdp pointer
*
* Look for GDP nodes that are currently read by the HW.
*
@ -215,13 +211,12 @@ end:
* Pointer to the current GDP node list
*/
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
end:
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
hw_nvn, sti_layer_to_str(layer));
hw_nvn, sti_plane_to_str(&gdp->plane));
return NULL;
}
/**
* sti_gdp_prepare_layer
* @lay: gdp layer
* @first_prepare: true if it is the first time this function is called
*
* Update the free GDP node list according to the layer properties.
*
* RETURNS:
* 0 on success.
*/
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
struct sti_gdp_node_list *list;
struct sti_gdp_node *top_field, *btm_field;
struct drm_display_mode *mode = layer->mode;
struct device *dev = layer->dev;
struct sti_gdp *gdp = to_sti_gdp(layer);
struct sti_compositor *compo = dev_get_drvdata(dev);
int format;
unsigned int depth, bpp;
int rate = mode->clock * 1000;
int res;
u32 ydo, xdo, yds, xds;
list = sti_gdp_get_free_nodes(layer);
top_field = list->top_field;
btm_field = list->btm_field;
dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
sti_layer_to_str(layer), top_field, btm_field);
/* Build the top field from layer params */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(layer->format);
if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
(char *)&layer->format);
return 1;
}
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
/* pixel memory location */
drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
/* input parameters */
top_field->gam_gdp_pmp = layer->pitches[0];
top_field->gam_gdp_size =
clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
/* output parameters */
ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
layer->pitches[0];
if (first_prepare) {
/* Register gdp callback */
if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
&gdp->vtg_field_nb, layer->mixer_id)) {
DRM_ERROR("Cannot register VTG notifier\n");
return 1;
}
/* Set and enable gdp clock */
if (gdp->clk_pix) {
struct clk *clkp;
/* According to the mixer used, the gdp pixel clock
* should have a different parent clock. */
if (layer->mixer_id == STI_MIXER_MAIN)
clkp = gdp->clk_main_parent;
else
clkp = gdp->clk_aux_parent;
if (clkp)
clk_set_parent(gdp->clk_pix, clkp);
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
rate);
return 1;
}
if (clk_prepare_enable(gdp->clk_pix)) {
DRM_ERROR("Failed to prepare/enable gdp\n");
return 1;
}
}
}
return 0;
}
/**
* sti_gdp_commit_layer
* @lay: gdp layer
*
* Update the NVN field of the 'right' field of the current GDP node (being
* used by the HW) with the address of the updated ('free') top field GDP node.
* - In interlaced mode the 'right' field is the bottom field as we update
* frames starting from their top field
* - In progressive mode, we update both bottom and top fields which are
* equal nodes.
* At the next VSYNC, the updated node list will be used by the HW.
*
* RETURNS:
* 0 on success.
*/
static int sti_gdp_commit_layer(struct sti_layer *layer)
{
struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer);
u32 dma_updated_top = updated_list->top_field_paddr;
u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
sti_layer_to_str(layer),
updated_top_node, updated_btm_node);
dev_dbg(layer->dev, "Current NVN:0x%X\n",
readl(layer->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
(unsigned long)layer->paddr,
readl(layer->regs + GAM_GDP_PML_OFFSET));
if (curr_list == NULL) {
/* First update or invalid node should directly write in the
* hw register */
DRM_DEBUG_DRIVER("%s first update (or invalid node)",
sti_layer_to_str(layer));
writel(gdp->is_curr_top == true ?
dma_updated_btm : dma_updated_top,
layer->regs + GAM_GDP_NVN_OFFSET);
return 0;
}
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (gdp->is_curr_top == true) {
/* Do not update in the middle of the frame, but
* postpone the update after the bottom field has
* been displayed */
curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
} else {
/* Direct update to avoid one frame delay */
writel(dma_updated_top,
layer->regs + GAM_GDP_NVN_OFFSET);
}
} else {
/* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
}
return 0;
}
/**
* sti_gdp_disable_layer
* @lay: gdp layer
* sti_gdp_disable
* @gdp: gdp pointer
*
* Disable a GDP.
*
* RETURNS:
* 0 on success.
*/
static int sti_gdp_disable_layer(struct sti_layer *layer)
static void sti_gdp_disable(struct sti_gdp *gdp)
{
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
struct sti_gdp *gdp = to_sti_gdp(layer);
struct sti_compositor *compo = dev_get_drvdata(layer->dev);
DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
clk_disable_unprepare(gdp->clk_pix);
return 0;
gdp->plane.status = STI_PLANE_DISABLED;
}
/**
@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
if (gdp->plane.status == STI_PLANE_FLUSHING) {
/* disable need to be synchronize on vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&gdp->plane));
sti_gdp_disable(gdp);
}
switch (event) {
case VTG_TOP_FIELD_EVENT:
gdp->is_curr_top = true;
@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
return 0;
}
static void sti_gdp_init(struct sti_layer *layer)
static void sti_gdp_init(struct sti_gdp *gdp)
{
struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node;
struct device_node *np = gdp->dev->of_node;
dma_addr_t dma_addr;
void *base;
unsigned int i, size;
@ -490,7 +312,7 @@ static void sti_gdp_init(struct sti_layer *layer)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_writecombine(layer->dev,
base = dma_alloc_writecombine(gdp->dev,
size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
/* Each GDP of the STiH407 chip has its own pixel clock */
char *clk_name;
switch (layer->desc) {
switch (gdp->plane.desc) {
case STI_GDP_0:
clk_name = "pix_gdp1";
break;
@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
return;
}
gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
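The hunk above elides the loop that splits this single write-combined allocation into per-bank nodes; a plausible shape of that split, given purely as a hypothetical sketch (two nodes per bank, one per field, matching the size computation shown), would be:
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		/* top-field node of bank i */
		gdp->node_list[i].top_field = base;
		gdp->node_list[i].top_field_paddr = dma_addr;
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

		/* bottom-field node of bank i */
		gdp->node_list[i].btm_field = base;
		gdp->node_list[i].btm_field_paddr = dma_addr;
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);
	}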
static const struct sti_layer_funcs gdp_ops = {
.get_formats = sti_gdp_get_formats,
.get_nb_formats = sti_gdp_get_nb_formats,
.init = sti_gdp_init,
.prepare = sti_gdp_prepare_layer,
.commit = sti_gdp_commit_layer,
.disable = sti_gdp_disable_layer,
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
struct drm_gem_cma_object *cma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
u32 dma_updated_top;
u32 dma_updated_btm;
int format;
unsigned int depth, bpp;
u32 ydo, xdo, yds, xds;
int res;
/* Manage the case where crtc is null (disabled) */
if (!crtc)
return;
mixer = to_sti_mixer(crtc);
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
btm_field = list->btm_field;
dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
sti_plane_to_str(plane), top_field, btm_field);
/* build the top field */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(fb->pixel_format);
if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
(char *)&fb->pixel_format);
return;
}
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
return;
}
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
(unsigned long)cma_obj->paddr);
/* pixel memory location */
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * (bpp >> 3);
top_field->gam_gdp_pml += src_y * fb->pitches[0];
/* input parameters */
top_field->gam_gdp_pmp = fb->pitches[0];
top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
/* output parameters */
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
fb->pitches[0];
if (first_prepare) {
/* Register gdp callback */
if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
&gdp->vtg_field_nb, mixer->id)) {
DRM_ERROR("Cannot register VTG notifier\n");
return;
}
/* Set and enable gdp clock */
if (gdp->clk_pix) {
struct clk *clkp;
int rate = mode->clock * 1000;
/* According to the mixer used, the gdp pixel clock
* should have a different parent clock. */
if (mixer->id == STI_MIXER_MAIN)
clkp = gdp->clk_main_parent;
else
clkp = gdp->clk_aux_parent;
if (clkp)
clk_set_parent(gdp->clk_pix, clkp);
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
rate);
return;
}
if (clk_prepare_enable(gdp->clk_pix)) {
DRM_ERROR("Failed to prepare/enable gdp\n");
return;
}
}
}
/* Update the NVN field of the 'right' field of the current GDP node
* (being used by the HW) with the address of the updated ('free') top
* field GDP node.
* - In interlaced mode the 'right' field is the bottom field as we
* update frames starting from their top field
* - In progressive mode, we update both bottom and top fields which
* are equal nodes.
* At the next VSYNC, the updated node list will be used by the HW.
*/
curr_list = sti_gdp_get_current_nodes(gdp);
dma_updated_top = list->top_field_paddr;
dma_updated_btm = list->btm_field_paddr;
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
(unsigned long)cma_obj->paddr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
/* A first update or an invalid node should write directly to the
* hw register */
DRM_DEBUG_DRIVER("%s first update (or invalid node)",
sti_plane_to_str(plane));
writel(gdp->is_curr_top ?
dma_updated_btm : dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
goto end;
}
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (gdp->is_curr_top) {
/* Do not update in the middle of the frame, but
* postpone the update until after the bottom field
* has been displayed */
curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
} else {
/* Direct update to avoid one frame delay */
writel(dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
}
} else {
/* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
}
end:
plane->status = STI_PLANE_UPDATED;
}
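The source rectangle arrives from DRM in 16.16 fixed point and the scan-out address is computed inline above; the same arithmetic, pulled out into a stand-alone helper purely for illustration (hypothetical name, not part of the patch):
	/* Illustration only: GDP scan-out address from a 16.16 source origin */
	static u32 gdp_scanout_addr(u32 fb_paddr, u32 fb_offset, u32 pitch,
				    u32 bpp, u32 src_x_16_16, u32 src_y_16_16)
	{
		u32 src_x = src_x_16_16 >> 16;	/* integer part only */
		u32 src_y = src_y_16_16 >> 16;

		return fb_paddr + fb_offset + src_x * (bpp >> 3) + src_y * pitch;
	}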
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_update = sti_gdp_atomic_update,
.atomic_disable = sti_gdp_atomic_disable,
};
struct sti_layer *sti_gdp_create(struct device *dev, int id)
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs,
enum drm_plane_type type)
{
struct sti_gdp *gdp;
int res;
gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
if (!gdp) {
@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
return NULL;
}
gdp->layer.ops = &gdp_ops;
gdp->dev = dev;
gdp->regs = baseaddr;
gdp->plane.desc = desc;
gdp->plane.status = STI_PLANE_DISABLED;
gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
return (struct sti_layer *)gdp;
sti_gdp_init(gdp);
res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
possible_crtcs,
&sti_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
type);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err;
}
drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
sti_plane_init_property(&gdp->plane, type);
return &gdp->plane.drm_plane;
err:
devm_kfree(dev, gdp);
return NULL;
}
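For context, a compositor-side caller is now expected to look roughly like this (hypothetical variable names and offsets; the real call sites live in sti_compositor.c, outside this hunk):
	/* Hypothetical caller sketch, not taken from the patch */
	primary = sti_gdp_create(drm_dev, compo->dev, STI_GDP_0,
				 compo->regs + desc_offset, /* assumed register layout */
				 1 << crtc_index,           /* possible CRTCs bitmask */
				 DRM_PLANE_TYPE_PRIMARY);
	if (!primary)
		DRM_ERROR("Can't create GDP plane\n");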

View File

@ -11,6 +11,9 @@
#include <linux/types.h>
struct sti_layer *sti_gdp_create(struct device *dev, int id);
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs,
enum drm_plane_type type);
#endif

View File

@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
return count;
fail:
DRM_ERROR("Can not read HDMI EDID\n");
DRM_ERROR("Can't read HDMI EDID\n");
return 0;
}
@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
struct device_node *ddc;
int err;
ddc = of_parse_phandle(dev->of_node, "ddc", 0);
if (ddc) {
hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
if (!hdmi->ddc_adapt) {
err = -EPROBE_DEFER;
of_node_put(ddc);
return err;
}
of_node_put(ddc);
}
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
struct resource *res;
struct device_node *ddc;
int ret;
DRM_INFO("%s\n", __func__);
@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
if (!hdmi)
return -ENOMEM;
ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
if (ddc) {
hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
if (!hdmi->ddc_adapt) {
of_node_put(ddc);
return -EPROBE_DEFER;
}
of_node_put(ddc);
}
hdmi->dev = pdev->dev;
/* Get resources */

View File

@ -12,11 +12,12 @@
#include <linux/reset.h>
#include <drm/drmP.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_drm_plane.h"
#include "sti_hqvdp.h"
#include "sti_compositor.h"
#include "sti_hqvdp_lut.h"
#include "sti_layer.h"
#include "sti_plane.h"
#include "sti_vtg.h"
/* Firmware name */
@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
* @dev: driver device
* @drm_dev: the drm device
* @regs: registers
* @layer: layer structure for hqvdp itself
* @vid_plane: VID plug used as link with compositor IP
* @plane: plane structure for hqvdp itself
* @clk: IP clock
* @clk_pix_main: pix main clock
* @reset: reset control
@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
* @xp70_initialized: true if xp70 is already initialized
*/
struct sti_hqvdp {
struct device *dev;
struct drm_device *drm_dev;
void __iomem *regs;
struct sti_layer layer;
struct drm_plane *vid_plane;
struct sti_plane plane;
struct clk *clk;
struct clk *clk_pix_main;
struct reset_control *reset;
@ -351,24 +351,15 @@ struct sti_hqvdp {
void *hqvdp_cmd;
dma_addr_t hqvdp_cmd_paddr;
struct sti_vtg *vtg;
bool xp70_initialized;
};
#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
static const uint32_t hqvdp_supported_formats[] = {
DRM_FORMAT_NV12,
};
static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
{
return hqvdp_supported_formats;
}
static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
{
return ARRAY_SIZE(hqvdp_supported_formats);
}
/**
* sti_hqvdp_get_free_cmd
* @hqvdp: hqvdp structure
@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
/**
* sti_hqvdp_check_hw_scaling
* @layer: hqvdp layer
* @hqvdp: hqvdp pointer
* @mode: display mode with timing constraints
* @src_w: source width
* @src_h: source height
* @dst_w: destination width
* @dst_h: destination height
*
* Check if the HW is able to perform the scaling request
* The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
* RETURNS:
* True if the HW can scale.
*/
static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
struct drm_display_mode *mode,
int src_w, int src_h,
int dst_w, int dst_h)
{
struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
unsigned long lfw;
unsigned int inv_zy;
lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
lfw /= max(src_w, dst_w) * mode->clock / 1000;
inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
inv_zy = DIV_ROUND_UP(src_h, dst_h);
return (inv_zy <= lfw) ? true : false;
}
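A worked instance of the limitation documented above, assuming for illustration a 400 MHz HQVDP clock and a 1080p60 mode (htotal 2200, pixel clock 148500 kHz):
/*
 * lfw = 2200 * (400000000 / 1000000) / (1920 * (148500 / 1000))
 *     = 880000 / 284160                          -> 3 (integer math)
 * 1080 -> 360 lines: inv_zy = DIV_ROUND_UP(1080, 360) = 3 <= 3 -> accepted
 * 1080 -> 270 lines: inv_zy = DIV_ROUND_UP(1080, 270) = 4 >  3 -> rejected
 */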
/**
* sti_hqvdp_prepare_layer
* @layer: hqvdp layer
* @first_prepare: true if it is the first time this function is called
* sti_hqvdp_disable
* @hqvdp: hqvdp pointer
*
* Prepares a command for the firmware
*
* RETURNS:
* 0 on success.
* Disables the HQVDP plane
*/
static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
{
struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
/* prepare and commit VID plane */
hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
layer->crtc, layer->fb,
layer->dst_x, layer->dst_y,
layer->dst_w, layer->dst_h,
layer->src_x, layer->src_y,
layer->src_w, layer->src_h);
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
DRM_ERROR("No available hqvdp_cmd now\n");
return -EBUSY;
}
cmd = hqvdp->hqvdp_cmd + cmd_offset;
if (!sti_hqvdp_check_hw_scaling(layer)) {
DRM_ERROR("Scaling beyond HW capabilities\n");
return -EINVAL;
}
/* Static parameters, defaulting to progressive mode */
cmd->top.config = TOP_CONFIG_PROGRESSIVE;
cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
cmd->csdi.config = CSDI_CONFIG_PROG;
/* VC1RE, FMD bypassed : keep everything set to 0
* IQI/P2I bypassed */
cmd->iqi.config = IQI_CONFIG_DFLT;
cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
/* Buffer planes address */
cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
layer->pitches[0];
cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
layer->pitches[1];
/* Input / output size
* Align to upper even value */
layer->dst_w = ALIGN(layer->dst_w, 2);
layer->dst_h = ALIGN(layer->dst_h, 2);
if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
(layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
(layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
(layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
layer->src_w, layer->src_h,
layer->dst_w, layer->dst_h);
return -EINVAL;
}
cmd->top.input_viewport_size = cmd->top.input_frame_size =
layer->src_h << 16 | layer->src_w;
cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
/* Handle interlaced */
if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
/* Top field to display */
cmd->top.config = TOP_CONFIG_INTER_TOP;
/* Update pitches and vert size */
cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
layer->src_w;
cmd->top.luma_processed_pitch *= 2;
cmd->top.luma_src_pitch *= 2;
cmd->top.chroma_processed_pitch *= 2;
cmd->top.chroma_src_pitch *= 2;
/* Enable directional deinterlacing processing */
cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
}
/* Update hvsrc lut coef */
scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
if (first_prepare) {
/* Prevent VTG shutdown */
if (clk_prepare_enable(hqvdp->clk_pix_main)) {
DRM_ERROR("Failed to prepare/enable pix main clk\n");
return -ENXIO;
}
/* Register VTG Vsync callback to handle bottom fields */
if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
sti_vtg_register_client(hqvdp->vtg,
&hqvdp->vtg_nb, layer->mixer_id)) {
DRM_ERROR("Cannot register VTG notifier\n");
return -ENXIO;
}
}
return 0;
}
static int sti_hqvdp_commit_layer(struct sti_layer *layer)
{
struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int cmd_offset;
dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
DRM_ERROR("No available hqvdp_cmd now\n");
return -EBUSY;
}
writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
hqvdp->curr_field_count++;
/* Interlaced : get ready to display the bottom field at next Vsync */
if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
hqvdp->btm_field_pending = true;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
return 0;
}
static int sti_hqvdp_disable_layer(struct sti_layer *layer)
{
struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int i;
DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
/* Unregister VTG Vsync callback */
if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* Set next cmd to NULL */
@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
/* VTG can stop now */
clk_disable_unprepare(hqvdp->clk_pix_main);
if (i == POLL_MAX_ATTEMPT) {
if (i == POLL_MAX_ATTEMPT)
DRM_ERROR("XP70 could not revert to idle\n");
return -ENXIO;
}
/* disable VID plane */
hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
return 0;
hqvdp->plane.status = STI_PLANE_DISABLED;
}
/**
@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
/* disabling must be synchronized with the vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&hqvdp->plane));
sti_hqvdp_disable(hqvdp);
}
if (hqvdp->btm_field_pending) {
/* Create the btm field command from the current one */
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
return 0;
}
static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
struct drm_plane *plane;
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct sti_layer *layer = to_sti_layer(plane);
if (layer->desc == id)
return plane;
}
return NULL;
}
static void sti_hqvd_init(struct sti_layer *layer)
{
struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
int size;
/* find the plane matching vid 0 */
hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
if (!hqvdp->vid_plane) {
DRM_ERROR("Cannot find Main video layer\n");
return;
}
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
memset(hqvdp->hqvdp_cmd, 0, size);
}
static const struct sti_layer_funcs hqvdp_ops = {
.get_formats = sti_hqvdp_get_formats,
.get_nb_formats = sti_hqvdp_get_nb_formats,
.init = sti_hqvd_init,
.prepare = sti_hqvdp_prepare_layer,
.commit = sti_hqvdp_commit_layer,
.disable = sti_hqvdp_disable_layer,
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
struct drm_display_mode *mode = &crtc->mode;
int dst_x = state->crtc_x;
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
int src_x = state->src_x >> 16;
int src_y = state->src_y >> 16;
int src_w = state->src_w >> 16;
int src_h = state->src_h >> 16;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct drm_gem_cma_object *cma_obj;
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
DRM_ERROR("No available hqvdp_cmd now\n");
return;
}
cmd = hqvdp->hqvdp_cmd + cmd_offset;
if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
src_w, src_h,
dst_w, dst_h)) {
DRM_ERROR("Scaling beyond HW capabilities\n");
return;
}
/* Static parameters, defaulting to progressive mode */
cmd->top.config = TOP_CONFIG_PROGRESSIVE;
cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
cmd->csdi.config = CSDI_CONFIG_PROG;
/* VC1RE, FMD bypassed : keep everything set to 0
* IQI/P2I bypassed */
cmd->iqi.config = IQI_CONFIG_DFLT;
cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
return;
}
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
(unsigned long)cma_obj->paddr);
/* Buffer planes address */
cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = fb->pitches[0];
cmd->top.luma_src_pitch = fb->pitches[0];
cmd->top.chroma_processed_pitch = fb->pitches[1];
cmd->top.chroma_src_pitch = fb->pitches[1];
/* Input / output size
* Align to upper even value */
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
(src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
(dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
(dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
src_w, src_h,
dst_w, dst_h);
return;
}
cmd->top.input_viewport_size = src_h << 16 | src_w;
cmd->top.input_frame_size = src_h << 16 | src_w;
cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
cmd->top.input_viewport_ori = src_y << 16 | src_x;
/* Handle interlaced */
if (fb->flags & DRM_MODE_FB_INTERLACED) {
/* Top field to display */
cmd->top.config = TOP_CONFIG_INTER_TOP;
/* Update pitches and vert size */
cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
cmd->top.luma_processed_pitch *= 2;
cmd->top.luma_src_pitch *= 2;
cmd->top.chroma_processed_pitch *= 2;
cmd->top.chroma_src_pitch *= 2;
/* Enable directional deinterlacing processing */
cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
}
/* Update hvsrc lut coef */
scale_h = SCALE_FACTOR * dst_w / src_w;
sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
scale_v = SCALE_FACTOR * dst_h / src_h;
sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
if (first_prepare) {
/* Prevent VTG shutdown */
if (clk_prepare_enable(hqvdp->clk_pix_main)) {
DRM_ERROR("Failed to prepare/enable pix main clk\n");
return;
}
/* Register VTG Vsync callback to handle bottom fields */
if (sti_vtg_register_client(hqvdp->vtg,
&hqvdp->vtg_nb,
mixer->id)) {
DRM_ERROR("Cannot register VTG notifier\n");
return;
}
}
writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
hqvdp->curr_field_count++;
/* Interlaced : get ready to display the bottom field at next Vsync */
if (fb->flags & DRM_MODE_FB_INTERLACED)
hqvdp->btm_field_pending = true;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
plane->status = STI_PLANE_UPDATED;
}
static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_update = sti_hqvdp_atomic_update,
.atomic_disable = sti_hqvdp_atomic_disable,
};
struct sti_layer *sti_hqvdp_create(struct device *dev)
static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
struct device *dev, int desc)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
int res;
hqvdp->layer.ops = &hqvdp_ops;
hqvdp->plane.desc = desc;
hqvdp->plane.status = STI_PLANE_DISABLED;
return &hqvdp->layer;
sti_hqvdp_init(hqvdp);
res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
&sti_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
DRM_PLANE_TYPE_OVERLAY);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
return NULL;
}
drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
return &hqvdp->plane.drm_plane;
}
EXPORT_SYMBOL(sti_hqvdp_create);
static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
{
@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
} *header;
DRM_DEBUG_DRIVER("\n");
if (hqvdp->xp70_initialized) {
DRM_INFO("HQVDP XP70 already initialized\n");
return;
}
/* Check firmware parts */
if (!firmware) {
DRM_ERROR("Firmware not available\n");
@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
/* Launch Vsync */
writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
DRM_INFO("HQVDP XP70 started\n");
DRM_INFO("HQVDP XP70 initialized\n");
hqvdp->xp70_initialized = true;
out:
release_firmware(firmware);
}
@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct sti_layer *layer;
struct drm_plane *plane;
int err;
DRM_DEBUG_DRIVER("\n");
@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
return err;
}
layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
if (!layer) {
/* Create HQVDP plane once xp70 is initialized */
plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
if (!plane)
DRM_ERROR("Can't create HQVDP plane\n");
return -ENOMEM;
}
sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
return 0;
}

View File

@ -1,12 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_HQVDP_H_
#define _STI_HQVDP_H_
struct sti_layer *sti_hqvdp_create(struct device *dev);
#endif

View File

@ -1,213 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
#include "sti_gdp.h"
#include "sti_hqvdp.h"
#include "sti_layer.h"
#include "sti_vid.h"
const char *sti_layer_to_str(struct sti_layer *layer)
{
switch (layer->desc) {
case STI_GDP_0:
return "GDP0";
case STI_GDP_1:
return "GDP1";
case STI_GDP_2:
return "GDP2";
case STI_GDP_3:
return "GDP3";
case STI_VID_0:
return "VID0";
case STI_VID_1:
return "VID1";
case STI_CURSOR:
return "CURSOR";
case STI_HQVDP_0:
return "HQVDP0";
default:
return "<UNKNOWN LAYER>";
}
}
EXPORT_SYMBOL(sti_layer_to_str);
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr)
{
struct sti_layer *layer = NULL;
switch (desc & STI_LAYER_TYPE_MASK) {
case STI_GDP:
layer = sti_gdp_create(dev, desc);
break;
case STI_VID:
layer = sti_vid_create(dev);
break;
case STI_CUR:
layer = sti_cursor_create(dev);
break;
case STI_VDP:
layer = sti_hqvdp_create(dev);
break;
}
if (!layer) {
DRM_ERROR("Failed to create layer\n");
return NULL;
}
layer->desc = desc;
layer->dev = dev;
layer->regs = baseaddr;
layer->ops->init(layer);
DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
return layer;
}
EXPORT_SYMBOL(sti_layer_create);
int sti_layer_prepare(struct sti_layer *layer,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode, int mixer_id,
int dest_x, int dest_y, int dest_w, int dest_h,
int src_x, int src_y, int src_w, int src_h)
{
int ret;
unsigned int i;
struct drm_gem_cma_object *cma_obj;
if (!layer || !fb || !mode) {
DRM_ERROR("Null fb, layer or mode\n");
return 1;
}
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
return 1;
}
layer->crtc = crtc;
layer->fb = fb;
layer->mode = mode;
layer->mixer_id = mixer_id;
layer->dst_x = dest_x;
layer->dst_y = dest_y;
layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
layer->src_x = src_x;
layer->src_y = src_y;
layer->src_w = src_w;
layer->src_h = src_h;
layer->format = fb->pixel_format;
layer->vaddr = cma_obj->vaddr;
layer->paddr = cma_obj->paddr;
for (i = 0; i < 4; i++) {
layer->pitches[i] = fb->pitches[i];
layer->offsets[i] = fb->offsets[i];
}
DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
sti_layer_to_str(layer),
layer->mixer_id);
DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_layer_to_str(layer),
layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
layer->src_w, layer->src_h, layer->src_x,
layer->src_y);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&layer->format, (unsigned long)layer->paddr);
if (!layer->ops->prepare)
goto err_no_prepare;
ret = layer->ops->prepare(layer, !layer->enabled);
if (!ret)
layer->enabled = true;
return ret;
err_no_prepare:
DRM_ERROR("Cannot prepare\n");
return 1;
}
int sti_layer_commit(struct sti_layer *layer)
{
if (!layer)
return 1;
if (!layer->ops->commit)
goto err_no_commit;
return layer->ops->commit(layer);
err_no_commit:
DRM_ERROR("Cannot commit\n");
return 1;
}
int sti_layer_disable(struct sti_layer *layer)
{
int ret;
DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
if (!layer)
return 1;
if (!layer->enabled)
return 0;
if (!layer->ops->disable)
goto err_no_disable;
ret = layer->ops->disable(layer);
if (!ret)
layer->enabled = false;
else
DRM_ERROR("Disable failed\n");
return ret;
err_no_disable:
DRM_ERROR("Cannot disable\n");
return 1;
}
const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
{
if (!layer)
return NULL;
if (!layer->ops->get_formats)
return NULL;
return layer->ops->get_formats(layer);
}
unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
{
if (!layer)
return 0;
if (!layer->ops->get_nb_formats)
return 0;
return layer->ops->get_nb_formats(layer);
}

View File

@ -1,131 +0,0 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_LAYER_H_
#define _STI_LAYER_H_
#include <drm/drmP.h>
#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
#define STI_LAYER_TYPE_SHIFT 8
#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
struct sti_layer;
enum sti_layer_type {
STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
};
enum sti_layer_id_of_type {
STI_ID_0 = 0,
STI_ID_1 = 1,
STI_ID_2 = 2,
STI_ID_3 = 3
};
enum sti_layer_desc {
STI_GDP_0 = STI_GDP | STI_ID_0,
STI_GDP_1 = STI_GDP | STI_ID_1,
STI_GDP_2 = STI_GDP | STI_ID_2,
STI_GDP_3 = STI_GDP | STI_ID_3,
STI_VID_0 = STI_VID | STI_ID_0,
STI_VID_1 = STI_VID | STI_ID_1,
STI_HQVDP_0 = STI_VDP | STI_ID_0,
STI_CURSOR = STI_CUR,
STI_BACK = STI_BCK
};
/**
* STI layer functions structure
*
* @get_formats: get layer supported formats
* @get_nb_formats: get number of format supported
* @init: initialize the layer
* @prepare: prepare layer before rendering
* @commit: set layer for rendering
* @disable: disable layer
*/
struct sti_layer_funcs {
const uint32_t* (*get_formats)(struct sti_layer *layer);
unsigned int (*get_nb_formats)(struct sti_layer *layer);
void (*init)(struct sti_layer *layer);
int (*prepare)(struct sti_layer *layer, bool first_prepare);
int (*commit)(struct sti_layer *layer);
int (*disable)(struct sti_layer *layer);
};
/**
* STI layer structure
*
* @plane: drm plane it is bound to (if any)
* @fb: drm fb it is bound to
* @crtc: crtc it is bound to
* @mode: display mode
* @desc: layer type & id
* @device: driver device
* @regs: layer registers
* @ops: layer functions
* @zorder: layer z-order
* @mixer_id: id of the mixer used to display the layer
* @enabled: to know if the layer is active or not
* @src_x src_y: coordinates of the input (fb) area
* @src_w src_h: size of the input (fb) area
* @dst_x dst_y: coordinates of the output (crtc) area
* @dst_w dst_h: size of the output (crtc) area
* @format: format
* @pitches: pitch of 'planes' (eg: Y, U, V)
* @offsets: offset of 'planes'
* @vaddr: virtual address of the input buffer
* @paddr: physical address of the input buffer
*/
struct sti_layer {
struct drm_plane plane;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
enum sti_layer_desc desc;
struct device *dev;
void __iomem *regs;
const struct sti_layer_funcs *ops;
int zorder;
int mixer_id;
bool enabled;
int src_x, src_y;
int src_w, src_h;
int dst_x, dst_y;
int dst_w, dst_h;
uint32_t format;
unsigned int pitches[4];
unsigned int offsets[4];
void *vaddr;
dma_addr_t paddr;
};
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr);
int sti_layer_prepare(struct sti_layer *layer,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode,
int mixer_id,
int dest_x, int dest_y,
int dest_w, int dest_h,
int src_x, int src_y,
int src_w, int src_h);
int sti_layer_commit(struct sti_layer *layer);
int sti_layer_disable(struct sti_layer *layer);
const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
const char *sti_layer_to_str(struct sti_layer *layer);
#endif

View File

@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
return "<UNKNOWN MIXER>";
}
}
EXPORT_SYMBOL(sti_mixer_to_str);
static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
{
@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
}
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
int layer_id = 0, depth = layer->zorder;
int plane_id, depth = plane->zorder;
unsigned int i;
u32 mask, val;
if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
return 1;
switch (layer->desc) {
switch (plane->desc) {
case STI_GDP_0:
layer_id = GAM_DEPTH_GDP0_ID;
plane_id = GAM_DEPTH_GDP0_ID;
break;
case STI_GDP_1:
layer_id = GAM_DEPTH_GDP1_ID;
plane_id = GAM_DEPTH_GDP1_ID;
break;
case STI_GDP_2:
layer_id = GAM_DEPTH_GDP2_ID;
plane_id = GAM_DEPTH_GDP2_ID;
break;
case STI_GDP_3:
layer_id = GAM_DEPTH_GDP3_ID;
plane_id = GAM_DEPTH_GDP3_ID;
break;
case STI_VID_0:
case STI_HQVDP_0:
layer_id = GAM_DEPTH_VID0_ID;
break;
case STI_VID_1:
layer_id = GAM_DEPTH_VID1_ID;
plane_id = GAM_DEPTH_VID0_ID;
break;
case STI_CURSOR:
/* no need to set depth for cursor */
return 0;
default:
DRM_ERROR("Unknown layer %d\n", layer->desc);
DRM_ERROR("Unknown plane %d\n", plane->desc);
return 1;
}
mask = GAM_DEPTH_MASK_ID << (3 * depth);
layer_id = layer_id << (3 * depth);
/* Search if a previous depth was already assigned to the plane */
val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
mask = GAM_DEPTH_MASK_ID << (3 * i);
if ((val & mask) == plane_id << (3 * i))
break;
}
mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
plane_id = plane_id << (3 * (depth - 1));
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
sti_layer_to_str(layer), depth);
sti_plane_to_str(plane), depth);
dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
layer_id, mask);
plane_id, mask);
val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
val &= ~mask;
val |= layer_id;
val |= plane_id;
sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
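A worked example of the crossbar update above, assuming GAM_DEPTH_MASK_ID is the usual 3-bit per-level mask (0x7):
/*
 * Request: GDP1 at depth/zpos 2
 *  - the scan over GAM_MIXER_CRB adds any slot already holding
 *    GAM_DEPTH_GDP1_ID to 'mask', so the plane's old position is cleared;
 *  - the target slot is added: mask |= 0x7 << (3 * (2 - 1))  -> bits [5:3];
 *  - val = (val & ~mask) | (GAM_DEPTH_GDP1_ID << 3);
 * which guarantees a plane never occupies two crossbar slots at once.
 */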
@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
return 0;
}
static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
{
switch (layer->desc) {
switch (plane->desc) {
case STI_BACK:
return GAM_CTL_BACK_MASK;
case STI_GDP_0:
@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
return GAM_CTL_GDP2_MASK;
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
case STI_VID_0:
case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
case STI_VID_1:
return GAM_CTL_VID1_MASK;
case STI_CURSOR:
return GAM_CTL_CURSOR_MASK;
default:
@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
}
}
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
struct sti_layer *layer, bool status)
int sti_mixer_set_plane_status(struct sti_mixer *mixer,
struct sti_plane *plane, bool status)
{
u32 mask, val;
DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
sti_mixer_to_str(mixer), sti_layer_to_str(layer));
sti_mixer_to_str(mixer), sti_plane_to_str(plane));
mask = sti_mixer_get_layer_mask(layer);
mask = sti_mixer_get_plane_mask(plane);
if (!mask) {
DRM_ERROR("Can not find layer mask\n");
DRM_ERROR("Can't find layer mask\n");
return -EINVAL;
}
@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
{
u32 val;
DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
}
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;

View File

@ -11,10 +11,16 @@
#include <drm/drmP.h>
#include "sti_layer.h"
#include "sti_plane.h"
#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
enum sti_mixer_status {
STI_MIXER_READY,
STI_MIXER_DISABLING,
STI_MIXER_DISABLED,
};
/**
* STI Mixer subdevice structure
*
@ -23,7 +29,7 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
* @enabled: to know if the mixer is active or not
* @status: to know the status of the mixer
*/
struct sti_mixer {
struct device *dev;
@ -31,7 +37,7 @@ struct sti_mixer {
int id;
struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
bool enabled;
enum sti_mixer_status status;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
@ -39,17 +45,16 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer);
struct sti_mixer *sti_mixer_create(struct device *dev, int id,
void __iomem *baseaddr);
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
struct sti_layer *layer, bool status);
void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
int sti_mixer_set_plane_status(struct sti_mixer *mixer,
struct sti_plane *plane, bool status);
int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
struct drm_display_mode *mode);
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 7
#define GAM_MIXER_NB_DEPTH_LEVEL 6
#define STI_MIXER_MAIN 0
#define STI_MIXER_AUX 1

View File

@ -0,0 +1,122 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#include <drm/drmP.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
#include "sti_plane.h"
/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */
enum sti_plane_desc sti_plane_default_zorder[] = {
STI_GDP_0,
STI_GDP_1,
STI_HQVDP_0,
STI_GDP_2,
STI_GDP_3,
};
const char *sti_plane_to_str(struct sti_plane *plane)
{
switch (plane->desc) {
case STI_GDP_0:
return "GDP0";
case STI_GDP_1:
return "GDP1";
case STI_GDP_2:
return "GDP2";
case STI_GDP_3:
return "GDP3";
case STI_HQVDP_0:
return "HQVDP0";
case STI_CURSOR:
return "CURSOR";
default:
return "<UNKNOWN PLANE>";
}
}
EXPORT_SYMBOL(sti_plane_to_str);
static void sti_plane_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
drm_plane_helper_disable(drm_plane);
drm_plane_cleanup(drm_plane);
}
static int sti_plane_set_property(struct drm_plane *drm_plane,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = drm_plane->dev;
struct sti_private *private = dev->dev_private;
struct sti_plane *plane = to_sti_plane(drm_plane);
DRM_DEBUG_DRIVER("\n");
if (property == private->plane_zorder_property) {
plane->zorder = val;
return 0;
}
return -EINVAL;
}
static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
{
struct drm_device *dev = drm_plane->dev;
struct sti_private *private = dev->dev_private;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct drm_property *prop;
prop = private->plane_zorder_property;
if (!prop) {
prop = drm_property_create_range(dev, 0, "zpos", 1,
GAM_MIXER_NB_DEPTH_LEVEL);
if (!prop)
return;
private->plane_zorder_property = prop;
}
drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
}
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
if (sti_plane_default_zorder[i] == plane->desc)
break;
plane->zorder = i + 1;
if (type == DRM_PLANE_TYPE_OVERLAY)
sti_plane_attach_zorder_property(&plane->drm_plane);
DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
plane->drm_plane.base.id,
sti_plane_to_str(plane), plane->zorder);
}
EXPORT_SYMBOL(sti_plane_init_property);
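Because "zpos" is exposed as a plain range property, userspace can reorder overlays with the generic libdrm object-property API; a minimal, hypothetical sketch (error handling trimmed):
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Hypothetical helper: set the "zpos" property of one plane */
	static int set_plane_zpos(int fd, uint32_t plane_id, uint64_t zpos)
	{
		drmModeObjectPropertiesPtr props;
		uint32_t i;
		int ret = -1;

		props = drmModeObjectGetProperties(fd, plane_id,
						   DRM_MODE_OBJECT_PLANE);
		if (!props)
			return -1;

		for (i = 0; i < props->count_props; i++) {
			drmModePropertyPtr prop =
				drmModeGetProperty(fd, props->props[i]);

			if (prop && !strcmp(prop->name, "zpos"))
				ret = drmModeObjectSetProperty(fd, plane_id,
							       DRM_MODE_OBJECT_PLANE,
							       prop->prop_id, zpos);
			drmModeFreeProperty(prop);
		}

		drmModeFreeObjectProperties(props);
		return ret;
	}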
struct drm_plane_funcs sti_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_plane_destroy,
.set_property = sti_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
EXPORT_SYMBOL(sti_plane_helpers_funcs);

View File

@ -0,0 +1,71 @@
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_PLANE_H_
#define _STI_PLANE_H_
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
extern struct drm_plane_funcs sti_plane_helpers_funcs;
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
#define STI_PLANE_TYPE_SHIFT 8
#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
enum sti_plane_type {
STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
};
enum sti_plane_id_of_type {
STI_ID_0 = 0,
STI_ID_1 = 1,
STI_ID_2 = 2,
STI_ID_3 = 3
};
enum sti_plane_desc {
STI_GDP_0 = STI_GDP | STI_ID_0,
STI_GDP_1 = STI_GDP | STI_ID_1,
STI_GDP_2 = STI_GDP | STI_ID_2,
STI_GDP_3 = STI_GDP | STI_ID_3,
STI_HQVDP_0 = STI_VDP | STI_ID_0,
STI_CURSOR = STI_CUR,
STI_BACK = STI_BCK
};
enum sti_plane_status {
STI_PLANE_READY,
STI_PLANE_UPDATED,
STI_PLANE_DISABLING,
STI_PLANE_FLUSHING,
STI_PLANE_DISABLED,
};
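The intended lifecycle behind these states, as far as it can be inferred from the plane handlers and vsync notifiers in this series (the CRTC-side transitions are an assumption, they are not shown in this file):
/*
 * DISABLED  --atomic_update-->             UPDATED
 * UPDATED   --CRTC flush, mixer enable-->  READY      (assumed)
 * READY     --atomic_disable-->            DISABLING
 * DISABLING --CRTC flush, mixer disable--> FLUSHING   (assumed)
 * FLUSHING  --vsync notifier, HW off-->    DISABLED
 */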
/**
* STI plane structure
*
* @plane: drm plane it is bound to (if any)
* @desc: plane type & id
* @status: to know the status of the plane
* @zorder: plane z-order
*/
struct sti_plane {
struct drm_plane drm_plane;
enum sti_plane_desc desc;
enum sti_plane_status status;
int zorder;
};
const char *sti_plane_to_str(struct sti_plane *plane);
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type);
#endif

View File

@ -16,7 +16,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "sti_drm_crtc.h"
#include "sti_crtc.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
unsigned int i;
int ret;
tvout->drm_dev = drm_dev;
@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
sti_tvout_create_encoders(drm_dev, tvout);
ret = component_bind_all(dev, drm_dev);
if (ret)
sti_tvout_destroy_encoders(tvout);
return ret;
return 0;
}
static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
/* do nothing */
struct sti_tvout *tvout = dev_get_drvdata(dev);
sti_tvout_destroy_encoders(tvout);
}
static const struct component_ops sti_tvout_ops = {
@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
.unbind = sti_tvout_unbind,
};
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
}
static int sti_tvout_master_bind(struct device *dev)
{
return 0;
}
static void sti_tvout_master_unbind(struct device *dev)
{
/* do nothing */
}
static const struct component_master_ops sti_tvout_master_ops = {
.bind = sti_tvout_master_bind,
.unbind = sti_tvout_master_unbind,
};
static int sti_tvout_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
struct resource *res;
struct device_node *child_np;
struct component_match *match = NULL;
DRM_INFO("%s\n", __func__);
@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tvout);
of_platform_populate(node, NULL, NULL, dev);
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
component_match_add(dev, &match, compare_of, child_np);
of_node_put(child_np);
child_np = of_get_next_available_child(node, child_np);
}
component_master_add_with_match(dev, &sti_tvout_master_ops, match);
return component_add(dev, &sti_tvout_ops);
}
static int sti_tvout_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sti_tvout_master_ops);
component_del(&pdev->dev, &sti_tvout_ops);
return 0;
}

View File

@ -6,7 +6,7 @@
#include <drm/drmP.h>
#include "sti_layer.h"
#include "sti_plane.h"
#include "sti_vid.h"
#include "sti_vtg.h"
@ -43,35 +43,37 @@
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state)
{
u32 val;
struct drm_crtc *crtc = state->crtc;
struct drm_display_mode *mode = &crtc->mode;
int dst_x = state->crtc_x;
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
u32 val, ydo, xdo, yds, xds;
/* Input / output size
* Align to upper even value */
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
/* Unmask */
val = readl(vid->regs + VID_CTL);
val &= ~VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
return 0;
}
static int sti_vid_commit_layer(struct sti_layer *vid)
{
struct drm_display_mode *mode = vid->mode;
u32 ydo, xdo, yds, xds;
ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
return 0;
}
static int sti_vid_disable_layer(struct sti_layer *vid)
void sti_vid_disable(struct sti_vid *vid)
{
u32 val;
@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
val = readl(vid->regs + VID_CTL);
val |= VID_CTL_IGNORE;
writel(val, vid->regs + VID_CTL);
return 0;
}
static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
{
return NULL;
}
static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
{
return 0;
}
static void sti_vid_init(struct sti_layer *vid)
static void sti_vid_init(struct sti_vid *vid)
{
/* Enable PSI, Mask layer */
writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
static const struct sti_layer_funcs vid_ops = {
.get_formats = sti_vid_get_formats,
.get_nb_formats = sti_vid_get_nb_formats,
.init = sti_vid_init,
.prepare = sti_vid_prepare_layer,
.commit = sti_vid_commit_layer,
.disable = sti_vid_disable_layer,
};
struct sti_layer *sti_vid_create(struct device *dev)
struct sti_vid *sti_vid_create(struct device *dev, int id,
void __iomem *baseaddr)
{
struct sti_layer *vid;
struct sti_vid *vid;
vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
if (!vid) {
@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
return NULL;
}
vid->ops = &vid_ops;
vid->dev = dev;
vid->regs = baseaddr;
vid->id = id;
sti_vid_init(vid);
return vid;
}

View File

@ -7,6 +7,23 @@
#ifndef _STI_VID_H_
#define _STI_VID_H_
struct sti_layer *sti_vid_create(struct device *dev);
/**
* STI VID structure
*
* @dev: driver device
* @regs: vid registers
* @id: id of the vid
*/
struct sti_vid {
struct device *dev;
void __iomem *regs;
int id;
};
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state);
void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, int id,
void __iomem *baseaddr);
#endif