amdgpu, i915, exynos, udl, sii8620 and core fixes
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJbPsIxAAoJEAx081l5xIa+assP/3i3tpzxASSaONcMoIzNFSv7
8IXTKIaQpoIidhlh4M4i4xhvDoSLzZnB9Y/ITw9a2piceyTB0Oa1VmxDeYp9PqR1
jM5Vuqt+WUSoxYAvwdqxbeMuTHY3C2Ff3POcW/siG/Bl0HIV+ajdvPIsh/Gy0/b9
Q1G0YzZgH7D/VHaUipdjDtsFY+f2YSgM3p618P2A/lDP8WRW66y4CUNDdPPQ+UjU
hLFyumOvywVHtvuDToeQezHrvbAbeYunt9nGx8RzPgL9X6m5+uX68y+HSxhcxR5T
rzb5ozeFLT6R+7VKbY9XgXHTxuMQDEeEQQge6iWMDyBGIhfixW3BoH462spkfaCB
6YceH3y2SsfSOPGax1wmUCQVUAFvPuUYPZv1D356f8AEEtpRTkrqkMd4QuAF9L7M
Yvx7fhbWjIw3G0m4Sj6HYyBjRDzBSz1QiIq9W05+4EghGQAJ/2TcsXx7BApZz9VH
01UawXxsurG6Z7JuIXiG7CdSCYklqi3RMnBlDG9TLLfLqzwpwpPjdQ4m7KZQk2dN
8FoQKMfjzZsuDFVXtCCNEOf9khImiE31335rLvDAIYPqEDmhJb1Xu6r6UWCMgQgx
iEqCJ4SSWjR6wpwcWB4cZ+7Lb8QlmZ3JgqtaoJ3a23C5lIN4lgyrtrHHsGLb4+j2
q0vL5pN3uqSCIOeBOpzk
=v25Z
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2018-07-06' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This is the drm fixes pull for rc4. It's a bit larger than I'd like,
  but the exynos cleanups are pretty mechanical, and I'd rather have
  them in sooner rather than later so we can avoid too many conflicts
  around them. The non-mechanical exynos changes are mostly fixes for
  recently introduced features.

  Apart from the exynos updates, we have:

  i915:
   - GVT and GGTT mapping fixes

  amdgpu:
   - fix HDMI2.0 4K@60 Hz regression
   - Hotplug fixes for dual-GPU laptops to make power management better
   - misc vega12 bios fixes, a race fix and some typos

  sii8620 bridge:
   - small fixes around mode setting

  core:
   - use kvzalloc to allocate blob property memory"

* tag 'drm-fixes-2018-07-06' of git://anongit.freedesktop.org/drm/drm: (34 commits)
  drm/amd/display: add a check for display depth validity
  drm/amd/display: adding ycbcr420 pixel encoding for hdmi
  drm/udl: fix display corruption of the last line
  drm/bridge/sii8620: Fix link mode selection
  drm/bridge/sii8620: Fix display of packed pixel modes
  drm/bridge/sii8620: Send AVI infoframe in all MHL versions
  drm/amdgpu: fix user fence write race condition
  drm/i915: Try GGTT mmapping whole object as partial
  drm/amdgpu/pm: fix display count in non-DC path
  drm/amdgpu: fix swapped emit_ib_size in vce3
  drm: Use kvzalloc for allocating blob property memory
  drm/i915/gvt: changed DDI mode emulation type
  drm/i915/gvt: fix a bug of partially write ggtt enties
  drm/exynos: Replace drm_dev_unref with drm_dev_put
  drm/exynos: Replace drm_gem_object_unreference_unlocked with put function
  drm/exynos: Replace drm_framebuffer_{un/reference} with put,get functions
  drm/exynos: ipp: use correct enum type
  drm/exynos: decon5433: Fix WINCONx reset value
  drm/exynos: decon5433: Fix per-plane global alpha for XRGB modes
  drm/exynos: fimc: Use real buffer width for configuring the hardware
  ...
commit c42c12a905
@@ -190,6 +190,7 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atif *atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif

/*
 * KMS
 */
@@ -34,6 +34,45 @@
#include "amd_acpi.h"
#include "atom.h"

struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	acpi_handle handle;

	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

/* Call the ATIF method
 */
/**
@@ -46,8 +85,9 @@
 * Executes the requested ATIF function (all asics).
 * Returns a pointer to the acpi output buffer.
 */
static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
		struct acpi_buffer *params)
static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
					   int function,
					   struct acpi_buffer *params)
{
	acpi_status status;
	union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
		atif_arg_elements[1].integer.value = 0;
	}

	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
	status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
				      &buffer);

	/* Fail only if calling the method fails and ATIF is supported */
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
 * (all asics).
 * returns 0 on success, error on failure.
 */
static int amdgpu_atif_verify_interface(acpi_handle handle,
		struct amdgpu_atif *atif)
static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
	union acpi_object *info;
	struct atif_verify_interface output;
	size_t size;
	int err = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
	info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
	if (!info)
		return -EIO;

@@ -176,6 +216,35 @@ out:
	return err;
}

static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
{
	acpi_handle handle = NULL;
	char acpi_method_name[255] = { 0 };
	struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
	acpi_status status;

	/* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
	 * systems, ATIF is in the dGPU's namespace.
	 */
	status = acpi_get_handle(dhandle, "ATIF", &handle);
	if (ACPI_SUCCESS(status))
		goto out;

	if (amdgpu_has_atpx()) {
		status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
					 &handle);
		if (ACPI_SUCCESS(status))
			goto out;
	}

	DRM_DEBUG_DRIVER("No ATIF handle found\n");
	return NULL;
out:
	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
	DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
	return handle;
}

/**
 * amdgpu_atif_get_notification_params - determine notify configuration
 *
@@ -188,15 +257,16 @@ out:
 * where n is specified in the result if a notifier is used.
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atif_get_notification_params(acpi_handle handle,
		struct amdgpu_atif_notification_cfg *n)
static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
	union acpi_object *info;
	struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
	struct atif_system_params params;
	size_t size;
	int err = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
	info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
				NULL);
	if (!info) {
		err = -EIO;
		goto out;
@@ -250,14 +320,15 @@ out:
 * (all asics).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
		struct atif_sbios_requests *req)
static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
					  struct atif_sbios_requests *req)
{
	union acpi_object *info;
	size_t size;
	int count = 0;

	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
	info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
				NULL);
	if (!info)
		return -EIO;

@@ -290,11 +361,10 @@ out:
 * Returns NOTIFY code
 */
static int amdgpu_atif_handler(struct amdgpu_device *adev,
		struct acpi_bus_event *event)
			       struct acpi_bus_event *event)
{
	struct amdgpu_atif *atif = &adev->atif;
	struct amdgpu_atif *atif = adev->atif;
	struct atif_sbios_requests req;
	acpi_handle handle;
	int count;

	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
		return NOTIFY_DONE;

	if (!atif->notification_cfg.enabled ||
	if (!atif ||
	    !atif->notification_cfg.enabled ||
	    event->type != atif->notification_cfg.command_code)
		/* Not our event */
		return NOTIFY_DONE;

	/* Check pending SBIOS requests */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	count = amdgpu_atif_get_sbios_requests(handle, &req);
	count = amdgpu_atif_get_sbios_requests(atif, &req);

	if (count <= 0)
		return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
 */
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
	acpi_handle handle;
	struct amdgpu_atif *atif = &adev->atif;
	acpi_handle handle, atif_handle;
	struct amdgpu_atif *atif;
	struct amdgpu_atcs *atcs = &adev->atcs;
	int ret;

@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
	}

	/* Call the ATIF method */
	ret = amdgpu_atif_verify_interface(handle, atif);
	if (ret) {
		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
	/* Probe for ATIF, and initialize it if found */
	atif_handle = amdgpu_atif_probe_handle(handle);
	if (!atif_handle)
		goto out;

	atif = kzalloc(sizeof(*atif), GFP_KERNEL);
	if (!atif) {
		DRM_WARN("Not enough memory to initialize ATIF\n");
		goto out;
	}
	atif->handle = atif_handle;

	/* Call the ATIF method */
	ret = amdgpu_atif_verify_interface(atif);
	if (ret) {
		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
		kfree(atif);
		goto out;
	}
	adev->atif = atif;

	if (atif->notifications.brightness_change) {
		struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
	}

	if (atif->functions.system_params) {
		ret = amdgpu_atif_get_notification_params(handle,
				&atif->notification_cfg);
		ret = amdgpu_atif_get_notification_params(atif);
		if (ret) {
			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
					ret);
@@ -720,4 +803,6 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
	unregister_acpi_notifier(&adev->acpi_nb);
	if (adev->atif)
		kfree(adev->atif);
}
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
	return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}

#if defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void) {
	return amdgpu_atpx_priv.dhandle;
}
#endif

/**
 * amdgpu_atpx_call - call an ATPX method
 *
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
	if (!amdgpu_device_has_dc_support(adev)) {
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
		/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
	return color_space;
}

static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
{
	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
		return;

	timing_out->display_color_depth--;
}

static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
						const struct drm_display_info *info)
{
	int normalized_clk;
	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
		return;
	do {
		normalized_clk = timing_out->pix_clk_khz;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (timing_out->display_color_depth) {
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			return;
		}
		if (normalized_clk <= info->max_tmds_clock)
			return;
		reduce_mode_colour_depth(timing_out);

	} while (timing_out->display_color_depth > COLOR_DEPTH_888);

}
/*****************************************************************************/

static void
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
					     const struct drm_connector *connector)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;

	memset(timing_out, 0, sizeof(struct dc_crtc_timing));

@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */

	if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
		adjust_colour_depth_from_display_info(timing_out, info);
}

static void fill_audio_info(struct audio_info *audio_info,
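A worked example of the arithmetic above (an editorial illustration, not part of the patch, and the sink numbers are assumed): for a sink reporting max_tmds_clock of 600000 kHz (HDMI 2.0), a 3840x2160@60 mode has pix_clk_khz of roughly 594000. At 10 bpc the normalized clock becomes 594000 * 30 / 24 = 742500 kHz, which exceeds the limit, so the loop drops the depth back toward 8 bpc (594000 kHz, which fits). With the new YCbCr 4:2:0 path the clock is halved to 297000 kHz first, so even 12 bpc (297000 * 36 / 24 = 445500 kHz) stays under the same limit.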
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
	uint8_t  acggfxclkspreadpercent;
	uint16_t acggfxclkspreadfreq;

	uint32_t boardreserved[10];
	uint8_t Vr2_I2C_address;
	uint8_t padding_vr2[3];

	uint32_t boardreserved[9];
};

/*
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
	return 0;
}

static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
			struct atom_firmware_info_v3_2 *fw_info)
{
	uint32_t frequency = 0;

	boot_values->ulRevision = fw_info->firmware_revision;
	boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
	boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
	boot_values->usVddc     = fw_info->bootup_vddc_mv;
	boot_values->usVddci    = fw_info->bootup_vddci_mv;
	boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
	boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
	boot_values->ucCoolingID = fw_info->coolingsolution_id;
	boot_values->ulSocClk   = 0;
	boot_values->ulDCEFClk  = 0;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
		boot_values->ulSocClk   = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
		boot_values->ulDCEFClk  = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
		boot_values->ulEClk     = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
		boot_values->ulVClk     = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
		boot_values->ulDClk     = frequency;
}

static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
			struct pp_atomfwctrl_bios_boot_up_values *boot_values,
			struct atom_firmware_info_v3_1 *fw_info)
{
	uint32_t frequency = 0;

	boot_values->ulRevision = fw_info->firmware_revision;
	boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
	boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
	boot_values->usVddc     = fw_info->bootup_vddc_mv;
	boot_values->usVddci    = fw_info->bootup_vddci_mv;
	boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
	boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
	boot_values->ucCoolingID = fw_info->coolingsolution_id;
	boot_values->ulSocClk   = 0;
	boot_values->ulDCEFClk  = 0;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
		boot_values->ulSocClk   = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
		boot_values->ulDCEFClk  = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
		boot_values->ulEClk     = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
		boot_values->ulVClk     = frequency;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
		boot_values->ulDClk     = frequency;
}

int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
			struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
	struct atom_firmware_info_v3_1 *info = NULL;
	struct atom_firmware_info_v3_2 *fwinfo_3_2;
	struct atom_firmware_info_v3_1 *fwinfo_3_1;
	struct atom_common_table_header *info = NULL;
	uint16_t ix;

	ix = GetIndexIntoMasterDataTable(firmwareinfo);
	info = (struct atom_firmware_info_v3_1 *)
	info = (struct atom_common_table_header *)
		smu_atom_get_data_table(hwmgr->adev,
				ix, NULL, NULL, NULL);

@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
		return -EINVAL;
	}

	boot_values->ulRevision = info->firmware_revision;
	boot_values->ulGfxClk   = info->bootup_sclk_in10khz;
	boot_values->ulUClk     = info->bootup_mclk_in10khz;
	boot_values->usVddc     = info->bootup_vddc_mv;
	boot_values->usVddci    = info->bootup_vddci_mv;
	boot_values->usMvddc    = info->bootup_mvddc_mv;
	boot_values->usVddGfx   = info->bootup_vddgfx_mv;
	boot_values->ucCoolingID = info->coolingsolution_id;
	boot_values->ulSocClk   = 0;
	boot_values->ulDCEFClk  = 0;
	if ((info->format_revision == 3) && (info->content_revision == 2)) {
		fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
		pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
				boot_values, fwinfo_3_2);
	} else if ((info->format_revision == 3) && (info->content_revision == 1)) {
		fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
		pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
				boot_values, fwinfo_3_1);
	} else {
		pr_info("Fw info table revision does not match!");
		return -EINVAL;
	}

	return 0;
}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
	param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
	param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;

	param->Vr2_I2C_address = info->Vr2_I2C_address;

	return 0;
}
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
	uint32_t   ulUClk;
	uint32_t   ulSocClk;
	uint32_t   ulDCEFClk;
	uint32_t   ulEClk;
	uint32_t   ulVClk;
	uint32_t   ulDClk;
	uint16_t   usVddc;
	uint16_t   usVddci;
	uint16_t   usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
	uint8_t  acggfxclkspreadenabled;
	uint8_t  acggfxclkspreadpercent;
	uint16_t acggfxclkspreadfreq;

	uint8_t Vr2_I2C_address;
};

int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)

	data->registry_data.disallowed_features = 0x0;
	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
				(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
	uint32_t    mem_clock;
	uint32_t    soc_clock;
	uint32_t    dcef_clock;
	uint32_t    eclock;
	uint32_t    dclock;
	uint32_t    vclock;
};

#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
	}

	ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;

	return 0;
}

@@ -499,7 +499,10 @@ typedef struct {
	uint8_t  AcgGfxclkSpreadPercent;
	uint16_t AcgGfxclkSpreadFreq;

	uint32_t BoardReserved[10];
	uint8_t  Vr2_I2C_address;
	uint8_t  padding_vr2[3];

	uint32_t BoardReserved[9];


	uint32_t MmHubPadding[7];
@@ -14,6 +14,7 @@
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>

#include <linux/clk.h>
#include <linux/delay.h>
@@ -72,9 +73,7 @@ struct sii8620 {
	struct regulator_bulk_data supplies[2];
	struct mutex lock; /* context lock, protects fields below */
	int error;
	int pixel_clock;
	unsigned int use_packed_pixel:1;
	int video_code;
	enum sii8620_mode mode;
	enum sii8620_sink_type sink_type;
	u8 cbus_status;
@@ -82,7 +81,6 @@ struct sii8620 {
	u8 xstat[MHL_XDS_SIZE];
	u8 devcap[MHL_DCAP_SIZE];
	u8 xdevcap[MHL_XDC_SIZE];
	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
	bool feature_complete;
	bool devcap_read;
	bool sink_detected;
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx)

static void sii8620_set_format(struct sii8620 *ctx)
{
	u8 out_fmt;

	if (sii8620_is_mhl3(ctx)) {
		sii8620_setbits(ctx, REG_M3_P0CTRL,
				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
				ctx->use_packed_pixel ? ~0 : 0);
	} else {
		if (ctx->use_packed_pixel) {
			sii8620_write_seq_static(ctx,
				REG_VID_MODE, BIT_VID_MODE_M1080P,
				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
				REG_MHLTX_CTL6, 0x60
			);
		} else {
			sii8620_write_seq_static(ctx,
				REG_VID_MODE, 0,
				REG_MHL_TOP_CTL, 1,
				REG_MHLTX_CTL6, 0xa0
			);
		}
	}

	if (ctx->use_packed_pixel)
		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
	else
		out_fmt = VAL_TPI_FORMAT(RGB, FULL);

	sii8620_write_seq(ctx,
		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
		REG_TPI_OUTPUT, out_fmt,
	);
}

@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
	return frm_len;
}

static void sii8620_set_infoframes(struct sii8620 *ctx)
static void sii8620_set_infoframes(struct sii8620 *ctx,
				   struct drm_display_mode *mode)
{
	struct mhl3_infoframe mhl_frm;
	union hdmi_infoframe frm;
	u8 buf[31];
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
						       mode,
						       true);
	if (ctx->use_packed_pixel)
		frm.avi.colorspace = HDMI_COLORSPACE_YUV422;

	if (!ret)
		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
	if (ret > 0)
		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);

	if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
		sii8620_write(ctx, REG_TPI_SC,
			BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
			ARRAY_SIZE(ctx->avif) - 3);
		sii8620_write(ctx, REG_PKT_FILTER_0,
			BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
			BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
		return;
	}

	ret = hdmi_avi_infoframe_init(&frm.avi);
	frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
	frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
	frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
	frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
	frm.avi.video_code = ctx->video_code;
	if (!ret)
		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
	if (ret > 0)
		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
	sii8620_write(ctx, REG_PKT_FILTER_0,
		BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
		BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)

static void sii8620_start_video(struct sii8620 *ctx)
{
	struct drm_display_mode *mode =
		&ctx->bridge.encoder->crtc->state->adjusted_mode;

	if (!sii8620_is_mhl3(ctx))
		sii8620_stop_video(ctx);

@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
	sii8620_set_format(ctx);

	if (!sii8620_is_mhl3(ctx)) {
		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
			MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
		u8 link_mode = MHL_DST_LM_PATH_ENABLED;

		if (ctx->use_packed_pixel)
			link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
		else
			link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;

		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
		sii8620_set_auto_zone(ctx);
	} else {
		static const struct {
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
			  MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
		};
		u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
		int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
		int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
		int i;

		for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
				      clk_spec[i].link_rate);
	}

	sii8620_set_infoframes(ctx);
	sii8620_set_infoframes(ctx, mode);
}

static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)

static void sii8620_status_changed_path(struct sii8620 *ctx)
{
	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
				      MHL_DST_LM_CLK_MODE_NORMAL
				      | MHL_DST_LM_PATH_ENABLED);
	} else {
		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
				      MHL_DST_LM_CLK_MODE_NORMAL);
	}
	u8 link_mode;

	if (ctx->use_packed_pixel)
		link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
	else
		link_mode = MHL_DST_LM_CLK_MODE_NORMAL;

	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
		link_mode |= MHL_DST_LM_PATH_ENABLED;

	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
			      link_mode);
}

static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
	mutex_lock(&ctx->lock);

	ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
	ctx->video_code = drm_match_cea_mode(adjusted_mode);
	ctx->pixel_clock = adjusted_mode->clock;

	mutex_unlock(&ctx->lock);

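A brief note on the numbers used in the sii8620 hunks above (an editorial illustration, not part of the patch): the MHL3 link clock is computed as clk = mode->clock * 3 for normal 24-bit RGB output, or * 2 when packed-pixel output (YCbCr 4:2:2, 16 bits per pixel) is selected, and that value is then matched against the clk_spec[] table to pick a link rate. Assuming a 4K@30 mode with mode->clock = 297000, this gives 891000 in normal mode but only 594000 in packed-pixel mode, which is why selecting packed pixel also has to be reflected consistently in the LINK_MODE writes that these fixes adjust.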
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)

	drm_mode_object_unregister(blob->dev, &blob->base);

	kfree(blob);
	kvfree(blob);
}

/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
		return ERR_PTR(-EINVAL);

	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
	blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
	if (!blob)
		return ERR_PTR(-ENOMEM);

@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
	ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
				    true, drm_property_free_blob);
	if (ret) {
		kfree(blob);
		kvfree(blob);
		return ERR_PTR(-EINVAL);
	}

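The blob-property hunk above swaps kzalloc()/kfree() for kvzalloc()/kvfree(). A minimal sketch of that allocation pattern follows (illustrative only, not code from this commit; the helper names are made up): kvzalloc() tries a regular kmalloc() first and falls back to vmalloc() for large requests, so a big userspace-supplied blob no longer depends on finding physically contiguous pages, and kvfree() releases memory obtained from either path.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper showing the kvzalloc()/kvfree() pairing. */
static void *example_blob_alloc(size_t header, size_t length)
{
	void *blob;

	if (!length || length > ULONG_MAX - header)
		return NULL;

	/* Zeroed allocation: kmalloc when it fits, vmalloc otherwise. */
	blob = kvzalloc(header + length, GFP_KERNEL);
	return blob;
}

static void example_blob_free(void *blob)
{
	/* kvfree() handles both kmalloc- and vmalloc-backed memory. */
	kvfree(blob);
}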
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
	unsigned long val;

	val = readl(ctx->addr + DECON_WINCONx(win));
	val &= ~WINCONx_BPPMODE_MASK;
	val &= WINCONx_ENWIN_F;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
		writel(val, ctx->addr + DECON_VIDOSDxB(win));
	}

	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
		VIDOSD_Wx_ALPHA_B_F(0x0);
	val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
		VIDOSD_Wx_ALPHA_B_F(0xff);
	writel(val, ctx->addr + DECON_VIDOSDxC(win));

	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -420,7 +420,7 @@ err_mode_config_cleanup:
err_free_private:
	kfree(private);
err_free_drm:
	drm_dev_unref(drm);
	drm_dev_put(drm);

	return ret;
}
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev)
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);

	drm_dev_unref(drm);
	drm_dev_put(drm);
}

static const struct component_master_ops exynos_drm_ops = {
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,

err:
	while (i--)
		drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
		drm_gem_object_put_unlocked(&exynos_gem[i]->base);

	return ERR_PTR(ret);
}
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
static void fimc_set_window(struct fimc_context *ctx,
			    struct exynos_drm_ipp_buffer *buf)
{
	unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
	u32 cfg, h1, h2, v1, v2;

	/* cropped image */
	h1 = buf->rect.x;
	h2 = buf->buf.width - buf->rect.w - buf->rect.x;
	h2 = real_width - buf->rect.w - buf->rect.x;
	v1 = buf->rect.y;
	v2 = buf->buf.height - buf->rect.h - buf->rect.y;

	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
		buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
		buf->buf.width, buf->buf.height);
		real_width, buf->buf.height);
	DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);

	/*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
static void fimc_src_set_size(struct fimc_context *ctx,
			      struct exynos_drm_ipp_buffer *buf)
{
	unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
	u32 cfg;

	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);

	/* original size */
	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
		EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));

	fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
	 * for now, we support only ITU601 8 bit mode
	 */
	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
		EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
		EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
		EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
	fimc_write(ctx, cfg, EXYNOS_CISRCFMT);

@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
static void fimc_dst_set_size(struct fimc_context *ctx,
			      struct exynos_drm_ipp_buffer *buf)
{
	unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
	u32 cfg, cfg_ext;

	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);

	/* original size */
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
		EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));

	fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_put_unlocked(obj);

	return 0;
}
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_put_unlocked(obj);

	return exynos_gem->size;
}
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
		return;
	}

	drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_put_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we has already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_put_unlocked(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference_unlocked(obj);
	drm_gem_object_put_unlocked(obj);

	return 0;
}
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
			GSC_IN_CHROMA_ORDER_CRCB);
		break;
	case DRM_FORMAT_NV21:
		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
		break;
	case DRM_FORMAT_NV61:
		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
			GSC_IN_YUV420_2P);
		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
		break;
	case DRM_FORMAT_YUV422:
		cfg |= GSC_IN_YUV422_3P;
		break;
	case DRM_FORMAT_YUV420:
		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
		break;
	case DRM_FORMAT_YVU420:
		cfg |= GSC_IN_YUV420_3P;
		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
		break;
	case DRM_FORMAT_NV12:
		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
		break;
	case DRM_FORMAT_NV16:
		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
			GSC_IN_YUV420_2P);
		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
		break;
	}

@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)

	switch (degree) {
	case DRM_MODE_ROTATE_0:
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg |= GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_X)
			cfg |= GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg |= GSC_IN_ROT_YFLIP;
		break;
	case DRM_MODE_ROTATE_90:
		cfg |= GSC_IN_ROT_90;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg |= GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_X)
			cfg |= GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg |= GSC_IN_ROT_YFLIP;
		break;
	case DRM_MODE_ROTATE_180:
		cfg |= GSC_IN_ROT_180;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg &= ~GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_X)
			cfg &= ~GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg &= ~GSC_IN_ROT_YFLIP;
		break;
	case DRM_MODE_ROTATE_270:
		cfg |= GSC_IN_ROT_270;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg &= ~GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_X)
			cfg &= ~GSC_IN_ROT_XFLIP;
		if (rotation & DRM_MODE_REFLECT_Y)
			cfg &= ~GSC_IN_ROT_YFLIP;
		break;
	}
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
		GSC_SRCIMG_WIDTH_MASK);

	cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
	cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
		GSC_SRCIMG_HEIGHT(buf->buf.height));

	gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
			GSC_OUT_CHROMA_ORDER_CRCB);
		break;
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV61:
		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
		break;
	case DRM_FORMAT_NV61:
		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
		break;
	case DRM_FORMAT_YUV422:
		cfg |= GSC_OUT_YUV422_3P;
		break;
	case DRM_FORMAT_YUV420:
		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
		break;
	case DRM_FORMAT_YVU420:
		cfg |= GSC_OUT_YUV420_3P;
		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
		break;
	case DRM_FORMAT_NV12:
		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
		break;
	case DRM_FORMAT_NV16:
		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
			GSC_OUT_YUV420_2P);
		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
		break;
	}

@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
	/* original size */
	cfg = gsc_read(GSC_DSTIMG_SIZE);
	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
	cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
	cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
	       GSC_DSTIMG_HEIGHT(buf->buf.height);
	gsc_write(cfg, GSC_DSTIMG_SIZE);

@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
};

static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
	{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
	int ret = 0;
	int i;

	/* basic checks */
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;
	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int width = (i == 0) ? buf->buf.width :
			DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

	/* pitch for additional planes must match */
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

	/* get GEM buffers and check their size */
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int height = (i == 0) ? buf->buf.height :
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};

static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];

	if (!limits)
		return 0;

	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(buf->buf.width, &l.h) ||
	if (!__size_limit_check(real_width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
	return 0;
}

static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
					struct exynos_drm_ipp_buffer *buf,
					struct exynos_drm_ipp_buffer *src,
					struct exynos_drm_ipp_buffer *dst,
					bool rotate, bool swap)
{
	const struct exynos_drm_ipp_formats *fmt;
	int ret, i;

	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!fmt) {
		DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
				 buf == src ? "src" : "dst");
		return -EINVAL;
	}

	/* basic checks */
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;

	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int width = (i == 0) ? buf->buf.width :
			DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

	/* pitch for additional planes must match */
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

	/* check driver limits */
	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
					       fmt->num_limits,
					       rotate,
					       buf == dst ? swap : false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						fmt->limits,
						fmt->num_limits, swap);
	return ret;
}

static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
		return -EINVAL;
	}

	src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_SOURCE);
	if (!src_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
		return -EINVAL;
	}
	ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
					       src_fmt->num_limits,
					       rotate, false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						src_fmt->limits,
						src_fmt->num_limits, swap);
	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
	if (ret)
		return ret;

	dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!dst_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
		return -EINVAL;
	}
	ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
					       dst_fmt->num_limits,
					       false, swap);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						dst_fmt->limits,
						dst_fmt->num_limits, swap);
	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
	if (ret)
		return ret;

@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
	if (plane->state) {
		exynos_state = to_exynos_plane_state(plane->state);
		if (exynos_state->base.fb)
			drm_framebuffer_unreference(exynos_state->base.fb);
			drm_framebuffer_put(exynos_state->base.fb);
		kfree(exynos_state);
		plane->state = NULL;
	}
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
	val &= ~ROT_CONTROL_FLIP_MASK;

	if (rotation & DRM_MODE_REFLECT_X)
		val |= ROT_CONTROL_FLIP_HORIZONTAL;
	if (rotation & DRM_MODE_REFLECT_Y)
		val |= ROT_CONTROL_FLIP_VERTICAL;
	if (rotation & DRM_MODE_REFLECT_Y)
		val |= ROT_CONTROL_FLIP_HORIZONTAL;

	val &= ~ROT_CONTROL_ROT_MASK;

@@ -30,6 +30,7 @@
#define scaler_write(cfg, offset)	writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK			4
#define SCALER_AUTOSUSPEND_DELAY	2000
#define SCALER_RESET_WAIT_RETRIES	100

struct scaler_data {
	const char	*clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
static u32 scaler_get_format(u32 drm_fmt)
{
	switch (drm_fmt) {
	case DRM_FORMAT_NV21:
		return SCALER_YUV420_2P_UV;
	case DRM_FORMAT_NV12:
		return SCALER_YUV420_2P_UV;
	case DRM_FORMAT_NV21:
		return SCALER_YUV420_2P_VU;
	case DRM_FORMAT_YUV420:
		return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
		return SCALER_YUV422_1P_UYVY;
	case DRM_FORMAT_YVYU:
		return SCALER_YUV422_1P_YVYU;
	case DRM_FORMAT_NV61:
		return SCALER_YUV422_2P_UV;
	case DRM_FORMAT_NV16:
		return SCALER_YUV422_2P_UV;
	case DRM_FORMAT_NV61:
		return SCALER_YUV422_2P_VU;
	case DRM_FORMAT_YUV422:
		return SCALER_YUV422_3P;
	case DRM_FORMAT_NV42:
		return SCALER_YUV444_2P_UV;
	case DRM_FORMAT_NV24:
		return SCALER_YUV444_2P_UV;
	case DRM_FORMAT_NV42:
		return SCALER_YUV444_2P_VU;
	case DRM_FORMAT_YUV444:
		return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
	return 0;
}

static inline int scaler_reset(struct scaler_context *scaler)
{
	int retry = SCALER_RESET_WAIT_RETRIES;

	scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
	do {
		cpu_relax();
	} while (retry > 1 &&
		 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
	do {
		cpu_relax();
		scaler_write(1, SCALER_INT_EN);
	} while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);

	return retry ? 0 : -EIO;
}

static inline void scaler_enable_int(struct scaler_context *scaler)
{
	u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
	u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
	struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;

	scaler->task = task;

	pm_runtime_get_sync(scaler->dev);
	if (scaler_reset(scaler)) {
		pm_runtime_put(scaler->dev);
		return -EIO;
	}

	scaler->task = task;

	scaler_set_src_fmt(scaler, src_fmt);
	scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)

static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
	return scaler_read(SCALER_INT_STATUS);
	u32 val = scaler_read(SCALER_INT_STATUS);

	scaler_write(val, SCALER_INT_STATUS);

	return val;
}

static inline int scaler_task_done(u32 val)
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P		(3 << 4)
#define GSC_OUT_YUV422_1P		(4 << 4)
#define GSC_OUT_YUV422_2P		(5 << 4)
#define GSC_OUT_YUV422_3P		(6 << 4)
#define GSC_OUT_YUV444			(7 << 4)
#define GSC_OUT_TILE_TYPE_MASK		(1 << 2)
#define GSC_OUT_TILE_C_16x8		(0 << 2)
@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
|
||||
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
|
||||
TRANS_DDI_PORT_MASK);
|
||||
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
|
||||
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
|
||||
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
|
||||
(PORT_B << TRANS_DDI_PORT_SHIFT) |
|
||||
TRANS_DDI_FUNC_ENABLE);
|
||||
if (IS_BROADWELL(dev_priv)) {
|
||||
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
 		vgpu_free_mm(mm);
 		return ERR_PTR(-ENOMEM);
 	}
+	mm->ggtt_mm.last_partial_off = -1UL;
 
 	return mm;
 }
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
 		invalidate_ppgtt_mm(mm);
 	} else {
 		vfree(mm->ggtt_mm.virtual_ggtt);
+		mm->ggtt_mm.last_partial_off = -1UL;
 	}
 
 	vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
 			bytes);
 
+	/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
+	 * write, we assume the two 4 bytes writes are consecutive.
+	 * Otherwise, we abort and report error
+	 */
+	if (bytes < info->gtt_entry_size) {
+		if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+			/* the first partial part*/
+			ggtt_mm->ggtt_mm.last_partial_off = off;
+			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+			return 0;
+		} else if ((g_gtt_index ==
+				(ggtt_mm->ggtt_mm.last_partial_off >>
+				info->gtt_entry_size_shift)) &&
+			(off != ggtt_mm->ggtt_mm.last_partial_off)) {
+			/* the second partial part */
+
+			int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+				(info->gtt_entry_size - 1);
+
+			memcpy((void *)&e.val64 + last_off,
+				(void *)&ggtt_mm->ggtt_mm.last_partial_data +
+				last_off, bytes);
+
+			ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+		} else {
+			int last_offset;
+
+			gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+					ggtt_mm->ggtt_mm.last_partial_off, off,
+					bytes, info->gtt_entry_size);
+
+			/* set host ggtt entry to scratch page and clear
+			 * virtual ggtt entry as not present for last
+			 * partially write offset
+			 */
+			last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+				(~(info->gtt_entry_size - 1));
+
+			ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+			ggtt_invalidate_pte(vgpu, &m);
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			ops->clear_present(&m);
+			ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+			ggtt_invalidate(gvt->dev_priv);
+
+			ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+			ops->clear_present(&e);
+			ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+			ggtt_mm->ggtt_mm.last_partial_off = off;
+			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+			return 0;
+		}
+	}
+
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
 		m = e;
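The block above is the core of the partial-write fix: a guest may update one 8-byte GGTT entry with two 4-byte MMIO writes, so the first half is parked in last_partial_off/last_partial_data and only merged and committed once the matching second half of the same entry arrives; an out-of-sequence write is logged, the stale entry is scrubbed, and the new half is stashed instead. A stand-alone sketch of the merge bookkeeping (illustration only; the struct and function below are invented, not the GVT code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ENTRY_SIZE 8	/* one GGTT entry is 8 bytes */

struct partial_state {
	unsigned long last_off;	/* byte offset of the stashed half, -1UL = none */
	uint64_t last_data;	/* entry value with the stashed half applied */
};

/*
 * Apply a 4-byte write at byte offset 'off' of an 8-byte entry.
 * Returns 1 and stores the merged entry in *out once both halves of the
 * same entry have been seen; returns 0 while still waiting.
 */
static int partial_entry_write(struct partial_state *st, uint64_t cur_entry,
			       unsigned long off, uint32_t data, uint64_t *out)
{
	uint64_t e = cur_entry;

	memcpy((char *)&e + (off & (ENTRY_SIZE - 1)), &data, sizeof(data));

	if (st->last_off == -1UL) {
		/* first half: stash it and wait for the sibling write */
		st->last_off = off;
		st->last_data = e;
		return 0;
	}

	if ((off / ENTRY_SIZE) == (st->last_off / ENTRY_SIZE) &&
	    off != st->last_off) {
		/* second half of the same entry: merge the stashed half in */
		unsigned long last = st->last_off & (ENTRY_SIZE - 1);

		memcpy((char *)&e + last, (char *)&st->last_data + last, 4);
		st->last_off = -1UL;
		*out = e;
		return 1;
	}

	/* out-of-sequence write: drop the stash and start over with this one */
	st->last_off = off;
	st->last_data = e;
	return 0;
}

int main(void)
{
	struct partial_state st = { .last_off = -1UL };
	uint64_t merged;

	partial_entry_write(&st, 0, 0x10, 0x11111111u, &merged);	/* low half */
	if (partial_entry_write(&st, 0, 0x14, 0x22222222u, &merged))	/* high half */
		printf("merged entry = 0x%016llx\n", (unsigned long long)merged);
	return 0;
}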
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
 		} ppgtt_mm;
 		struct {
 			void *virtual_ggtt;
+			unsigned long last_partial_off;
+			u64 last_partial_data;
 		} ggtt_mm;
 	};
 };
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 	struct i915_vma *vma;
 	pgoff_t page_offset;
-	unsigned int flags;
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-	/* If the object is smaller than a couple of partial vma, it is
-	 * not worth only creating a single partial vma - we may as well
-	 * clear enough space for the full object.
-	 */
-	flags = PIN_MAPPABLE;
-	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
-		flags |= PIN_NONBLOCK | PIN_NONFAULT;
-
 	/* Now pin it into the GTT as needed */
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE |
+				       PIN_NONBLOCK |
+				       PIN_NONFAULT);
 	if (IS_ERR(vma)) {
 		/* Use a partial view if it is bigger than available space */
 		struct i915_ggtt_view view =
 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+		unsigned int flags;
 
-		/* Userspace is now writing through an untracked VMA, abandon
+		flags = PIN_MAPPABLE;
+		if (view.type == I915_GGTT_VIEW_NORMAL)
+			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+		/*
+		 * Userspace is now writing through an untracked VMA, abandon
 		 * all hope that the hardware is able to track future writes.
 		 */
 		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
-		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+		if (IS_ERR(vma) && !view.type) {
+			flags = PIN_MAPPABLE;
+			view.type = I915_GGTT_VIEW_PARTIAL;
+			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+		}
 	}
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
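The control flow above first tries to pin the whole object into the mappable GGTT with PIN_NONBLOCK | PIN_NONFAULT, and only on failure computes a view: a normal (whole-object) view is retried without PIN_NONFAULT but still non-blocking, and if even that fails the view is degraded to a partial mapping around the faulting chunk. A stripped-down sketch of the try-then-fall-back chain (illustration only; the pin() function, flags, and types are invented):

#include <stdio.h>

enum view_type { VIEW_NORMAL = 0, VIEW_PARTIAL = 1 };

#define PIN_MAPPABLE	(1 << 0)
#define PIN_NONBLOCK	(1 << 1)
#define PIN_NONFAULT	(1 << 2)

/* Pretend pinning the full object fails, e.g. the aperture is too small. */
static int pin(enum view_type type, unsigned int flags)
{
	(void)flags;
	return type == VIEW_PARTIAL ? 0 : -1;
}

static int fault_pin(void)
{
	enum view_type type = VIEW_NORMAL;
	int err;

	/* 1) whole object, but never evict or fault anything else in */
	err = pin(type, PIN_MAPPABLE | PIN_NONBLOCK | PIN_NONFAULT);
	if (err) {
		unsigned int flags = PIN_MAPPABLE;

		/* 2) whole-object view again, still non-blocking */
		if (type == VIEW_NORMAL)
			flags |= PIN_NONBLOCK;
		err = pin(type, flags);

		/* 3) last resort: a partial view around the faulting page */
		if (err && type == VIEW_NORMAL) {
			type = VIEW_PARTIAL;
			err = pin(type, PIN_MAPPABLE);
		}
	}
	return err;
}

int main(void)
{
	printf("fault_pin() -> %d\n", fault_pin());
	return 0;
}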
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
 						     obj->base.size >> PAGE_SHIFT));
 		vma->size = view->partial.size;
 		vma->size <<= PAGE_SHIFT;
-		GEM_BUG_ON(vma->size >= obj->base.size);
+		GEM_BUG_ON(vma->size > obj->base.size);
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
 		vma->size = intel_rotation_info_size(&view->rotated);
 		vma->size <<= PAGE_SHIFT;
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 
 	if (cmd > (char *) urb->transfer_buffer) {
 		/* Send partial buffer remaining before exiting */
-		int len = cmd - (char *) urb->transfer_buffer;
+		int len;
+		if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+			*cmd++ = 0xAF;
+		len = cmd - (char *) urb->transfer_buffer;
 		ret = udl_submit_urb(dev, urb, len);
 		bytes_sent += len;
 	} else
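The fix above appends a 0xAF padding byte to the partially filled transfer buffer when there is still room for it, and only then computes the submit length; per the commit title this is what cures the corrupted last line. A small sketch of the append-if-space pattern (illustration only; the buffer handling below is invented, only the 0xAF value comes from the diff):

#include <stdio.h>
#include <string.h>

#define TERMINATOR 0xAF

/*
 * 'cmd' points one past the last command byte already written into 'buf'.
 * Append the padding byte only if it still fits, then return the number of
 * bytes to hand to the transport layer.
 */
static int finish_buffer(unsigned char *buf, size_t buf_len, unsigned char *cmd)
{
	if (cmd < buf + buf_len)
		*cmd++ = TERMINATOR;	/* safe: at least one byte of space is left */

	return (int)(cmd - buf);	/* length includes the padding byte if added */
}

int main(void)
{
	unsigned char buf[8];
	unsigned char *cmd;

	memset(buf, 0, sizeof(buf));

	cmd = buf + 5;	/* pretend 5 command bytes were written */
	printf("submit %d bytes\n", finish_buffer(buf, sizeof(buf), cmd));	/* 6 */

	cmd = buf + sizeof(buf);	/* buffer exactly full */
	printf("submit %d bytes\n", finish_buffer(buf, sizeof(buf), cmd));	/* 8, nothing appended */
	return 0;
}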
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
 		raw_pixels_count_byte = cmd++; /* we'll know this later */
 		raw_pixel_start = pixel;
 
-		cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
-					min((int)(pixel_end - pixel) / bpp,
-					    (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+		cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+					(unsigned long)(pixel_end - pixel) / bpp,
+					(unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
 
-		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+		prefetch_range((void *) pixel, cmd_pixel_end - pixel);
 		pixel_val16 = get_pixel_val16(pixel, bpp);
 
 		while (pixel < cmd_pixel_end) {
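The rewritten bound above clamps the span to three limits at once: the per-command pixel cap (MAX_CMD_PIXELS + 1), the input pixels left in the scanline, and the command bytes left in the buffer (now reserving one extra byte, cmd_buffer_end - 1 - cmd, with the /2 reflecting the two output bytes written per pixel); the prefetch then uses the byte distance directly. A quick numeric sketch of the same min3 clamp (illustration only; the values and the min3ul helper are made up):

#include <stdio.h>

#define MAX_CMD_PIXELS 255UL	/* stand-in for the driver's per-command cap */

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned long bpp = 2;			/* bytes per input pixel */
	unsigned long input_bytes_left = 1000;	/* pixel_end - pixel */
	unsigned long cmd_bytes_left = 61;	/* cmd_buffer_end - 1 - cmd */

	/* how many pixels this command may cover */
	unsigned long n = min3ul(MAX_CMD_PIXELS + 1UL,
				 input_bytes_left / bpp,
				 cmd_bytes_left / 2);	/* 2 output bytes per pixel */

	printf("pixels in this command: %lu (span of %lu input bytes)\n",
	       n, n * bpp);
	return 0;
}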
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
 		if (pixel > raw_pixel_start) {
 			/* finalize last RAW span */
 			*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+		} else {
+			/* undo unused byte */
+			cmd--;
 		}
 
 		*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;