drm/msm/sde: msm and sde driver snapshot
This snapshot includes SDE and supporting include files.
This change also updates the copyright year in the files.
Snapshot was taken from msm-4.14 as of commit 1df57774a520
("Merge "ARM: dts: msm: Remove dma-coherent for IPA for sdxprairie"").
Change-Id: I328399cce8cd1eb031c53730003ec970a2d687be
Signed-off-by: Satya Rama Aditya Pinapala <psraditya30@codeaurora.org>
@@ -298,62 +298,46 @@ u32 msm_readl(const void __iomem *addr)
return val;
}

struct vblank_event {
struct list_head node;
struct vblank_work {
struct kthread_work work;
int crtc_id;
bool enable;
struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
struct msm_drm_private *priv = container_of(vbl_ctrl,
struct msm_drm_private, vblank_ctrl);
struct vblank_work *cur_work = container_of(work,
struct vblank_work, work);
struct msm_drm_private *priv = cur_work->priv;
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
LIST_HEAD(tmp_head);

spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
list_add_tail(&vbl_ev->node, &tmp_head);
}
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
if (cur_work->enable)
kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
else
kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);

list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
else
kms->funcs->disable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);

kfree(vbl_ev);
}
kfree(cur_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable)
{
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev;
unsigned long flags;
struct vblank_work *cur_work;

vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
if (!vbl_ev)
if (!priv || crtc_id >= priv->num_crtcs)
return -EINVAL;

cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
if (!cur_work)
return -ENOMEM;

vbl_ev->crtc_id = crtc_id;
vbl_ev->enable = enable;
kthread_init_work(&cur_work->work, vblank_ctrl_worker);
cur_work->crtc_id = crtc_id;
cur_work->enable = enable;
cur_work->priv = priv;

spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

kthread_queue_work(&priv->disp_thread[crtc_id].worker,
&vbl_ctrl->work);
kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);

return 0;
}
@@ -365,20 +349,8 @@ static int msm_drm_uninit(struct device *dev)
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
int i;

/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}

/* clean up display commit/event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->disp_thread[i].thread) {
@@ -778,9 +750,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)

INIT_LIST_HEAD(&priv->client_event_list);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);

ret = sde_power_resource_init(pdev, &priv->phandle);
if (ret) {
@@ -198,12 +198,6 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_COUNT
};

struct msm_vblank_ctrl {
struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};

#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -644,7 +638,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker shrinker;

struct msm_vblank_ctrl vblank_ctrl;
struct drm_atomic_state *pm_state;

/* task holding struct_mutex.. currently only used in submit path
@@ -341,7 +341,7 @@ dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
sgt = dma_buf_map_attachment(obj->import_attach,
DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
PTR_ERR(sgt));
return 0;
}
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -92,6 +92,12 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
if (!bl_lvl && brightness)
bl_lvl = 1;

if (display->panel->bl_config.bl_update ==
BL_UPDATE_DELAY_UNTIL_FIRST_FRAME && !c_conn->allow_bl_update) {
c_conn->unset_bl_level = bl_lvl;
return 0;
}

if (c_conn->ops.set_backlight) {
event.type = DRM_EVENT_SYS_BACKLIGHT;
event.length = sizeof(u32);
@@ -99,6 +105,7 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
c_conn->base.dev, &event, (u8 *)&brightness);
rc = c_conn->ops.set_backlight(&c_conn->base,
c_conn->display, bl_lvl);
c_conn->unset_bl_level = 0;
}

return rc;
@@ -543,6 +550,26 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
return rc;
}

void sde_connector_set_qsync_params(struct drm_connector *connector)
{
struct sde_connector *c_conn = to_sde_connector(connector);
u32 qsync_propval;

if (!connector)
return;

c_conn->qsync_updated = false;
qsync_propval = sde_connector_get_property(c_conn->base.state,
CONNECTOR_PROP_QSYNC_MODE);

if (qsync_propval != c_conn->qsync_mode) {
SDE_DEBUG("updated qsync mode %d -> %d\n", c_conn->qsync_mode,
qsync_propval);
c_conn->qsync_updated = true;
c_conn->qsync_mode = qsync_propval;
}
}

static int _sde_connector_update_dirty_properties(
struct drm_connector *connector)
{
@@ -557,7 +584,6 @@ static int _sde_connector_update_dirty_properties(

c_conn = to_sde_connector(connector);
c_state = to_sde_connector_state(connector->state);
c_conn->qsync_updated = false;

while ((idx = msm_property_pop_dirty(&c_conn->property_info,
&c_state->property_state)) >= 0) {
@@ -573,19 +599,17 @@ static int _sde_connector_update_dirty_properties(
case CONNECTOR_PROP_AD_BL_SCALE:
_sde_connector_update_bl_scale(c_conn);
break;
case CONNECTOR_PROP_QSYNC_MODE:
c_conn->qsync_updated = true;
c_conn->qsync_mode = sde_connector_get_property(
connector->state, CONNECTOR_PROP_QSYNC_MODE);
break;
default:
/* nothing to do for most properties */
break;
}
}

/* Special handling for postproc properties */
if (c_conn->bl_scale_dirty) {
/*
* Special handling for postproc properties and
* for updating backlight if any unset backlight level is present
*/
if (c_conn->bl_scale_dirty || c_conn->unset_bl_level) {
_sde_connector_update_bl_scale(c_conn);
c_conn->bl_scale_dirty = false;
}
@@ -658,29 +682,44 @@ void sde_connector_helper_bridge_disable(struct drm_connector *connector)
sde_connector_schedule_status_work(connector, false);

c_conn = to_sde_connector(connector);
if (c_conn->panel_dead) {
if (c_conn->bl_device) {
c_conn->bl_device->props.power = FB_BLANK_POWERDOWN;
c_conn->bl_device->props.state |= BL_CORE_FBBLANK;
backlight_update_status(c_conn->bl_device);
}

c_conn->allow_bl_update = false;
}

void sde_connector_helper_bridge_enable(struct drm_connector *connector)
{
struct sde_connector *c_conn = NULL;
struct dsi_display *display;

if (!connector)
return;

c_conn = to_sde_connector(connector);
display = (struct dsi_display *) c_conn->display;

/* Special handling for ESD recovery case */
if (c_conn->panel_dead) {
/*
* Special handling for some panels which need atleast
* one frame to be transferred to GRAM before enabling backlight.
* So delay backlight update to these panels until the
* first frame commit is received from the HW.
*/
if (display->panel->bl_config.bl_update ==
BL_UPDATE_DELAY_UNTIL_FIRST_FRAME)
sde_encoder_wait_for_event(c_conn->encoder,
MSM_ENC_TX_COMPLETE);
c_conn->allow_bl_update = true;

if (c_conn->bl_device) {
c_conn->bl_device->props.power = FB_BLANK_UNBLANK;
c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK;
backlight_update_status(c_conn->bl_device);
c_conn->panel_dead = false;
}
c_conn->panel_dead = false;
}

int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
@@ -1273,24 +1312,17 @@ void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts)
static void sde_connector_update_hdr_props(struct drm_connector *connector)
{
struct sde_connector *c_conn = to_sde_connector(connector);
struct drm_msm_ext_hdr_properties hdr = {};
struct drm_msm_ext_hdr_properties hdr = {
connector->hdr_metadata_type_one,
connector->hdr_supported,
connector->hdr_eotf,
connector->hdr_max_luminance,
connector->hdr_avg_luminance,
connector->hdr_min_luminance,
};

hdr.hdr_supported = connector->hdr_supported;

if (hdr.hdr_supported) {
hdr.hdr_eotf = connector->hdr_eotf;
hdr.hdr_metadata_type_one = connector->hdr_metadata_type_one;
hdr.hdr_max_luminance = connector->hdr_max_luminance;
hdr.hdr_avg_luminance = connector->hdr_avg_luminance;
hdr.hdr_min_luminance = connector->hdr_min_luminance;

msm_property_set_blob(&c_conn->property_info,
&c_conn->blob_ext_hdr,
&hdr,
sizeof(hdr),
CONNECTOR_PROP_EXT_HDR_INFO);

}
msm_property_set_blob(&c_conn->property_info, &c_conn->blob_ext_hdr,
&hdr, sizeof(hdr), CONNECTOR_PROP_EXT_HDR_INFO);
}

static enum drm_connector_status
@@ -1690,7 +1722,8 @@ static int sde_connector_get_modes(struct drm_connector *connector)
return 0;
}

sde_connector_update_hdr_props(connector);
if (c_conn->hdr_capable)
sde_connector_update_hdr_props(connector);

return mode_count;
}
@@ -1802,6 +1835,7 @@ static void _sde_connector_report_panel_dead(struct sde_connector *conn)
int sde_connector_esd_status(struct drm_connector *conn)
{
struct sde_connector *sde_conn = NULL;
struct dsi_display *display;
int ret = 0;

if (!conn)
@@ -1811,10 +1845,17 @@ int sde_connector_esd_status(struct drm_connector *conn)
if (!sde_conn || !sde_conn->ops.check_status)
return ret;

display = sde_conn->display;

/* protect this call with ESD status check call */
mutex_lock(&sde_conn->lock);
ret = sde_conn->ops.check_status(&sde_conn->base, sde_conn->display,
true);
if (atomic_read(&(display->panel->esd_recovery_pending))) {
SDE_ERROR("ESD recovery already pending\n");
mutex_unlock(&sde_conn->lock);
return -ETIMEDOUT;
}
ret = sde_conn->ops.check_status(&sde_conn->base,
sde_conn->display, true);
mutex_unlock(&sde_conn->lock);

if (ret <= 0) {
@@ -1936,6 +1977,9 @@ static int sde_connector_populate_mode_info(struct drm_connector *conn,
continue;
}

sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
mode_info.mdp_transfer_time_us);

if (!mode_info.roi_caps.num_roi)
continue;
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/

#ifndef _SDE_CONNECTOR_H_
@@ -366,9 +366,12 @@ struct sde_connector_evt {
* @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
* @bl_scale: BL scale value for ABA feature
* @bl_scale_ad: BL scale value for AD feature
* @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
* @unset_bl_level: BL level that needs to be set later
* @allow_bl_update: Flag to indicate if BL update is allowed currently or not
* @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
* @qsync_updated: Qsync settings were updated
* last_cmd_tx_sts: status of the last command transfer
* @hdr_capable: external hdr support present
*/
struct sde_connector {
struct drm_connector base;
@@ -413,11 +416,14 @@ struct sde_connector {
bool bl_scale_dirty;
u32 bl_scale;
u32 bl_scale_ad;
u32 unset_bl_level;
bool allow_bl_update;

u32 qsync_mode;
bool qsync_updated;

bool last_cmd_tx_sts;
bool hdr_capable;
};

/**
@@ -456,9 +462,17 @@ struct sde_connector {
* @C: Pointer to drm connector structure
* Returns: True if qsync is updated; false otherwise
*/
#define sde_connector_qsync_updated(C) \
#define sde_connector_is_qsync_updated(C) \
((C) ? to_sde_connector((C))->qsync_updated : 0)

/**
* sde_connector_get_qsync_mode - get sde connector's qsync_mode
* @C: Pointer to drm connector structure
* Returns: Current cached qsync_mode for given connector
*/
#define sde_connector_get_qsync_mode(C) \
((C) ? to_sde_connector((C))->qsync_mode : 0)

/**
* sde_connector_get_propinfo - get sde connector's property info pointer
* @C: Pointer to drm connector structure
@@ -672,6 +686,17 @@ int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
*/
int sde_connector_get_dpms(struct drm_connector *connector);

/**
* sde_connector_set_qsync_params - set status of qsync_updated for current
* frame and update the cached qsync_mode
* @connector: pointer to drm connector
*
* This must be called after the connector set_property values are applied,
* and before sde_connector's qsync_updated or qsync_mode fields are accessed.
* It must only be called once per frame update for the given connector.
*/
void sde_connector_set_qsync_params(struct drm_connector *connector);

/**
* sde_connector_trigger_event - indicate that an event has occurred
* Any callbacks that have been registered against this event will
|
||||
|
||||
@@ -1181,6 +1181,180 @@ static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
|
||||
}
|
||||
}
|
||||
|
||||
struct plane_state {
|
||||
struct sde_plane_state *sde_pstate;
|
||||
const struct drm_plane_state *drm_pstate;
|
||||
int stage;
|
||||
u32 pipe_id;
|
||||
};
|
||||
|
||||
static int pstate_cmp(const void *a, const void *b)
|
||||
{
|
||||
struct plane_state *pa = (struct plane_state *)a;
|
||||
struct plane_state *pb = (struct plane_state *)b;
|
||||
int rc = 0;
|
||||
int pa_zpos, pb_zpos;
|
||||
|
||||
pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
|
||||
pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
|
||||
|
||||
if (pa_zpos != pb_zpos)
|
||||
rc = pa_zpos - pb_zpos;
|
||||
else
|
||||
rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* validate and set source split:
|
||||
* use pstates sorted by stage to check planes on same stage
|
||||
* we assume that all pipes are in source split so its valid to compare
|
||||
* without taking into account left/right mixer placement
|
||||
*/
|
||||
static int _sde_crtc_validate_src_split_order(struct drm_crtc *crtc,
|
||||
struct plane_state *pstates, int cnt)
|
||||
{
|
||||
struct plane_state *prv_pstate, *cur_pstate;
|
||||
struct sde_rect left_rect, right_rect;
|
||||
struct sde_kms *sde_kms;
|
||||
int32_t left_pid, right_pid;
|
||||
int32_t stage;
|
||||
int i, rc = 0;
|
||||
|
||||
sde_kms = _sde_crtc_get_kms(crtc);
|
||||
if (!sde_kms || !sde_kms->catalog) {
|
||||
SDE_ERROR("invalid parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 1; i < cnt; i++) {
|
||||
prv_pstate = &pstates[i - 1];
|
||||
cur_pstate = &pstates[i];
|
||||
|
||||
if (prv_pstate->stage != cur_pstate->stage)
|
||||
continue;
|
||||
|
||||
stage = cur_pstate->stage;
|
||||
|
||||
left_pid = prv_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
|
||||
prv_pstate->drm_pstate->crtc_y,
|
||||
prv_pstate->drm_pstate->crtc_w,
|
||||
prv_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
right_pid = cur_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
|
||||
cur_pstate->drm_pstate->crtc_y,
|
||||
cur_pstate->drm_pstate->crtc_w,
|
||||
cur_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
if (right_rect.x < left_rect.x) {
|
||||
swap(left_pid, right_pid);
|
||||
swap(left_rect, right_rect);
|
||||
swap(prv_pstate, cur_pstate);
|
||||
}
|
||||
|
||||
/*
|
||||
* - planes are enumerated in pipe-priority order such that
|
||||
* planes with lower drm_id must be left-most in a shared
|
||||
* blend-stage when using source split.
|
||||
* - planes in source split must be contiguous in width
|
||||
* - planes in source split must have same dest yoff and height
|
||||
*/
|
||||
if ((right_pid < left_pid) &&
|
||||
!sde_kms->catalog->pipe_order_type) {
|
||||
SDE_ERROR(
|
||||
"invalid src split cfg, stage:%d left:%d right:%d\n",
|
||||
stage, left_pid, right_pid);
|
||||
return -EINVAL;
|
||||
} else if (right_rect.x != (left_rect.x + left_rect.w)) {
|
||||
SDE_ERROR(
|
||||
"invalid coordinates, stage:%d l:%d-%d r:%d-%d\n",
|
||||
stage, left_rect.x, left_rect.w,
|
||||
right_rect.x, right_rect.w);
|
||||
return -EINVAL;
|
||||
} else if ((left_rect.y != right_rect.y) ||
|
||||
(left_rect.h != right_rect.h)) {
|
||||
SDE_ERROR(
|
||||
"stage:%d invalid yoff/ht: l_yxh:%dx%d r_yxh:%dx%d\n",
|
||||
stage, left_rect.y, left_rect.h,
|
||||
right_rect.y, right_rect.h);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void _sde_crtc_set_src_split_order(struct drm_crtc *crtc,
|
||||
struct plane_state *pstates, int cnt)
|
||||
{
|
||||
struct plane_state *prv_pstate, *cur_pstate, *nxt_pstate;
|
||||
struct sde_kms *sde_kms;
|
||||
struct sde_rect left_rect, right_rect;
|
||||
int32_t left_pid, right_pid;
|
||||
int32_t stage;
|
||||
int i;
|
||||
|
||||
sde_kms = _sde_crtc_get_kms(crtc);
|
||||
if (!sde_kms || !sde_kms->catalog) {
|
||||
SDE_ERROR("invalid parameters\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!sde_kms->catalog->pipe_order_type)
|
||||
return;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
prv_pstate = (i > 0) ? &pstates[i - 1] : NULL;
|
||||
cur_pstate = &pstates[i];
|
||||
nxt_pstate = ((i + 1) < cnt) ? &pstates[i + 1] : NULL;
|
||||
|
||||
if ((!prv_pstate) || (prv_pstate->stage != cur_pstate->stage)) {
|
||||
/*
|
||||
* reset if prv or nxt pipes are not in the same stage
|
||||
* as the cur pipe
|
||||
*/
|
||||
if ((!nxt_pstate)
|
||||
|| (nxt_pstate->stage != cur_pstate->stage))
|
||||
cur_pstate->sde_pstate->pipe_order_flags = 0;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
stage = cur_pstate->stage;
|
||||
|
||||
left_pid = prv_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
|
||||
prv_pstate->drm_pstate->crtc_y,
|
||||
prv_pstate->drm_pstate->crtc_w,
|
||||
prv_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
right_pid = cur_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
|
||||
cur_pstate->drm_pstate->crtc_y,
|
||||
cur_pstate->drm_pstate->crtc_w,
|
||||
cur_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
if (right_rect.x < left_rect.x) {
|
||||
swap(left_pid, right_pid);
|
||||
swap(left_rect, right_rect);
|
||||
swap(prv_pstate, cur_pstate);
|
||||
}
|
||||
|
||||
cur_pstate->sde_pstate->pipe_order_flags = SDE_SSPP_RIGHT;
|
||||
prv_pstate->sde_pstate->pipe_order_flags = 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
cur_pstate = &pstates[i];
|
||||
sde_plane_setup_src_split_order(
|
||||
cur_pstate->drm_pstate->plane,
|
||||
cur_pstate->sde_pstate->multirect_index,
|
||||
cur_pstate->sde_pstate->pipe_order_flags);
|
||||
}
|
||||
}
|
||||
static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
|
||||
struct sde_crtc_mixer *mixer)
|
||||
@@ -1190,6 +1364,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
struct drm_plane_state *state;
|
||||
struct sde_crtc_state *cstate;
|
||||
struct sde_plane_state *pstate = NULL;
|
||||
struct plane_state *pstates = NULL;
|
||||
struct sde_format *format;
|
||||
struct sde_hw_ctl *ctl;
|
||||
struct sde_hw_mixer *lm;
|
||||
@@ -1197,7 +1372,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
struct sde_rect plane_crtc_roi;
|
||||
uint32_t stage_idx, lm_idx;
|
||||
int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
|
||||
int i;
|
||||
int i, cnt = 0;
|
||||
bool bg_alpha_enable = false;
|
||||
|
||||
if (!sde_crtc || !crtc->state || !mixer) {
|
||||
@@ -1209,6 +1384,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
lm = mixer->hw_lm;
|
||||
stage_cfg = &sde_crtc->stage_cfg;
|
||||
cstate = to_sde_crtc_state(crtc->state);
|
||||
pstates = kcalloc(SDE_PSTATES_MAX,
|
||||
sizeof(struct plane_state), GFP_KERNEL);
|
||||
if (!pstates)
|
||||
return;
|
||||
|
||||
drm_atomic_crtc_for_each_plane(plane, crtc) {
|
||||
state = plane->state;
|
||||
@@ -1235,7 +1414,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
|
||||
if (!format) {
|
||||
SDE_ERROR("invalid format\n");
|
||||
return;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
|
||||
@@ -1271,8 +1450,22 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
mixer[lm_idx].mixer_op_mode |=
|
||||
1 << pstate->stage;
|
||||
}
|
||||
|
||||
if (cnt >= SDE_PSTATES_MAX)
|
||||
continue;
|
||||
|
||||
pstates[cnt].sde_pstate = pstate;
|
||||
pstates[cnt].drm_pstate = state;
|
||||
pstates[cnt].stage = sde_plane_get_property(
|
||||
pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
|
||||
pstates[cnt].pipe_id = sde_plane_pipe(plane);
|
||||
|
||||
cnt++;
|
||||
}
|
||||
|
||||
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
|
||||
_sde_crtc_set_src_split_order(crtc, pstates, cnt);
|
||||
|
||||
if (lm && lm->ops.setup_dim_layer) {
|
||||
cstate = to_sde_crtc_state(crtc->state);
|
||||
for (i = 0; i < cstate->num_dim_layers; i++)
|
||||
@@ -1281,6 +1474,9 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
|
||||
}
|
||||
|
||||
_sde_crtc_program_lm_output_roi(crtc);
|
||||
|
||||
end:
|
||||
kfree(pstates);
|
||||
}
|
||||
|
||||
static void _sde_crtc_swap_mixers_for_right_partial_update(
|
||||
@@ -1857,7 +2053,8 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
|
||||
SDE_DEBUG("dest scaler feature not supported\n");
|
||||
} else if (_sde_validate_hw_resources(sde_crtc)) {
|
||||
//do nothing
|
||||
} else if (!cstate->scl3_lut_cfg.is_configured) {
|
||||
} else if ((!cstate->scl3_lut_cfg.is_configured) &&
|
||||
(!is_qseed3_rev_qseed3lite(kms->catalog))) {
|
||||
SDE_ERROR("crtc%d:no LUT data available\n", crtc->base.id);
|
||||
} else {
|
||||
for (i = 0; i < count; i++) {
|
||||
@@ -2911,6 +3108,9 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_device *dev;
|
||||
struct sde_kms *sde_kms;
|
||||
struct sde_splash_display *splash_display;
|
||||
bool cont_splash_enabled = false;
|
||||
size_t i;
|
||||
|
||||
if (!crtc) {
|
||||
SDE_ERROR("invalid crtc\n");
|
||||
@@ -2975,7 +3175,16 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
|
||||
* apply color processing properties only if
|
||||
* smmu state is attached,
|
||||
*/
|
||||
if (sde_kms_is_cp_operation_allowed(sde_kms) && sde_crtc->enabled)
|
||||
for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
|
||||
splash_display = &sde_kms->splash_data.splash_display[i];
|
||||
if (splash_display->cont_splash_enabled &&
|
||||
splash_display->encoder &&
|
||||
crtc == splash_display->encoder->crtc)
|
||||
cont_splash_enabled = true;
|
||||
}
|
||||
|
||||
if (sde_kms_is_cp_operation_allowed(sde_kms) &&
|
||||
(cont_splash_enabled || sde_crtc->enabled))
|
||||
sde_cp_crtc_apply_properties(crtc);
|
||||
|
||||
/*
|
||||
@@ -3999,31 +4208,6 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
|
||||
sde_connector_schedule_status_work(cstate->connectors[i], true);
|
||||
}
|
||||
|
||||
struct plane_state {
|
||||
struct sde_plane_state *sde_pstate;
|
||||
const struct drm_plane_state *drm_pstate;
|
||||
int stage;
|
||||
u32 pipe_id;
|
||||
};
|
||||
|
||||
static int pstate_cmp(const void *a, const void *b)
|
||||
{
|
||||
struct plane_state *pa = (struct plane_state *)a;
|
||||
struct plane_state *pb = (struct plane_state *)b;
|
||||
int rc = 0;
|
||||
int pa_zpos, pb_zpos;
|
||||
|
||||
pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
|
||||
pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
|
||||
|
||||
if (pa_zpos != pb_zpos)
|
||||
rc = pa_zpos - pb_zpos;
|
||||
else
|
||||
rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* no input validation - caller API has all the checks */
|
||||
static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
|
||||
struct plane_state pstates[], int cnt)
|
||||
@@ -4220,85 +4404,6 @@ static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _sde_crtc_check_src_split_order(struct drm_crtc *crtc,
|
||||
struct plane_state *pstates, int cnt)
|
||||
{
|
||||
struct sde_kms *sde_kms;
|
||||
int i, rc = 0;
|
||||
|
||||
sde_kms = _sde_crtc_get_kms(crtc);
|
||||
if (!sde_kms || !sde_kms->catalog) {
|
||||
SDE_ERROR("invalid parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 1; i < cnt; i++) {
|
||||
struct plane_state *prv_pstate, *cur_pstate;
|
||||
struct sde_rect left_rect, right_rect;
|
||||
int32_t left_pid, right_pid;
|
||||
int32_t stage;
|
||||
|
||||
prv_pstate = &pstates[i - 1];
|
||||
cur_pstate = &pstates[i];
|
||||
if (prv_pstate->stage != cur_pstate->stage)
|
||||
continue;
|
||||
|
||||
stage = cur_pstate->stage;
|
||||
|
||||
left_pid = prv_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
|
||||
prv_pstate->drm_pstate->crtc_y,
|
||||
prv_pstate->drm_pstate->crtc_w,
|
||||
prv_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
right_pid = cur_pstate->sde_pstate->base.plane->base.id;
|
||||
POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
|
||||
cur_pstate->drm_pstate->crtc_y,
|
||||
cur_pstate->drm_pstate->crtc_w,
|
||||
cur_pstate->drm_pstate->crtc_h, false);
|
||||
|
||||
if (right_rect.x < left_rect.x) {
|
||||
swap(left_pid, right_pid);
|
||||
swap(left_rect, right_rect);
|
||||
swap(prv_pstate, cur_pstate);
|
||||
}
|
||||
|
||||
/**
|
||||
* - planes are enumerated in pipe-priority order such that
|
||||
* planes with lower drm_id must be left-most in a shared
|
||||
* blend-stage when using source split.
|
||||
* - planes in source split must be contiguous in width
|
||||
* - planes in source split must have same dest yoff and height
|
||||
*/
|
||||
if ((right_pid < left_pid) &&
|
||||
!sde_kms->catalog->pipe_order_type) {
|
||||
SDE_ERROR(
|
||||
"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
|
||||
stage, left_pid, right_pid);
|
||||
return -EINVAL;
|
||||
} else if (right_rect.x != (left_rect.x + left_rect.w)) {
|
||||
SDE_ERROR(
|
||||
"non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n",
|
||||
stage, left_rect.x, left_rect.w,
|
||||
right_rect.x, right_rect.w);
|
||||
return -EINVAL;
|
||||
} else if ((left_rect.y != right_rect.y) ||
|
||||
(left_rect.h != right_rect.h)) {
|
||||
SDE_ERROR(
|
||||
"source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n",
|
||||
stage, left_rect.y, right_rect.y,
|
||||
left_rect.h, right_rect.h);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sde_kms->catalog->pipe_order_type)
|
||||
cur_pstate->sde_pstate->pipe_order_flags =
|
||||
SDE_SSPP_RIGHT;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *state, struct plane_state pstates[],
|
||||
int cnt)
|
||||
@@ -4383,7 +4488,6 @@ static int _sde_crtc_check_get_pstates(struct drm_crtc *crtc,
|
||||
continue;
|
||||
|
||||
pstates[*cnt].sde_pstate = to_sde_plane_state(pstate);
|
||||
pstates[*cnt].sde_pstate->pipe_order_flags = 0x0;
|
||||
pstates[*cnt].drm_pstate = pstate;
|
||||
pstates[*cnt].stage = sde_plane_get_property(
|
||||
pstates[*cnt].sde_pstate, PLANE_PROP_ZPOS);
|
||||
@@ -4504,7 +4608,6 @@ static int _sde_crtc_check_zpos(struct drm_crtc_state *state,
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *state,
|
||||
struct plane_state *pstates,
|
||||
@@ -4549,7 +4652,7 @@ static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
|
||||
* we assume that all pipes are in source split so its valid to compare
|
||||
* without taking into account left/right mixer placement
|
||||
*/
|
||||
rc = _sde_crtc_check_src_split_order(crtc, pstates, cnt);
|
||||
rc = _sde_crtc_validate_src_split_order(crtc, pstates, cnt);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
||||
@@ -72,9 +72,7 @@
|
||||
|
||||
#define IDLE_SHORT_TIMEOUT 1
|
||||
|
||||
#define FAULT_TOLERENCE_DELTA_IN_MS 2
|
||||
|
||||
#define FAULT_TOLERENCE_WAIT_IN_MS 5
|
||||
#define EVT_TIME_OUT_SPLIT 2
|
||||
|
||||
/* Maximum number of VSYNC wait attempts for RSC state transition */
|
||||
#define MAX_RSC_WAIT 5
|
||||
@@ -177,6 +175,7 @@ enum sde_enc_rc_states {
|
||||
* @hw_pp Handle to the pingpong blocks used for the display. No.
|
||||
* pingpong blocks can be different than num_phys_encs.
|
||||
* @hw_dsc: Array of DSC block handles used for the display.
|
||||
* @dirty_dsc_ids: Cached dsc indexes for dirty DSC blocks needing flush
|
||||
* @intfs_swapped Whether or not the phys_enc interfaces have been swapped
|
||||
* for partial update right-only cases, such as pingpong
|
||||
* split where virtual pingpong does not generate IRQs
|
||||
@@ -222,6 +221,7 @@ enum sde_enc_rc_states {
|
||||
* @recovery_events_enabled: status of hw recovery feature enable by client
|
||||
* @elevated_ahb_vote: increase AHB bus speed for the first frame
|
||||
* after power collapse
|
||||
* @pm_qos_cpu_req: pm_qos request for cpu frequency
|
||||
*/
|
||||
struct sde_encoder_virt {
|
||||
struct drm_encoder base;
|
||||
@@ -237,6 +237,8 @@ struct sde_encoder_virt {
|
||||
struct sde_encoder_phys *cur_master;
|
||||
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
|
||||
enum sde_dsc dirty_dsc_ids[MAX_CHANNELS_PER_ENC];
|
||||
|
||||
bool intfs_swapped;
|
||||
|
||||
@@ -277,6 +279,7 @@ struct sde_encoder_virt {
|
||||
|
||||
bool recovery_events_enabled;
|
||||
bool elevated_ahb_vote;
|
||||
struct pm_qos_request pm_qos_cpu_req;
|
||||
};
|
||||
|
||||
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
|
||||
@@ -297,73 +300,42 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
|
||||
}
|
||||
}
|
||||
|
||||
static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
|
||||
static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
|
||||
struct sde_kms *sde_kms)
|
||||
{
|
||||
struct msm_drm_private *priv;
|
||||
struct sde_kms *sde_kms;
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
struct pm_qos_request *req;
|
||||
u32 cpu_mask;
|
||||
u32 cpu_dma_latency;
|
||||
int cpu;
|
||||
|
||||
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
|
||||
SDE_ERROR("drm device invalid\n");
|
||||
return;
|
||||
}
|
||||
|
||||
priv = drm_enc->dev->dev_private;
|
||||
if (!priv->kms) {
|
||||
SDE_ERROR("invalid kms\n");
|
||||
return;
|
||||
}
|
||||
|
||||
sde_kms = to_sde_kms(priv->kms);
|
||||
if (!sde_kms || !sde_kms->catalog)
|
||||
if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
|
||||
return;
|
||||
|
||||
cpu_mask = sde_kms->catalog->perf.cpu_mask;
|
||||
cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
|
||||
if (!cpu_mask)
|
||||
return;
|
||||
|
||||
if (atomic_inc_return(&sde_kms->pm_qos_counts) == 1) {
|
||||
req = &sde_kms->pm_qos_cpu_req;
|
||||
req->type = PM_QOS_REQ_AFFINE_CORES;
|
||||
cpumask_empty(&req->cpus_affine);
|
||||
for_each_possible_cpu(cpu) {
|
||||
if ((1 << cpu) & cpu_mask)
|
||||
cpumask_set_cpu(cpu, &req->cpus_affine);
|
||||
}
|
||||
pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
|
||||
cpu_dma_latency);
|
||||
req = &sde_enc->pm_qos_cpu_req;
|
||||
req->type = PM_QOS_REQ_AFFINE_CORES;
|
||||
cpumask_empty(&req->cpus_affine);
|
||||
for_each_possible_cpu(cpu) {
|
||||
if ((1 << cpu) & cpu_mask)
|
||||
cpumask_set_cpu(cpu, &req->cpus_affine);
|
||||
}
|
||||
pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
|
||||
|
||||
SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
|
||||
}
|
||||
|
||||
static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc)
|
||||
static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc,
|
||||
struct sde_kms *sde_kms)
|
||||
{
|
||||
struct msm_drm_private *priv;
|
||||
struct sde_kms *sde_kms;
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
|
||||
if (!drm_enc->dev || !drm_enc->dev->dev_private) {
|
||||
SDE_ERROR("drm device invalid\n");
|
||||
return;
|
||||
}
|
||||
|
||||
priv = drm_enc->dev->dev_private;
|
||||
if (!priv->kms) {
|
||||
SDE_ERROR("invalid kms\n");
|
||||
return;
|
||||
}
|
||||
|
||||
sde_kms = to_sde_kms(priv->kms);
|
||||
if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
|
||||
if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
|
||||
return;
|
||||
|
||||
atomic_add_unless(&sde_kms->pm_qos_counts, -1, 0);
|
||||
if (atomic_read(&sde_kms->pm_qos_counts) == 0)
|
||||
pm_qos_remove_request(&sde_kms->pm_qos_cpu_req);
|
||||
pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
|
||||
}
|
||||
|
||||
static struct drm_connector_state *_sde_encoder_get_conn_state(
|
||||
@@ -431,6 +403,28 @@ static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
|
||||
return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
|
||||
}
|
||||
|
||||
static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
|
||||
s64 timeout_ms, struct sde_encoder_wait_info *info)
|
||||
{
|
||||
int rc = 0;
|
||||
s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
|
||||
ktime_t cur_ktime;
|
||||
ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
|
||||
|
||||
do {
|
||||
rc = wait_event_timeout(*(info->wq),
|
||||
atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
|
||||
cur_ktime = ktime_get();
|
||||
|
||||
SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
|
||||
timeout_ms, atomic_read(info->atomic_cnt));
|
||||
/* If we timed out, counter is valid and time is less, wait again */
|
||||
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
|
||||
(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
|
||||
{
|
||||
enum sde_rm_topology_name topology;
|
||||
@@ -455,14 +449,6 @@ bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
|
||||
return false;
|
||||
}
|
||||
|
||||
int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
|
||||
return sde_enc && sde_enc->cur_master &&
|
||||
sde_enc->cur_master->in_clone_mode;
|
||||
}
|
||||
|
||||
bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
@@ -529,7 +515,7 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
|
||||
{
|
||||
struct sde_encoder_irq *irq;
|
||||
u32 irq_status;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
|
||||
SDE_ERROR("invalid params\n");
|
||||
@@ -561,10 +547,22 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
|
||||
irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
|
||||
atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
|
||||
|
||||
ret = sde_encoder_helper_wait_event_timeout(
|
||||
DRMID(phys_enc->parent),
|
||||
irq->hw_idx,
|
||||
wait_info);
|
||||
/*
|
||||
* Some module X may disable interrupt for longer duration
|
||||
* and it may trigger all interrupts including timer interrupt
|
||||
* when module X again enable the interrupt.
|
||||
* That may cause interrupt wait timeout API in this API.
|
||||
* It is handled by split the wait timer in two halves.
|
||||
*/
|
||||
|
||||
for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
|
||||
ret = _sde_encoder_wait_timeout(DRMID(phys_enc->parent),
|
||||
irq->hw_idx,
|
||||
(wait_info->timeout_ms/EVT_TIME_OUT_SPLIT),
|
||||
wait_info);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret <= 0) {
|
||||
irq_status = sde_core_irq_read(phys_enc->sde_kms,
|
||||
@@ -920,6 +918,28 @@ void sde_encoder_helper_split_config(
|
||||
}
|
||||
}
|
||||
|
||||
bool sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc;
|
||||
int i = 0;
|
||||
|
||||
if (!drm_enc)
|
||||
return false;
|
||||
|
||||
sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
if (!sde_enc)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < sde_enc->num_phys_encs; i++) {
|
||||
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
|
||||
|
||||
if (phys && phys->in_clone_mode)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int _sde_encoder_atomic_check_phys_enc(struct sde_encoder_virt *sde_enc,
|
||||
struct drm_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
@@ -1244,11 +1264,25 @@ static bool _sde_encoder_dsc_ich_reset_override_needed(bool pu_en,
|
||||
|
||||
static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
|
||||
struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
|
||||
u32 common_mode, bool ich_reset, bool enable)
|
||||
u32 common_mode, bool ich_reset, bool enable,
|
||||
struct sde_hw_pingpong *hw_dsc_pp)
|
||||
{
|
||||
if (!enable) {
|
||||
if (hw_pp->ops.disable_dsc)
|
||||
hw_pp->ops.disable_dsc(hw_pp);
|
||||
if (hw_dsc_pp && hw_dsc_pp->ops.disable_dsc)
|
||||
hw_dsc_pp->ops.disable_dsc(hw_dsc_pp);
|
||||
|
||||
if (hw_dsc && hw_dsc->ops.dsc_disable)
|
||||
hw_dsc->ops.dsc_disable(hw_dsc);
|
||||
|
||||
if (hw_dsc && hw_dsc->ops.bind_pingpong_blk)
|
||||
hw_dsc->ops.bind_pingpong_blk(hw_dsc, false,
|
||||
PINGPONG_MAX);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!dsc || !hw_dsc || !hw_pp || !hw_dsc_pp) {
|
||||
SDE_ERROR("invalid params %d %d %d %d\n", !dsc, !hw_dsc,
|
||||
!hw_pp, !hw_dsc_pp);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1258,14 +1292,14 @@ static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
|
||||
if (hw_dsc->ops.dsc_config_thresh)
|
||||
hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
|
||||
|
||||
if (hw_pp->ops.setup_dsc)
|
||||
hw_pp->ops.setup_dsc(hw_pp);
|
||||
if (hw_dsc_pp->ops.setup_dsc)
|
||||
hw_dsc_pp->ops.setup_dsc(hw_dsc_pp);
|
||||
|
||||
if (hw_dsc->ops.bind_pingpong_blk)
|
||||
hw_dsc->ops.bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
|
||||
|
||||
if (hw_pp->ops.enable_dsc)
|
||||
hw_pp->ops.enable_dsc(hw_pp);
|
||||
if (hw_dsc_pp->ops.enable_dsc)
|
||||
hw_dsc_pp->ops.enable_dsc(hw_dsc_pp);
|
||||
}
|
||||
|
||||
static void _sde_encoder_get_connector_roi(
|
||||
@@ -1294,6 +1328,7 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
|
||||
int ich_res, dsc_common_mode = 0;
|
||||
|
||||
struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
|
||||
struct sde_hw_pingpong *hw_dsc_pp = sde_enc->hw_dsc_pp[0];
|
||||
struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
|
||||
struct sde_encoder_phys *enc_master = sde_enc->cur_master;
|
||||
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
|
||||
@@ -1337,11 +1372,7 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
|
||||
SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
|
||||
|
||||
_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
|
||||
ich_res, true);
|
||||
if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
|
||||
pr_err("Invalid dsc count:%d\n", cfg.dsc_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
ich_res, true, hw_dsc_pp);
|
||||
cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;
|
||||
|
||||
/* setup dsc active configuration in the control path */
|
||||
@@ -1372,6 +1403,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
|
||||
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
|
||||
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
|
||||
struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct msm_mode_info mode_info;
|
||||
bool half_panel_partial_update;
|
||||
@@ -1384,8 +1416,9 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
hw_pp[i] = sde_enc->hw_pp[i];
|
||||
hw_dsc[i] = sde_enc->hw_dsc[i];
|
||||
hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
|
||||
|
||||
if (!hw_pp[i] || !hw_dsc[i]) {
|
||||
if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
|
||||
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -1453,7 +1486,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
|
||||
SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
|
||||
dsc_common_mode, i, active);
|
||||
_sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
|
||||
dsc_common_mode, ich_res, active);
|
||||
dsc_common_mode, ich_res, active, hw_dsc_pp[i]);
|
||||
|
||||
if (active) {
|
||||
if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
|
||||
@@ -1461,8 +1494,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
|
||||
cfg.dsc_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
cfg.dsc[i] = hw_dsc[i]->idx;
|
||||
cfg.dsc_count++;
|
||||
cfg.dsc[cfg.dsc_count++] = hw_dsc[i]->idx;
|
||||
|
||||
if (hw_ctl->ops.update_bitmask_dsc)
|
||||
hw_ctl->ops.update_bitmask_dsc(hw_ctl,
|
||||
@@ -1494,6 +1526,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
|
||||
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
|
||||
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
|
||||
struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
|
||||
struct msm_display_dsc_info *dsc = NULL;
|
||||
struct msm_mode_info mode_info;
|
||||
bool half_panel_partial_update;
|
||||
@@ -1506,8 +1539,9 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
hw_pp[i] = sde_enc->hw_pp[i];
|
||||
hw_dsc[i] = sde_enc->hw_dsc[i];
|
||||
hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
|
||||
|
||||
if (!hw_pp[i] || !hw_dsc[i]) {
|
||||
if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
|
||||
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -1552,7 +1586,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
|
||||
dsc_common_mode, i, params->affected_displays);
|
||||
|
||||
_sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
|
||||
ich_res, true);
|
||||
ich_res, true, hw_dsc_pp[0]);
|
||||
cfg.dsc[0] = hw_dsc[0]->idx;
|
||||
cfg.dsc_count++;
|
||||
if (hw_ctl->ops.update_bitmask_dsc)
|
||||
@@ -1560,7 +1594,7 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
|
||||
|
||||
|
||||
_sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
|
||||
ich_res, !half_panel_partial_update);
|
||||
ich_res, !half_panel_partial_update, hw_dsc_pp[1]);
|
||||
if (!half_panel_partial_update) {
|
||||
cfg.dsc[1] = hw_dsc[1]->idx;
|
||||
cfg.dsc_count++;
|
||||
@@ -1772,32 +1806,50 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
|
||||
}
|
||||
}
|
||||
|
||||
static int _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
|
||||
static void _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
|
||||
{
|
||||
int i, ret = 0;
|
||||
int i;
|
||||
struct sde_hw_pingpong *hw_pp = NULL;
|
||||
struct sde_hw_pingpong *hw_dsc_pp = NULL;
|
||||
struct sde_hw_dsc *hw_dsc = NULL;
|
||||
struct sde_hw_ctl *hw_ctl = NULL;
|
||||
struct sde_ctl_dsc_cfg cfg;
|
||||
|
||||
if (!sde_enc || !sde_enc->phys_encs[0] ||
|
||||
!sde_enc->phys_encs[0]->connector) {
|
||||
SDE_ERROR("invalid params %d %d\n",
|
||||
!sde_enc, sde_enc ? !sde_enc->phys_encs[0] : -1);
|
||||
return -EINVAL;
|
||||
return;
|
||||
}
|
||||
|
||||
if (sde_enc->cur_master)
|
||||
hw_ctl = sde_enc->cur_master->hw_ctl;
|
||||
|
||||
/* Disable DSC for all the pp's present in this topology */
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
hw_pp = sde_enc->hw_pp[i];
|
||||
hw_dsc = sde_enc->hw_dsc[i];
|
||||
hw_dsc_pp = sde_enc->hw_dsc_pp[i];
|
||||
|
||||
if (hw_pp && hw_pp->ops.disable_dsc)
|
||||
hw_pp->ops.disable_dsc(hw_pp);
|
||||
_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, NULL,
|
||||
0, 0, 0, hw_dsc_pp);
|
||||
|
||||
if (hw_dsc && hw_dsc->ops.dsc_disable)
|
||||
hw_dsc->ops.dsc_disable(hw_dsc);
|
||||
if (hw_dsc)
|
||||
sde_enc->dirty_dsc_ids[i] = hw_dsc->idx;
|
||||
}
|
||||
|
||||
return ret;
|
||||
/* Clear the DSC ACTIVE config for this CTL */
|
||||
if (hw_ctl && hw_ctl->ops.setup_dsc_cfg) {
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Since pending flushes from previous commit get cleared
|
||||
* sometime after this point, setting DSC flush bits now
|
||||
* will have no effect. Therefore dirty_dsc_ids track which
|
||||
* DSC blocks must be flushed for the next trigger.
|
||||
*/
|
||||
}
|
||||
|
||||
static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
|
||||
@@ -1904,8 +1956,6 @@ static int _sde_encoder_update_rsc_client(
|
||||
struct msm_mode_info mode_info;
|
||||
int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
|
||||
int rc = 0;
|
||||
int i;
|
||||
struct sde_encoder_phys *phys;
|
||||
u32 qsync_mode = 0;
|
||||
|
||||
if (!drm_enc || !drm_enc->dev) {
|
||||
@@ -1940,16 +1990,9 @@ static int _sde_encoder_update_rsc_client(
|
||||
* secondary command mode panel.
|
||||
* Clone mode encoder can request CLK STATE only.
|
||||
*/
|
||||
for (i = 0; i < sde_enc->num_phys_encs; i++) {
|
||||
phys = sde_enc->phys_encs[i];
|
||||
|
||||
if (phys) {
|
||||
qsync_mode = sde_connector_get_property(
|
||||
phys->connector->state,
|
||||
CONNECTOR_PROP_QSYNC_MODE);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (sde_enc->cur_master)
|
||||
qsync_mode = sde_connector_get_qsync_mode(
|
||||
sde_enc->cur_master->connector);
|
||||
|
||||
if (sde_encoder_in_clone_mode(drm_enc) || !disp_info->is_primary ||
|
||||
(disp_info->is_primary && qsync_mode))
|
||||
@@ -2137,11 +2180,11 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
|
||||
_sde_encoder_irq_control(drm_enc, true);
|
||||
|
||||
if (is_cmd_mode)
|
||||
_sde_encoder_pm_qos_add_request(drm_enc);
|
||||
_sde_encoder_pm_qos_add_request(drm_enc, sde_kms);
|
||||
|
||||
} else {
|
||||
if (is_cmd_mode)
|
||||
_sde_encoder_pm_qos_remove_request(drm_enc);
|
||||
_sde_encoder_pm_qos_remove_request(drm_enc, sde_kms);
|
||||
|
||||
/* disable all the irq */
|
||||
_sde_encoder_irq_control(drm_enc, false);
|
||||
@@ -2750,6 +2793,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
|
||||
struct sde_connector_state *sde_conn_state = NULL;
|
||||
struct sde_connector *sde_conn = NULL;
|
||||
struct sde_rm_hw_iter dsc_iter, pp_iter;
|
||||
struct sde_rm_hw_request request_hw;
|
||||
int i = 0, ret;
|
||||
|
||||
if (!drm_enc) {
|
||||
@@ -2851,6 +2895,20 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
|
||||
sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
|
||||
}
|
||||
|
||||
/* Get PP for DSC configuration */
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
sde_enc->hw_dsc_pp[i] = NULL;
|
||||
if (!sde_enc->hw_dsc[i])
|
||||
continue;
|
||||
|
||||
request_hw.id = sde_enc->hw_dsc[i]->base.id;
|
||||
request_hw.type = SDE_HW_BLK_PINGPONG;
|
||||
if (!sde_rm_request_hw_blk(&sde_kms->rm, &request_hw))
|
||||
break;
|
||||
sde_enc->hw_dsc_pp[i] =
|
||||
(struct sde_hw_pingpong *) request_hw.hw;
|
||||
}
|
||||
|
||||
for (i = 0; i < sde_enc->num_phys_encs; i++) {
|
||||
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
|
||||
|
||||
@@ -3186,6 +3244,12 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
|
||||
phys->comp_ratio = comp_info->comp_ratio;
|
||||
phys->wide_bus_en = mode_info.wide_bus_en;
|
||||
phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
|
||||
if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
|
||||
phys->dsc_extra_pclk_cycle_cnt =
|
||||
comp_info->dsc_info.pclk_per_line;
|
||||
phys->dsc_extra_disp_width =
|
||||
comp_info->dsc_info.extra_width;
|
||||
}
|
||||
if (phys != sde_enc->cur_master) {
|
||||
/**
|
||||
* on DMS request, the encoder will be enabled
|
||||
@@ -3368,11 +3432,11 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
|
||||
sde_enc = to_sde_encoder_virt(phys_enc->parent);
|
||||
|
||||
if (phys_enc == sde_enc->cur_master && phys_enc->hw_pp &&
|
||||
phys_enc->hw_pp->merge_3d &&
|
||||
phys_enc->hw_ctl->ops.reset_post_disable)
|
||||
phys_enc->hw_ctl->ops.reset_post_disable(
|
||||
phys_enc->hw_ctl, &phys_enc->intf_cfg_v1,
|
||||
phys_enc->hw_pp->merge_3d->idx);
|
||||
phys_enc->hw_pp->merge_3d ?
|
||||
phys_enc->hw_pp->merge_3d->idx : 0);
|
||||
|
||||
phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
|
||||
phys_enc->hw_ctl->ops.trigger_start(phys_enc->hw_ctl);
|
||||
@@ -3673,6 +3737,7 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
|
||||
unsigned long lock_flags;
|
||||
struct sde_encoder_virt *sde_enc;
|
||||
int pend_ret_fence_cnt;
|
||||
struct sde_connector *c_conn;
|
||||
|
||||
if (!drm_enc || !phys) {
|
||||
SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
|
||||
@@ -3681,6 +3746,7 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
|
||||
}
|
||||
|
||||
sde_enc = to_sde_encoder_virt(drm_enc);
|
||||
c_conn = to_sde_connector(phys->connector);
|
||||
|
||||
if (!phys->hw_pp) {
|
||||
SDE_ERROR("invalid pingpong hw\n");
|
||||
@@ -3709,6 +3775,15 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
|
||||
|
||||
pend_ret_fence_cnt = atomic_read(&phys->pending_retire_fence_cnt);
|
||||
|
||||
/* perform peripheral flush on every frame update for dp dsc */
|
||||
if (phys->hw_intf && phys->hw_intf->cap->type == INTF_DP &&
|
||||
phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
|
||||
phys->comp_ratio && ctl->ops.update_bitmask_periph &&
|
||||
c_conn->ops.update_pps) {
|
||||
c_conn->ops.update_pps(phys->connector, NULL, c_conn->display);
|
||||
ctl->ops.update_bitmask_periph(ctl, phys->hw_intf->idx, 1);
|
||||
}
|
||||
|
||||
if ((extra_flush && extra_flush->pending_flush_mask)
|
||||
&& ctl->ops.update_pending_flush)
|
||||
ctl->ops.update_pending_flush(ctl, extra_flush);
|
||||
@@ -3804,50 +3879,6 @@ void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
|
||||
}
|
||||
}
|
||||
|
||||
static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
|
||||
s64 timeout_ms, struct sde_encoder_wait_info *info)
|
||||
{
|
||||
int rc = 0;
|
||||
s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
|
||||
ktime_t cur_ktime;
|
||||
ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
|
||||
|
||||
do {
|
||||
rc = wait_event_timeout(*(info->wq),
|
||||
atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
|
||||
cur_ktime = ktime_get();
|
||||
|
||||
SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
|
||||
timeout_ms, atomic_read(info->atomic_cnt));
|
||||
/* If we timed out, counter is valid and time is less, wait again */
|
||||
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
|
||||
(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int sde_encoder_helper_wait_event_timeout(int32_t drm_id, int32_t hw_id,
|
||||
struct sde_encoder_wait_info *info)
|
||||
{
|
||||
int rc;
|
||||
ktime_t exp_ktime = ktime_add_ms(ktime_get(), info->timeout_ms);
|
||||
|
||||
rc = _sde_encoder_wait_timeout(drm_id, hw_id, info->timeout_ms, info);
|
||||
|
||||
/**
|
||||
* handle disabled irq case where timer irq is also delayed.
|
||||
* wait for additional timeout of FAULT_TOLERENCE_WAIT_IN_MS
|
||||
* if it event_timeout expired late detected.
|
||||
*/
|
||||
if (atomic_read(info->atomic_cnt) && (!rc) &&
|
||||
(ktime_compare_safe(ktime_get(), ktime_add_ms(exp_ktime,
|
||||
FAULT_TOLERENCE_DELTA_IN_MS)) > 0))
|
||||
rc = _sde_encoder_wait_timeout(drm_id, hw_id,
|
||||
FAULT_TOLERENCE_WAIT_IN_MS, info);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
|
||||
{
|
||||
struct sde_encoder_virt *sde_enc;
|
||||
@@ -4539,6 +4570,40 @@ static int _helper_flush_qsync(struct sde_encoder_phys *phys_enc)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool _sde_encoder_dsc_is_dirty(struct sde_encoder_virt *sde_enc)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
/**
|
||||
* This dirty_dsc_hw field is set during DSC disable to
|
||||
* indicate which DSC blocks need to be flushed
|
||||
*/
|
||||
if (sde_enc->dirty_dsc_ids[i])
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void _helper_flush_dsc(struct sde_encoder_virt *sde_enc)
|
||||
{
|
||||
int i;
|
||||
struct sde_hw_ctl *hw_ctl = NULL;
|
||||
enum sde_dsc dsc_idx;
|
||||
|
||||
if (sde_enc->cur_master)
|
||||
hw_ctl = sde_enc->cur_master->hw_ctl;
|
||||
|
||||
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
|
||||
dsc_idx = sde_enc->dirty_dsc_ids[i];
|
||||
if (dsc_idx && hw_ctl && hw_ctl->ops.update_bitmask_dsc)
|
||||
hw_ctl->ops.update_bitmask_dsc(hw_ctl, dsc_idx, 1);
|
||||
|
||||
sde_enc->dirty_dsc_ids[i] = DSC_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
static void _sde_encoder_needs_hw_reset(struct drm_encoder *drm_enc,
|
||||
int ln_cnt1)
|
||||
{
|
||||
@@ -4591,6 +4656,12 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
|
||||
ln_cnt1 = sde_enc->cur_master->ops.get_wr_line_count(
|
||||
sde_enc->cur_master);
|
||||
|
||||
/* update the qsync parameters for the current frame */
|
||||
if (sde_enc->cur_master)
|
||||
sde_connector_set_qsync_params(
|
||||
sde_enc->cur_master->connector);
|
||||
|
||||
|
||||
if (sde_enc->cur_master && sde_enc->cur_master->connector &&
|
||||
disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
|
||||
sde_enc->frame_trigger_mode = sde_connector_get_property(
|
||||
@@ -4616,8 +4687,8 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
|
||||
needs_hw_reset = true;
|
||||
_sde_encoder_setup_dither(phys);
|
||||
|
||||
/* flush the mixer if qsync is enabled */
|
||||
if (sde_enc->cur_master && sde_connector_qsync_updated(
|
||||
if (sde_enc->cur_master &&
|
||||
sde_connector_is_qsync_updated(
|
||||
sde_enc->cur_master->connector)) {
|
||||
_helper_flush_qsync(phys);
|
||||
}
|
||||
@@ -4656,6 +4727,8 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
|
||||
SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
|
||||
ret = rc;
|
||||
}
|
||||
} else if (_sde_encoder_dsc_is_dirty(sde_enc)) {
|
||||
_helper_flush_dsc(sde_enc);
|
||||
}
|
||||
|
||||
end:
|
||||
@@ -4733,7 +4806,6 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
|
||||
}
|
||||
|
||||
if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
|
||||
sde_enc->disp_info.is_primary &&
|
||||
!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
|
||||
SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
|
||||
mod_timer(&sde_enc->vsync_event_timer,
|
||||
@@ -5379,8 +5451,7 @@ struct drm_encoder *sde_encoder_init(
|
||||
drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL);
|
||||
drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
|
||||
|
||||
if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
|
||||
disp_info->is_primary)
|
||||
if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
|
||||
timer_setup(&sde_enc->vsync_event_timer,
|
||||
sde_encoder_vsync_event_handler, 0);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (C) 2013 Red Hat
|
||||
* Author: Rob Clark <robdclark@gmail.com>
|
||||
*
|
||||
@@ -278,7 +278,7 @@ void sde_encoder_recovery_events_handler(struct drm_encoder *encoder,
|
||||
* @drm_enc: Pointer to drm encoder structure
|
||||
* @Return: true if successful in updating the encoder structure
|
||||
*/
|
||||
int sde_encoder_in_clone_mode(struct drm_encoder *enc);
|
||||
bool sde_encoder_in_clone_mode(struct drm_encoder *enc);
|
||||
|
||||
/**
|
||||
* sde_encoder_is_primary_display - checks if underlying display is primary
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __SDE_ENCODER_PHYS_H__
|
||||
@@ -259,6 +259,8 @@ struct sde_encoder_irq {
|
||||
* path supports SDE_CTL_ACTIVE_CFG
|
||||
* @comp_type: Type of compression supported
|
||||
* @comp_ratio: Compression ratio
|
||||
* @dsc_extra_pclk_cycle_cnt: Extra pclk cycle count for DSC over DP
|
||||
* @dsc_extra_disp_width: Additional display width for DSC over DP
|
||||
* @wide_bus_en: Wide-bus configuration
|
||||
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
|
||||
* @enable_state: Enable state tracking
|
||||
@@ -305,6 +307,8 @@ struct sde_encoder_phys {
|
||||
struct sde_hw_intf_cfg_v1 intf_cfg_v1;
|
||||
enum msm_display_compression_type comp_type;
|
||||
enum msm_display_compression_ratio comp_ratio;
|
||||
u32 dsc_extra_pclk_cycle_cnt;
|
||||
u32 dsc_extra_disp_width;
|
||||
bool wide_bus_en;
|
||||
spinlock_t *enc_spinlock;
|
||||
enum sde_enc_enable_state enable_state;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
@@ -122,7 +122,6 @@ static void _sde_encoder_phys_cmd_update_flush_mask(
|
||||
{
|
||||
struct sde_encoder_phys_cmd *cmd_enc;
|
||||
struct sde_hw_ctl *ctl;
|
||||
bool merge_3d_enable = false;
|
||||
|
||||
if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_pp)
|
||||
return;
|
||||
@@ -140,16 +139,11 @@ static void _sde_encoder_phys_cmd_update_flush_mask(
|
||||
return;
|
||||
}
|
||||
|
||||
if (sde_encoder_helper_get_3d_blend_mode(phys_enc) != BLEND_3D_NONE)
|
||||
merge_3d_enable = true;
|
||||
|
||||
ctl->ops.update_bitmask_intf(ctl, phys_enc->intf_idx, 1);
|
||||
|
||||
|
||||
if (test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
|
||||
phys_enc->hw_pp->merge_3d)
|
||||
if (ctl->ops.update_bitmask_merge3d && phys_enc->hw_pp->merge_3d)
|
||||
ctl->ops.update_bitmask_merge3d(ctl,
|
||||
phys_enc->hw_pp->merge_3d->idx, merge_3d_enable);
|
||||
phys_enc->hw_pp->merge_3d->idx, 1);
|
||||
|
||||
SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d intf_idx %x\n",
|
||||
ctl->idx - CTL_0, phys_enc->intf_idx);
|
||||
@@ -908,8 +902,7 @@ static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc,
|
||||
return 0;
|
||||
|
||||
mode = &phys_enc->cached_mode;
|
||||
qsync_mode = sde_connector_get_property(
|
||||
conn->state, CONNECTOR_PROP_QSYNC_MODE);
|
||||
qsync_mode = sde_connector_get_qsync_mode(conn);
|
||||
|
||||
if (mode && (qsync_mode == SDE_RM_QSYNC_CONTINUOUS_MODE)) {
|
||||
u32 qsync_min_fps = 0;
|
||||
@@ -1369,7 +1362,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
|
||||
}
|
||||
}
|
||||
|
||||
if (sde_connector_qsync_updated(phys_enc->connector)) {
|
||||
if (sde_connector_is_qsync_updated(phys_enc->connector)) {
|
||||
tc_cfg.sync_threshold_start =
|
||||
_get_tearcheck_threshold(phys_enc,
|
||||
&extra_frame_trigger_time);
|
||||
@@ -1429,6 +1422,25 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
|
||||
"ctl start interrupt wait failed\n");
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
if (sde_encoder_phys_cmd_is_master(phys_enc)) {
|
||||
/*
|
||||
* Signaling the retire fence at ctl start timeout
|
||||
* to allow the next commit and avoid device freeze.
|
||||
* As ctl start timeout can occur due to no read ptr,
|
||||
* updating pending_rd_ptr_cnt here may not cover all
|
||||
* cases. Hence signaling the retire fence.
|
||||
*/
|
||||
if (atomic_add_unless(
|
||||
&phys_enc->pending_retire_fence_cnt, -1, 0))
|
||||
phys_enc->parent_ops.handle_frame_done(
|
||||
phys_enc->parent,
|
||||
phys_enc,
|
||||
SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
|
||||
atomic_add_unless(
|
||||
&phys_enc->pending_ctlstart_cnt, -1, 0);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
@@ -82,7 +82,8 @@ static void drm_mode_to_intf_timing_params(
|
||||
*/
|
||||
timing->width = mode->hdisplay; /* active width */
|
||||
|
||||
if (vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
|
||||
if (phys_enc->hw_intf->cap->type != INTF_DP &&
|
||||
vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
|
||||
comp_ratio = vid_enc->base.comp_ratio;
|
||||
if (comp_ratio == MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1)
|
||||
timing->width = DIV_ROUND_UP(timing->width, 2);
|
||||
@@ -105,6 +106,7 @@ static void drm_mode_to_intf_timing_params(
|
||||
timing->underflow_clr = 0xff;
|
||||
timing->hsync_skew = mode->hskew;
|
||||
timing->v_front_porch_fixed = vid_enc->base.vfp_cached;
|
||||
timing->compression_en = false;
|
||||
|
||||
/* DSI controller cannot handle active-low sync signals. */
|
||||
if (phys_enc->hw_intf->cap->type == INTF_DSI) {
|
||||
@@ -112,8 +114,41 @@ static void drm_mode_to_intf_timing_params(
|
||||
timing->vsync_polarity = 0;
|
||||
}
|
||||
|
||||
/* for DP/EDP, Shift timings to align it to bottom right */
|
||||
if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
|
||||
(phys_enc->hw_intf->cap->type == INTF_EDP)) {
|
||||
timing->h_back_porch += timing->h_front_porch;
|
||||
timing->h_front_porch = 0;
|
||||
timing->v_back_porch += timing->v_front_porch;
|
||||
timing->v_front_porch = 0;
|
||||
}
|
||||
|
||||
timing->wide_bus_en = vid_enc->base.wide_bus_en;
|
||||
|
||||
/*
|
||||
* for DP, divide the horizontal parameters by 2 when
|
||||
* widebus or compression is enabled, irrespective of
|
||||
* compression ratio
|
||||
*/
|
||||
if (phys_enc->hw_intf->cap->type == INTF_DP &&
|
||||
(timing->wide_bus_en || vid_enc->base.comp_ratio)) {
|
||||
timing->width = timing->width >> 1;
|
||||
timing->xres = timing->xres >> 1;
|
||||
timing->h_back_porch = timing->h_back_porch >> 1;
|
||||
timing->h_front_porch = timing->h_front_porch >> 1;
|
||||
timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
|
||||
|
||||
if (vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
|
||||
vid_enc->base.comp_ratio) {
|
||||
timing->compression_en = true;
|
||||
timing->extra_dto_cycles =
|
||||
vid_enc->base.dsc_extra_pclk_cycle_cnt;
|
||||
timing->width += vid_enc->base.dsc_extra_disp_width;
|
||||
timing->h_back_porch +=
|
||||
vid_enc->base.dsc_extra_disp_width;
|
||||
}
|
||||
}
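/*
 * Worked example (illustrative numbers only, not from any real panel
 * timing): a DP mode with hdisplay 3840, h_front_porch 48, h_back_porch
 * 80 and hsync_pulse_width 32 running with wide_bus_en set ends up with
 * width 1920, h_front_porch 24, h_back_porch 40 and hsync_pulse_width 16
 * after the right-shifts above, so the timing engine is programmed at
 * half the pixel-bus rate.
 */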
|
||||
|
||||
/*
|
||||
* For edp only:
|
||||
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
|
||||
@@ -325,9 +360,8 @@ static void _sde_encoder_phys_vid_avr_ctrl(struct sde_encoder_phys *phys_enc)
|
||||
struct sde_encoder_phys_vid *vid_enc =
|
||||
to_sde_encoder_phys_vid(phys_enc);
|
||||
|
||||
avr_params.avr_mode = sde_connector_get_property(
|
||||
phys_enc->connector->state,
|
||||
CONNECTOR_PROP_QSYNC_MODE);
|
||||
avr_params.avr_mode = sde_connector_get_qsync_mode(
|
||||
phys_enc->connector);
|
||||
|
||||
if (vid_enc->base.hw_intf->ops.avr_ctrl) {
|
||||
vid_enc->base.hw_intf->ops.avr_ctrl(
|
||||
@@ -418,7 +452,8 @@ static void sde_encoder_phys_vid_setup_timing_engine(
|
||||
&intf_cfg);
|
||||
}
|
||||
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
|
||||
programmable_fetch_config(phys_enc, &timing_params);
|
||||
if (phys_enc->hw_intf->cap->type == INTF_DSI)
|
||||
programmable_fetch_config(phys_enc, &timing_params);
|
||||
|
||||
exit:
|
||||
if (phys_enc->parent_ops.get_qsync_fps)
|
||||
@@ -701,7 +736,6 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
|
||||
struct sde_encoder_phys_vid *vid_enc;
|
||||
struct sde_hw_intf *intf;
|
||||
struct sde_hw_ctl *ctl;
|
||||
bool merge_3d_enable = false;
|
||||
|
||||
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
|
||||
!phys_enc->parent->dev->dev_private ||
|
||||
@@ -759,15 +793,16 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
|
||||
goto skip_flush;
|
||||
}
|
||||
|
||||
if (sde_encoder_helper_get_3d_blend_mode(phys_enc) != BLEND_3D_NONE)
|
||||
merge_3d_enable = true;
|
||||
|
||||
ctl->ops.update_bitmask_intf(ctl, intf->idx, 1);
|
||||
|
||||
if (test_bit(SDE_CTL_ACTIVE_CFG, &ctl->caps->features) &&
|
||||
phys_enc->hw_pp->merge_3d)
|
||||
if (ctl->ops.update_bitmask_merge3d && phys_enc->hw_pp->merge_3d)
|
||||
ctl->ops.update_bitmask_merge3d(ctl,
|
||||
phys_enc->hw_pp->merge_3d->idx, merge_3d_enable);
|
||||
phys_enc->hw_pp->merge_3d->idx, 1);
|
||||
|
||||
if (phys_enc->hw_intf->cap->type == INTF_DP &&
|
||||
phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
|
||||
phys_enc->comp_ratio && ctl->ops.update_bitmask_periph)
|
||||
ctl->ops.update_bitmask_periph(ctl, intf->idx, 1);
|
||||
|
||||
skip_flush:
|
||||
SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d intf %d\n",
|
||||
@@ -933,7 +968,7 @@ static int sde_encoder_phys_vid_prepare_for_kickoff(
|
||||
vid_enc->error_count = 0;
|
||||
}
|
||||
|
||||
if (sde_connector_qsync_updated(phys_enc->connector))
|
||||
if (sde_connector_is_qsync_updated(phys_enc->connector))
|
||||
_sde_encoder_phys_vid_avr_ctrl(phys_enc);
|
||||
|
||||
return rc;
|
||||
@@ -1070,9 +1105,7 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
|
||||
phys_enc->enable_state = SDE_ENC_ENABLED;
|
||||
}
|
||||
|
||||
avr_mode = sde_connector_get_property(
|
||||
phys_enc->connector->state,
|
||||
CONNECTOR_PROP_QSYNC_MODE);
|
||||
avr_mode = sde_connector_get_qsync_mode(phys_enc->connector);
|
||||
|
||||
if (avr_mode && vid_enc->base.hw_intf->ops.avr_trigger) {
|
||||
vid_enc->base.hw_intf->ops.avr_trigger(vid_enc->base.hw_intf);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
@@ -635,6 +635,7 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
|
||||
{
|
||||
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
|
||||
struct sde_rect wb_roi = {0,};
|
||||
struct sde_rect pu_roi = {0,};
|
||||
int data_pt;
|
||||
int ds_outw = 0;
|
||||
int ds_outh = 0;
|
||||
@@ -673,12 +674,11 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
|
||||
}
|
||||
|
||||
/* validate conn roi against pu rect */
|
||||
if (!sde_kms_rect_is_null(&cstate->crtc_roi)) {
|
||||
if (wb_roi.w != cstate->crtc_roi.w ||
|
||||
wb_roi.h != cstate->crtc_roi.h) {
|
||||
if (cstate->user_roi_list.num_rects) {
|
||||
sde_kms_rect_merge_rectangles(&cstate->user_roi_list, &pu_roi);
|
||||
if (wb_roi.w != pu_roi.w || wb_roi.h != pu_roi.h) {
|
||||
SDE_ERROR("invalid wb roi with pu [%dx%d vs %dx%d]\n",
|
||||
wb_roi.w, wb_roi.h, cstate->crtc_roi.w,
|
||||
cstate->crtc_roi.h);
|
||||
wb_roi.w, wb_roi.h, pu_roi.w, pu_roi.h);
|
||||
ret = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
@@ -865,7 +865,7 @@ static void _sde_encoder_phys_wb_update_cwb_flush(
|
||||
dspp_out = (cwb_capture_mode == CAPTURE_DSPP_OUT);
|
||||
need_merge = (crtc->num_mixers > 1) ? true : false;
|
||||
|
||||
if (src_pp_idx > LM_0 || ((cwb_idx + crtc->num_mixers) > CWB_MAX)) {
|
||||
if (src_pp_idx > CWB_0 || ((cwb_idx + crtc->num_mixers) > CWB_MAX)) {
|
||||
SDE_ERROR("invalid hw config for CWB\n");
|
||||
return;
|
||||
}
|
||||
@@ -1553,13 +1553,21 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
|
||||
if (phys_enc->hw_ctl->ops.clear_pending_flush)
|
||||
phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
|
||||
|
||||
sde_encoder_helper_phys_disable(phys_enc, wb_enc);
|
||||
/*
|
||||
* New CTL reset sequence from 5.0 MDP onwards.
|
||||
* If has_3d_merge_reset is not set, legacy reset
|
||||
* sequence is executed.
|
||||
*/
|
||||
if (hw_wb->catalog->has_3d_merge_reset) {
|
||||
sde_encoder_helper_phys_disable(phys_enc, wb_enc);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
|
||||
goto exit;
|
||||
|
||||
phys_enc->enable_state = SDE_ENC_DISABLING;
|
||||
|
||||
if (hw_wb->catalog->has_3d_merge_reset)
|
||||
goto exit;
|
||||
|
||||
sde_encoder_phys_wb_prepare_for_kickoff(phys_enc, NULL);
|
||||
sde_encoder_phys_wb_irq_ctrl(phys_enc, true);
|
||||
if (phys_enc->hw_ctl->ops.trigger_flush)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
@@ -206,6 +206,7 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
|
||||
sde_fence->ctx->name, val);
|
||||
dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
|
||||
ctx->context, val);
|
||||
kref_get(&ctx->kref);
|
||||
|
||||
/* create fd */
|
||||
fd = get_unused_fd_flags(0);
|
||||
@@ -228,7 +229,6 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
|
||||
|
||||
fd_install(fd, sync_file->file);
|
||||
sde_fence->fd = fd;
|
||||
kref_get(&ctx->kref);
|
||||
|
||||
spin_lock(&ctx->list_lock);
|
||||
list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
|
||||
|
||||
@@ -1068,15 +1068,18 @@ static int ad4_roi_coordinate_offset(struct sde_hw_cp_cfg *hw_cfg,
|
||||
/* the region occupy both sides of screen: left and right */
|
||||
if (hw_lm->cfg.right_mixer) {
|
||||
output->h_start = 0;
|
||||
output->h_end -= hw_lm->cfg.out_width;
|
||||
output->h_end -= (hw_lm->cfg.out_width -
|
||||
MERGE_WIDTH_RIGHT);
|
||||
} else {
|
||||
output->h_end = hw_lm->cfg.out_width;
|
||||
}
|
||||
} else {
|
||||
/* the region on the right of the screen*/
|
||||
if (hw_lm->cfg.right_mixer) {
|
||||
output->h_start -= hw_lm->cfg.out_width;
|
||||
output->h_end -= hw_lm->cfg.out_width;
|
||||
output->h_start -= (hw_lm->cfg.out_width -
|
||||
MERGE_WIDTH_RIGHT);
|
||||
output->h_end -= (hw_lm->cfg.out_width -
|
||||
MERGE_WIDTH_RIGHT);
|
||||
} else {
|
||||
output->h_start = 0;
|
||||
output->h_end = 0;
|
||||
|
||||
@@ -3790,10 +3790,24 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
|
||||
sde_cfg->sui_misr_supported = true;
|
||||
sde_cfg->has_decimation = true;
|
||||
sde_cfg->sui_block_xin_mask = 0x2EE1;
|
||||
sde_cfg->has_3d_merge_reset = true;
|
||||
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
|
||||
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
|
||||
sde_cfg->has_hdr = true;
|
||||
sde_cfg->has_vig_p010 = true;
|
||||
} else if (IS_SDMMAGPIE_TARGET(hw_rev)) {
|
||||
sde_cfg->has_cwb_support = true;
|
||||
sde_cfg->has_wb_ubwc = true;
|
||||
sde_cfg->has_qsync = true;
|
||||
sde_cfg->perf.min_prefill_lines = 24;
|
||||
sde_cfg->vbif_qos_nlvl = 8;
|
||||
sde_cfg->ts_prefill_rev = 2;
|
||||
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
|
||||
sde_cfg->delay_prg_fetch_start = true;
|
||||
sde_cfg->sui_ns_allowed = true;
|
||||
sde_cfg->sui_misr_supported = true;
|
||||
sde_cfg->sui_block_xin_mask = 0xE71;
|
||||
sde_cfg->has_3d_merge_reset = true;
|
||||
} else if (IS_KONA_TARGET(hw_rev)) {
|
||||
sde_cfg->has_cwb_support = true;
|
||||
sde_cfg->has_wb_ubwc = true;
|
||||
@@ -3843,7 +3857,8 @@ static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
|
||||
if (!sde_cfg)
|
||||
return -EINVAL;
|
||||
|
||||
if (IS_SM8150_TARGET(hw_rev) || IS_SM6150_TARGET(hw_rev)) {
|
||||
if (IS_SM8150_TARGET(hw_rev) || IS_SM6150_TARGET(hw_rev) ||
|
||||
IS_SDMMAGPIE_TARGET(hw_rev)) {
|
||||
sde_cfg->sui_supported_blendstage =
|
||||
sde_cfg->max_mixer_blendstages - SDE_STAGE_0;
|
||||
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
#define SDE_HW_VER_500 SDE_HW_VER(5, 0, 0) /* sm8150 v1.0 */
|
||||
#define SDE_HW_VER_501 SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */
|
||||
#define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
|
||||
#define SDE_HW_VER_520 SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
|
||||
#define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
|
||||
#define SDE_HW_VER_600 SDE_HW_VER(6, 0, 0) /* kona */
|
||||
|
||||
@@ -56,6 +57,7 @@
|
||||
#define IS_SDM670_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_410)
|
||||
#define IS_SM8150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500)
|
||||
#define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510)
|
||||
#define IS_SDMMAGPIE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_520)
|
||||
#define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530)
|
||||
#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
|
||||
|
||||
@@ -1359,7 +1361,7 @@ static inline bool sde_hw_sspp_multirect_enabled(const struct sde_sspp_cfg *cfg)
|
||||
test_bit(SDE_SSPP_SMART_DMA_V2p5, &cfg->features);
|
||||
}
|
||||
|
||||
static inline sde_hw_intf_te_supported(const struct sde_mdss_cfg *sde_cfg)
|
||||
static inline bool sde_hw_intf_te_supported(const struct sde_mdss_cfg *sde_cfg)
|
||||
{
|
||||
return test_bit(SDE_INTF_TE, &(sde_cfg->intf[0].features));
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
@@ -1032,17 +1032,19 @@ static int sde_hw_ctl_reset_post_disable(struct sde_hw_ctl *ctx,
|
||||
}
|
||||
}
|
||||
|
||||
/* disable and flush merge3d_blk */
|
||||
merge_3d_flush = BIT(merge_3d_idx - MERGE_3D_0);
|
||||
merge_3d_active &= ~BIT(merge_3d_idx - MERGE_3D_0);
|
||||
|
||||
if (merge_3d_idx) {
|
||||
/* disable and flush merge3d_blk */
|
||||
merge_3d_flush = BIT(merge_3d_idx - MERGE_3D_0);
|
||||
merge_3d_active &= ~BIT(merge_3d_idx - MERGE_3D_0);
|
||||
ctx->flush.pending_merge_3d_flush_mask = merge_3d_flush;
|
||||
SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
|
||||
}
|
||||
sde_hw_ctl_clear_all_blendstages(ctx);
|
||||
|
||||
ctx->flush.pending_merge_3d_flush_mask = merge_3d_flush;
|
||||
ctx->flush.pending_intf_flush_mask = intf_flush;
|
||||
ctx->flush.pending_wb_flush_mask = wb_flush;
|
||||
|
||||
SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
|
||||
|
||||
SDE_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
|
||||
SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
|
||||
|
||||
|
||||
@@ -191,6 +191,8 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
|
||||
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
|
||||
u32 panel_format;
|
||||
u32 intf_cfg, intf_cfg2;
|
||||
u32 display_data_hctl = 0, active_data_hctl = 0;
|
||||
bool dp_intf = false;
|
||||
|
||||
/* read interface_cfg */
|
||||
intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
|
||||
@@ -204,14 +206,12 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
|
||||
display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
|
||||
p->hsync_skew - 1;
|
||||
|
||||
if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
|
||||
display_v_start += p->hsync_pulse_width + p->h_back_porch;
|
||||
display_v_end -= p->h_front_porch;
|
||||
}
|
||||
|
||||
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
|
||||
hsync_end_x = hsync_period - p->h_front_porch - 1;
|
||||
|
||||
if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP)
|
||||
dp_intf = true;
|
||||
|
||||
if (p->width != p->xres) {
|
||||
active_h_start = hsync_start_x;
|
||||
active_h_end = active_h_start + p->xres - 1;
|
||||
@@ -241,10 +241,36 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
|
||||
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
|
||||
display_hctl = (hsync_end_x << 16) | hsync_start_x;
|
||||
|
||||
if (dp_intf) {
|
||||
active_h_start = hsync_start_x;
|
||||
active_h_end = active_h_start + p->xres - 1;
|
||||
active_v_start = display_v_start;
|
||||
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
|
||||
|
||||
display_v_start += p->hsync_pulse_width + p->h_back_porch;
|
||||
|
||||
active_hctl = (active_h_end << 16) | active_h_start;
|
||||
display_hctl = active_hctl;
|
||||
}
|
||||
|
||||
intf_cfg2 = 0;
|
||||
|
||||
if (dp_intf && p->compression_en) {
|
||||
active_data_hctl = (hsync_start_x + p->extra_dto_cycles) << 16;
|
||||
active_data_hctl += hsync_start_x;
|
||||
|
||||
display_data_hctl = active_data_hctl;
|
||||
|
||||
intf_cfg2 |= BIT(4);
|
||||
}
|
||||
|
||||
den_polarity = 0;
|
||||
if (ctx->cap->type == INTF_HDMI) {
|
||||
hsync_polarity = p->yres >= 720 ? 0 : 1;
|
||||
vsync_polarity = p->yres >= 720 ? 0 : 1;
|
||||
} else if (ctx->cap->type == INTF_DP) {
|
||||
hsync_polarity = p->hsync_polarity;
|
||||
vsync_polarity = p->vsync_polarity;
|
||||
} else {
|
||||
hsync_polarity = 0;
|
||||
vsync_polarity = 0;
|
||||
@@ -265,7 +291,6 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
|
||||
(COLOR_8BIT << 4) |
|
||||
(0x21 << 8));
|
||||
|
||||
intf_cfg2 = 0;
|
||||
if (p->wide_bus_en)
|
||||
intf_cfg2 |= BIT(0);
|
||||
|
||||
@@ -287,6 +312,8 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
|
||||
SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
|
||||
SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
|
||||
SDE_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
|
||||
SDE_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
|
||||
SDE_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
|
||||
}
|
||||
|
||||
static void sde_hw_intf_enable_timing_engine(
|
||||
|
||||
@@ -32,7 +32,9 @@ struct intf_timing_params {
|
||||
u32 underflow_clr;
|
||||
u32 hsync_skew;
|
||||
u32 v_front_porch_fixed;
|
||||
bool wide_bus_en;
|
||||
bool wide_bus_en; /* for DP only */
|
||||
bool compression_en; /* for DP only */
|
||||
u32 extra_dto_cycles; /* for DP only */
|
||||
};
|
||||
|
||||
struct intf_prog_fetch {
|
||||
|
||||
@@ -284,6 +284,7 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
|
||||
IS_SM8150_TARGET(m->hwversion) ||
|
||||
IS_SDMSHRIKE_TARGET(m->hwversion) ||
|
||||
IS_SM6150_TARGET(m->hwversion) ||
|
||||
IS_SDMMAGPIE_TARGET(m->hwversion) ||
|
||||
IS_KONA_TARGET(m->hwversion))
|
||||
ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
|
||||
else
|
||||
|
||||
@@ -139,6 +139,7 @@ static u32 sspp_feature_map[SDE_SSPP_MAX] = {
|
||||
[SDE_SSPP_DMA_IGC] = IGC,
|
||||
[SDE_SSPP_DMA_GC] = GC,
|
||||
[SDE_SSPP_SCALER_QSEED3] = QSEED,
|
||||
[SDE_SSPP_SCALER_QSEED3LITE] = REG_DMA_FEATURES_MAX,
|
||||
};
|
||||
|
||||
static u32 ltm_feature_map[SDE_LTM_MAX] = {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "sde_hwio.h"
|
||||
@@ -259,6 +259,31 @@ static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
|
||||
SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
|
||||
}
|
||||
|
||||
static void sde_hw_sspp_set_src_split_order(struct sde_hw_pipe *ctx,
|
||||
enum sde_sspp_multirect_index rect_mode, bool enable)
|
||||
{
|
||||
struct sde_hw_blk_reg_map *c;
|
||||
u32 opmode, idx, op_mode_off;
|
||||
|
||||
if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
|
||||
return;
|
||||
|
||||
if (rect_mode == SDE_SSPP_RECT_SOLO || rect_mode == SDE_SSPP_RECT_0)
|
||||
op_mode_off = SSPP_SRC_OP_MODE;
|
||||
else
|
||||
op_mode_off = SSPP_SRC_OP_MODE_REC1;
|
||||
|
||||
c = &ctx->hw;
|
||||
opmode = SDE_REG_READ(c, op_mode_off + idx);
|
||||
|
||||
if (enable)
|
||||
opmode |= MDSS_MDP_OP_SPLIT_ORDER;
|
||||
else
|
||||
opmode &= ~MDSS_MDP_OP_SPLIT_ORDER;
|
||||
|
||||
SDE_REG_WRITE(c, op_mode_off + idx, opmode);
|
||||
}
|
||||
|
||||
/**
|
||||
* Setup source pixel format, flip,
|
||||
*/
|
||||
@@ -290,15 +315,12 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
|
||||
c = &ctx->hw;
|
||||
opmode = SDE_REG_READ(c, op_mode_off + idx);
|
||||
opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
|
||||
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE |
|
||||
MDSS_MDP_OP_SPLIT_ORDER);
|
||||
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
|
||||
|
||||
if (flags & SDE_SSPP_FLIP_LR)
|
||||
opmode |= MDSS_MDP_OP_FLIP_LR;
|
||||
if (flags & SDE_SSPP_FLIP_UD)
|
||||
opmode |= MDSS_MDP_OP_FLIP_UD;
|
||||
if ((flags & SDE_SSPP_RIGHT) && ctx->catalog->pipe_order_type)
|
||||
opmode |= MDSS_MDP_OP_SPLIT_ORDER;
|
||||
|
||||
chroma_samp = fmt->chroma_sample;
|
||||
if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
|
||||
@@ -1152,6 +1174,7 @@ static void _setup_layer_ops(struct sde_hw_pipe *c,
|
||||
c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
|
||||
c->ops.setup_pe = sde_hw_sspp_setup_pe_config;
|
||||
c->ops.setup_secure_address = sde_hw_sspp_setup_secure;
|
||||
c->ops.set_src_split_order = sde_hw_sspp_set_src_split_order;
|
||||
}
|
||||
|
||||
if (test_bit(SDE_SSPP_EXCL_RECT, &features))
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _SDE_HW_SSPP_H
|
||||
@@ -570,6 +570,15 @@ struct sde_hw_sspp_ops {
|
||||
enum sde_sspp_multirect_index index,
|
||||
bool enable);
|
||||
|
||||
/**
|
||||
* set_src_split_order - setup source split order priority
|
||||
* @ctx: Pointer to pipe context
|
||||
* @index: rectangle index in multirect
|
||||
* @enable: enable src split order
|
||||
*/
|
||||
void (*set_src_split_order)(struct sde_hw_pipe *ctx,
|
||||
enum sde_sspp_multirect_index index, bool enable);
|
||||
|
||||
/**
|
||||
* setup_inverse_pma - enable/disable alpha unmultiply unit (PMA)
|
||||
* @ctx: Pointer to pipe context
|
||||
|
||||
@@ -52,6 +52,8 @@ static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
|
||||
/* SDE_SCALER_QSEED3LITE */
|
||||
#define QSEED3L_COEF_LUT_Y_SEP_BIT 4
|
||||
#define QSEED3L_COEF_LUT_UV_SEP_BIT 5
|
||||
#define QSEED3L_COEF_LUT_CTRL 0x4C
|
||||
#define QSEED3L_COEF_LUT_SWAP_BIT 0
|
||||
#define QSEED3L_DIR_FILTER_WEIGHT 0x60
|
||||
#define QSEED3LITE_SCALER_VERSION 0x2004
|
||||
#define QSEED4_SCALER_VERSION 0x3000
|
||||
@@ -278,6 +280,9 @@ static void _sde_hw_setup_scaler3lite_lut(struct sde_hw_blk_reg_map *c,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (test_bit(QSEED3L_COEF_LUT_SWAP_BIT, &lut_flags))
|
||||
SDE_REG_WRITE(c, QSEED3L_COEF_LUT_CTRL + offset, BIT(0));
|
||||
}
|
||||
|
||||
static void _sde_hw_setup_scaler3_de(struct sde_hw_blk_reg_map *c,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "sde_hwio.h"
|
||||
@@ -234,7 +234,8 @@ static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
|
||||
ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
|
||||
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
|
||||
ops->set_qos_remap = sde_hw_set_qos_remap;
|
||||
if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion))
|
||||
if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
|
||||
IS_SDMMAGPIE_TARGET(m->hwversion))
|
||||
ops->set_mem_type = sde_hw_set_mem_type_v1;
|
||||
else
|
||||
ops->set_mem_type = sde_hw_set_mem_type;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
@@ -12,7 +12,7 @@
|
||||
#include "sde_irq.h"
|
||||
#include "sde_core_irq.h"
|
||||
|
||||
static uint32_t g_sde_irq_status;
|
||||
uint32_t g_sde_irq_status;
|
||||
|
||||
void sde_irq_update(struct msm_kms *msm_kms, bool enable)
|
||||
{
|
||||
|
||||
@@ -77,6 +77,9 @@ static const char * const iommu_ports[] = {
|
||||
#define SDE_DEBUGFS_DIR "msm_sde"
|
||||
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
|
||||
|
||||
#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
|
||||
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20
|
||||
|
||||
/**
|
||||
* sdecustom - enable certain driver customizations for sde clients
|
||||
* Enabling this modifies the standard DRM behavior slightly and assumes
|
||||
@@ -1159,6 +1162,11 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
|
||||
return;
|
||||
}
|
||||
|
||||
if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
|
||||
SDE_ERROR("power resource is not enabled\n");
|
||||
return;
|
||||
}
|
||||
|
||||
SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
if (encoder->crtc != crtc)
|
||||
@@ -1377,6 +1385,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
|
||||
.cmd_transfer = NULL,
|
||||
.cont_splash_config = NULL,
|
||||
.get_panel_vfp = NULL,
|
||||
.update_pps = dp_connector_update_pps,
|
||||
};
|
||||
struct msm_display_info info;
|
||||
struct drm_encoder *encoder;
|
||||
@@ -2061,7 +2070,7 @@ static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
|
||||
}
|
||||
|
||||
end:
|
||||
if ((ret != 0) && state)
|
||||
if (state)
|
||||
drm_atomic_state_put(state);
|
||||
|
||||
SDE_DEBUG("sde preclose done, ret:%d\n", ret);
|
||||
@@ -2176,14 +2185,10 @@ static void sde_kms_lastclose(struct msm_kms *kms,
|
||||
SDE_DEBUG("deadlock backoff on attempt %d\n", i);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
/**
|
||||
* on success, atomic state object ownership transfers to
|
||||
* framework, otherwise, free it here
|
||||
*/
|
||||
drm_atomic_state_put(state);
|
||||
if (ret)
|
||||
SDE_ERROR("failed to run last close: %d\n", ret);
|
||||
}
|
||||
|
||||
drm_atomic_state_put(state);
|
||||
}
|
||||
|
||||
static int sde_kms_check_secure_transition(struct msm_kms *kms,
|
||||
@@ -2608,13 +2613,81 @@ static bool sde_kms_check_for_splash(struct msm_kms *kms)
|
||||
return sde_kms->splash_data.num_splash_displays;
|
||||
}
|
||||
|
||||
static void _sde_kms_null_commit(struct drm_device *dev,
|
||||
struct drm_encoder *enc)
|
||||
{
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
struct drm_connector *conn = NULL;
|
||||
struct drm_connector *tmp_conn = NULL;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
struct drm_atomic_state *state = NULL;
|
||||
struct drm_crtc_state *crtc_state = NULL;
|
||||
struct drm_connector_state *conn_state = NULL;
|
||||
int retry_cnt = 0;
|
||||
int ret = 0;
|
||||
|
||||
drm_modeset_acquire_init(&ctx, 0);
|
||||
|
||||
retry:
|
||||
ret = drm_modeset_lock_all_ctx(dev, &ctx);
|
||||
if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
|
||||
drm_modeset_backoff(&ctx);
|
||||
retry_cnt++;
|
||||
udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
|
||||
goto retry;
|
||||
} else if (WARN_ON(ret)) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
state = drm_atomic_state_alloc(dev);
|
||||
if (!state) {
|
||||
DRM_ERROR("failed to allocate atomic state, %d\n", ret);
|
||||
goto end;
|
||||
}
|
||||
|
||||
state->acquire_ctx = &ctx;
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
drm_for_each_connector_iter(tmp_conn, &conn_iter) {
|
||||
if (enc == tmp_conn->state->best_encoder) {
|
||||
conn = tmp_conn;
|
||||
break;
|
||||
}
|
||||
}
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
|
||||
if (!conn) {
|
||||
SDE_ERROR("error in finding conn for enc:%d\n", DRMID(enc));
|
||||
goto end;
|
||||
}
|
||||
|
||||
crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
|
||||
conn_state = drm_atomic_get_connector_state(state, conn);
|
||||
if (IS_ERR(conn_state)) {
|
||||
SDE_ERROR("error %d getting connector %d state\n",
|
||||
ret, DRMID(conn));
|
||||
goto end;
|
||||
}
|
||||
|
||||
crtc_state->active = true;
|
||||
drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
|
||||
|
||||
drm_atomic_commit(state);
|
||||
end:
|
||||
if (state)
|
||||
drm_atomic_state_put(state);
|
||||
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
}
|
||||
|
||||
static int sde_kms_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct drm_device *ddev;
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
struct drm_connector *conn;
|
||||
struct drm_encoder *enc;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
struct drm_atomic_state *state;
|
||||
struct drm_atomic_state *state = NULL;
|
||||
struct sde_kms *sde_kms;
|
||||
int ret = 0, num_crtcs = 0;
|
||||
|
||||
@@ -2631,6 +2704,12 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
/* disable hot-plug polling */
|
||||
drm_kms_helper_poll_disable(ddev);
|
||||
|
||||
/* if a display is stuck in cont. splash, trigger a null commit to complete handoff */
|
||||
drm_for_each_encoder(enc, ddev) {
|
||||
if (sde_encoder_in_cont_splash(enc) && enc->crtc)
|
||||
_sde_kms_null_commit(ddev, enc);
|
||||
}
|
||||
|
||||
/* acquire modeset lock(s) */
|
||||
drm_modeset_acquire_init(&ctx, 0);
|
||||
|
||||
@@ -2644,15 +2723,17 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
drm_atomic_state_put(sde_kms->suspend_state);
|
||||
sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
|
||||
if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
|
||||
DRM_ERROR("failed to back up suspend state\n");
|
||||
ret = PTR_ERR(sde_kms->suspend_state);
|
||||
DRM_ERROR("failed to back up suspend state, %d\n", ret);
|
||||
sde_kms->suspend_state = NULL;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* create atomic state to disable all CRTCs */
|
||||
state = drm_atomic_state_alloc(ddev);
|
||||
if (IS_ERR_OR_NULL(state)) {
|
||||
DRM_ERROR("failed to allocate crtc disable state\n");
|
||||
if (!state) {
|
||||
ret = -ENOMEM;
|
||||
DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@@ -2674,7 +2755,7 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to set lp2 for conn %d\n",
|
||||
conn->base.id);
|
||||
drm_atomic_state_put(state);
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
@@ -2686,7 +2767,7 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
if (IS_ERR_OR_NULL(crtc_state)) {
|
||||
DRM_ERROR("failed to get crtc %d state\n",
|
||||
conn->state->crtc->base.id);
|
||||
drm_atomic_state_put(state);
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@@ -2700,7 +2781,6 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
/* check for nothing to do */
|
||||
if (num_crtcs == 0) {
|
||||
DRM_DEBUG("all crtcs are already in the off state\n");
|
||||
drm_atomic_state_put(state);
|
||||
sde_kms->suspend_block = true;
|
||||
goto unlock;
|
||||
}
|
||||
@@ -2709,7 +2789,6 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
ret = drm_atomic_commit(state);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to disable crtcs, %d\n", ret);
|
||||
drm_atomic_state_put(state);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@@ -2734,6 +2813,11 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
}
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
unlock:
|
||||
if (state) {
|
||||
drm_atomic_state_put(state);
|
||||
state = NULL;
|
||||
}
|
||||
|
||||
if (ret == -EDEADLK) {
|
||||
drm_modeset_backoff(&ctx);
|
||||
goto retry;
|
||||
@@ -2741,7 +2825,7 @@ static int sde_kms_pm_suspend(struct device *dev)
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sde_kms_pm_resume(struct device *dev)
|
||||
@@ -2787,10 +2871,10 @@ static int sde_kms_pm_resume(struct device *dev)
|
||||
drm_modeset_backoff(&ctx);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
if (ret < 0)
|
||||
DRM_ERROR("failed to restore state, %d\n", ret);
|
||||
drm_atomic_state_put(sde_kms->suspend_state);
|
||||
}
|
||||
|
||||
drm_atomic_state_put(sde_kms->suspend_state);
|
||||
sde_kms->suspend_state = NULL;
|
||||
}
|
||||
|
||||
@@ -3432,7 +3516,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
|
||||
mutex_init(&sde_kms->secure_transition_lock);
|
||||
atomic_set(&sde_kms->detach_sec_cb, 0);
|
||||
atomic_set(&sde_kms->detach_all_cb, 0);
|
||||
atomic_set(&sde_kms->pm_qos_counts, 0);
|
||||
|
||||
/*
|
||||
* Support format modifiers for compression etc.
|
||||
|
||||
@@ -239,8 +239,6 @@ struct sde_kms {
|
||||
|
||||
struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
|
||||
struct sde_power_client *core_client;
|
||||
struct pm_qos_request pm_qos_cpu_req;
|
||||
atomic_t pm_qos_counts;
|
||||
|
||||
struct sde_power_event *power_event;
|
||||
|
||||
|
||||
@@ -222,6 +222,20 @@ bool sde_plane_is_sec_ui_allowed(struct drm_plane *plane)
|
||||
return !(psde->features & BIT(SDE_SSPP_BLOCK_SEC_UI));
|
||||
}
|
||||
|
||||
void sde_plane_setup_src_split_order(struct drm_plane *plane,
|
||||
enum sde_sspp_multirect_index rect_mode, bool enable)
|
||||
{
|
||||
struct sde_plane *psde;
|
||||
|
||||
if (!plane)
|
||||
return;
|
||||
|
||||
psde = to_sde_plane(plane);
|
||||
if (psde->pipe_hw->ops.set_src_split_order)
|
||||
psde->pipe_hw->ops.set_src_split_order(psde->pipe_hw,
|
||||
rect_mode, enable);
|
||||
}
|
||||
|
||||
/**
|
||||
* _sde_plane_calc_fill_level - calculate fill level of the given source format
|
||||
* @plane: Pointer to drm plane
|
||||
@@ -1555,8 +1569,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
|
||||
if (psde->pipe_hw->ops.setup_format)
|
||||
psde->pipe_hw->ops.setup_format(psde->pipe_hw,
|
||||
fmt, blend_enable,
|
||||
SDE_SSPP_SOLID_FILL |
|
||||
pstate->pipe_order_flags,
|
||||
SDE_SSPP_SOLID_FILL,
|
||||
pstate->multirect_index);
|
||||
|
||||
if (psde->pipe_hw->ops.setup_rects)
|
||||
@@ -2953,9 +2966,8 @@ static void _sde_plane_update_roi_config(struct drm_plane *plane,
|
||||
static void _sde_plane_update_format_and_rects(struct sde_plane *psde,
|
||||
struct sde_plane_state *pstate, const struct sde_format *fmt)
|
||||
{
|
||||
uint32_t src_flags;
|
||||
uint32_t src_flags = 0;
|
||||
|
||||
src_flags = pstate->pipe_order_flags;
|
||||
SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", pstate->rotation);
|
||||
if (pstate->rotation & DRM_MODE_REFLECT_X)
|
||||
src_flags |= SDE_SSPP_FLIP_LR;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (C) 2013 Red Hat
|
||||
* Author: Rob Clark <robdclark@gmail.com>
|
||||
*
|
||||
@@ -285,6 +285,15 @@ u32 sde_plane_get_ubwc_error(struct drm_plane *plane);
|
||||
*/
|
||||
void sde_plane_clear_ubwc_error(struct drm_plane *plane);
|
||||
|
||||
/*
* sde_plane_setup_src_split_order - enable/disable pipe's src_split_order
* @plane: Pointer to DRM plane object
* @rect_mode: multirect mode
* @enable: enable/disable flag
*/
void sde_plane_setup_src_split_order(struct drm_plane *plane,
enum sde_sspp_multirect_index rect_mode, bool enable);
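A minimal, hypothetical caller sketch (only sde_plane_setup_src_split_order and SDE_SSPP_RECT_SOLO come from this snapshot; the function and plane names below are illustrative):

/*
 * Hedged sketch: in a source-split layout the right-hand pipe gets
 * src_split_order set so the hardware fetches it after the left pipe.
 */
static void example_setup_split_order(struct drm_plane *left_pipe,
		struct drm_plane *right_pipe)
{
	sde_plane_setup_src_split_order(left_pipe, SDE_SSPP_RECT_SOLO, false);
	sde_plane_setup_src_split_order(right_pipe, SDE_SSPP_RECT_SOLO, true);
}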
|
||||
|
||||
/* sde_plane_is_cache_required - indicates if the system cache is
|
||||
* required for the plane.
|
||||
* @plane: Pointer to DRM plane object
|
||||
|
||||
@@ -232,6 +232,43 @@ static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool _sde_rm_request_hw_blk_locked(struct sde_rm *rm,
|
||||
struct sde_rm_hw_request *hw_blk_info)
|
||||
{
|
||||
struct list_head *blk_list;
|
||||
struct sde_rm_hw_blk *blk = NULL;
|
||||
|
||||
if (!rm || !hw_blk_info || hw_blk_info->type >= SDE_HW_BLK_MAX) {
|
||||
SDE_ERROR("invalid rm\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
hw_blk_info->hw = NULL;
|
||||
blk_list = &rm->hw_blks[hw_blk_info->type];
|
||||
|
||||
blk = list_prepare_entry(blk, blk_list, list);
|
||||
|
||||
list_for_each_entry_continue(blk, blk_list, list) {
|
||||
if (blk->type != hw_blk_info->type) {
|
||||
SDE_ERROR("found incorrect block type %d on %d list\n",
|
||||
blk->type, hw_blk_info->type);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (blk->hw->id == hw_blk_info->id) {
|
||||
hw_blk_info->hw = blk->hw;
|
||||
SDE_DEBUG("found type %d id %d\n",
|
||||
blk->type, blk->id);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
SDE_DEBUG("no match, type %d id %d\n", hw_blk_info->type,
|
||||
hw_blk_info->id);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
|
||||
{
|
||||
bool ret;
|
||||
@@ -243,6 +280,17 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
mutex_lock(&rm->rm_lock);
|
||||
ret = _sde_rm_request_hw_blk_locked(rm, hw);
|
||||
mutex_unlock(&rm->rm_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
|
||||
{
|
||||
switch (type) {
|
||||
|
||||
@@ -133,6 +133,18 @@ struct sde_rm_hw_iter {
|
||||
enum sde_hw_blk_type type;
|
||||
};
|
||||
|
||||
/**
* struct sde_rm_hw_request - data for requesting hw blk
* @hw: sde_hw object requested, or NULL on failure
* @type: Hardware Block Type client wishes to search for
* @id: Hardware block id
*/
struct sde_rm_hw_request {
void *hw;
enum sde_hw_blk_type type;
int id;
};
|
||||
|
||||
/**
|
||||
* sde_rm_get_topology_name - get the name of the given topology config
|
||||
* @topology: msm_display_topology topology config
|
||||
@@ -227,6 +239,14 @@ void sde_rm_init_hw_iter(
|
||||
*/
|
||||
bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
|
||||
|
||||
/**
* sde_rm_request_hw_blk - retrieve the requested hardware block
* @rm: SDE Resource Manager handle
* @hw: holds the input and output information of the requested hw block
* @Return: true on match found, false on no match found
*/
bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw);
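A minimal sketch of how the new request API might be used (the struct fields match the sde_rm_hw_request definition above; SDE_HW_BLK_DSC is assumed to be a valid sde_hw_blk_type value and the function name is illustrative):

/*
 * Hedged sketch: look up one specific block by type and id. Unlike the
 * iterator, this returns exactly the block whose hw id matches, or NULL.
 */
static void *example_request_dsc_hw(struct sde_rm *rm, int dsc_id)
{
	struct sde_rm_hw_request req = {
		.type = SDE_HW_BLK_DSC,
		.id = dsc_id,
	};

	if (!sde_rm_request_hw_blk(rm, &req))
		return NULL;	/* no block with a matching id */

	return req.hw;
}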
|
||||
|
||||
/**
|
||||
* sde_rm_check_property_topctl - validate property bitmask before it is set
|
||||
* @val: user's proposed topology control bitmask
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
|
||||
@@ -3526,8 +3526,9 @@ void sde_dbg_ctrl(const char *name, ...)
|
||||
if (!strcmp(blk_name, "panic_underrun") &&
|
||||
sde_dbg_base.debugfs_ctrl &
|
||||
DBG_CTRL_PANIC_UNDERRUN) {
|
||||
pr_debug("panic underrun\n");
|
||||
panic("underrun");
|
||||
pr_err("panic underrun\n");
|
||||
SDE_DBG_DUMP_WQ("all", "dbg_bus", "vbif_dbg_bus",
|
||||
"panic");
|
||||
}
|
||||
|
||||
if (!strcmp(blk_name, "reset_hw_panic") &&
|
||||
@@ -4430,7 +4431,8 @@ void sde_dbg_init_dbg_buses(u32 hwversion)
|
||||
memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde));
|
||||
memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
|
||||
|
||||
if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion)) {
|
||||
if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion) ||
|
||||
IS_SDMMAGPIE_TARGET(hwversion)) {
|
||||
dbg->dbgbus_sde.entries = dbg_bus_sde_sm8150;
|
||||
dbg->dbgbus_sde.cmn.entries_size =
|
||||
ARRAY_SIZE(dbg_bus_sde_sm8150);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2012-2015, 2017-2018 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2015, 2017-2019 The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
@@ -206,11 +206,31 @@ if (type == DSS_REG_LDO)
|
||||
} /* msm_dss_config_vreg */
|
||||
EXPORT_SYMBOL(msm_dss_config_vreg);
|
||||
|
||||
static bool msm_dss_is_hw_controlled(struct dss_vreg in_vreg)
|
||||
{
|
||||
u32 mode = 0;
|
||||
char const *regulator_gdsc = "gdsc";
|
||||
|
||||
/*
|
||||
* For gdsc-regulator devices only, REGULATOR_MODE_FAST specifies that
|
||||
* the GDSC is in HW controlled mode.
|
||||
*/
|
||||
mode = regulator_get_mode(in_vreg.vreg);
|
||||
if (!strcmp(regulator_gdsc, in_vreg.vreg_name) &&
|
||||
mode == REGULATOR_MODE_FAST) {
|
||||
DEV_DBG("%pS->%s: %s is HW controlled\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
in_vreg.vreg_name);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
|
||||
{
|
||||
int i = 0, rc = 0;
|
||||
bool need_sleep;
|
||||
int reg_mode;
|
||||
|
||||
if (enable) {
|
||||
for (i = 0; i < num_vreg; i++) {
|
||||
@@ -221,17 +241,9 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
|
||||
in_vreg[i].vreg_name, rc);
|
||||
goto vreg_set_opt_mode_fail;
|
||||
}
|
||||
reg_mode = regulator_get_mode(in_vreg[i].vreg);
|
||||
if (reg_mode == REGULATOR_MODE_FAST) {
|
||||
DEV_DBG("%pS->%s: %s operation not allowed\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
in_vreg[i].vreg_name);
|
||||
/*
|
||||
* This regulator is controlled by HW and cannot be
|
||||
* controlled by Sw vote
|
||||
*/
|
||||
if (msm_dss_is_hw_controlled(in_vreg[i]))
|
||||
continue;
|
||||
}
|
||||
|
||||
need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
|
||||
if (in_vreg[i].pre_on_sleep && need_sleep)
|
||||
usleep_range(in_vreg[i].pre_on_sleep * 1000,
|
||||
@@ -257,17 +269,9 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
|
||||
}
|
||||
} else {
|
||||
for (i = num_vreg-1; i >= 0; i--) {
|
||||
reg_mode = regulator_get_mode(in_vreg[i].vreg);
|
||||
if (reg_mode == REGULATOR_MODE_FAST) {
|
||||
DEV_DBG("%pS->%s: %s operation not allowed\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
in_vreg[i].vreg_name);
|
||||
/*
|
||||
* This regulator is controlled by HW and cannot be
|
||||
* controlled by Sw vote
|
||||
*/
|
||||
if (msm_dss_is_hw_controlled(in_vreg[i]))
|
||||
continue;
|
||||
}
|
||||
|
||||
if (in_vreg[i].pre_off_sleep)
|
||||
usleep_range(in_vreg[i].pre_off_sleep * 1000,
|
||||
(in_vreg[i].pre_off_sleep * 1000) + 10);
|
||||
@@ -381,27 +385,41 @@ int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
|
||||
} /* msm_dss_get_clk */
|
||||
EXPORT_SYMBOL(msm_dss_get_clk);
|
||||
|
||||
int msm_dss_single_clk_set_rate(struct dss_clk *clk)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!clk) {
|
||||
DEV_ERR("invalid clk struct\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DEV_DBG("%pS->%s: set_rate '%s'\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
clk->clk_name);
|
||||
|
||||
if (clk->type != DSS_CLK_AHB) {
|
||||
rc = clk_set_rate(clk->clk, clk->rate);
|
||||
if (rc)
|
||||
DEV_ERR("%pS->%s: %s failed. rc=%d\n",
|
||||
__builtin_return_address(0),
|
||||
__func__,
|
||||
clk->clk_name, rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
} /* msm_dss_single_clk_set_rate */
|
||||
EXPORT_SYMBOL(msm_dss_single_clk_set_rate);
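The new per-clock helper lets a caller update a single clock without re-applying the rate of every entry in the array (as sde_power_clk_set_rate does further below). A hedged caller sketch — only msm_dss_single_clk_set_rate and the dss_clk fields come from this snapshot, the rest is illustrative:

/* Illustrative: raise just one named clock after recomputing its rate. */
static int example_set_core_clk(struct dss_clk *clk_array, int num_clk,
		unsigned long new_rate)
{
	int i;

	for (i = 0; i < num_clk; i++) {
		if (!strcmp(clk_array[i].clk_name, "core_clk")) {
			clk_array[i].rate = new_rate;
			return msm_dss_single_clk_set_rate(&clk_array[i]);
		}
	}

	return -EINVAL;
}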
|
||||
|
||||
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
|
||||
{
|
||||
int i, rc = 0;
|
||||
|
||||
for (i = 0; i < num_clk; i++) {
|
||||
if (clk_arry[i].clk) {
|
||||
if (clk_arry[i].type != DSS_CLK_AHB) {
|
||||
DEV_DBG("%pS->%s: '%s' rate %ld\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
clk_arry[i].clk_name,
|
||||
clk_arry[i].rate);
|
||||
rc = clk_set_rate(clk_arry[i].clk,
|
||||
clk_arry[i].rate);
|
||||
if (rc) {
|
||||
DEV_ERR("%pS->%s: %s failed. rc=%d\n",
|
||||
__builtin_return_address(0),
|
||||
__func__,
|
||||
clk_arry[i].clk_name, rc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
rc = msm_dss_single_clk_set_rate(&clk_arry[i]);
|
||||
if (rc)
|
||||
break;
|
||||
} else {
|
||||
DEV_ERR("%pS->%s: '%s' is not available\n",
|
||||
__builtin_return_address(0), __func__,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
|
||||
@@ -1129,7 +1129,7 @@ int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
|
||||
sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
|
||||
requested_clk_rate, prev_clk_rate, true);
|
||||
mp->clk_config[i].rate = rate;
|
||||
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
|
||||
rc = msm_dss_single_clk_set_rate(&mp->clk_config[i]);
|
||||
if (!rc)
|
||||
sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
|
||||
requested_clk_rate, prev_clk_rate, false);
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
#define SDE_RSC_WRAPPER_DBG_NAME "sde_rsc_wrapper"
|
||||
|
||||
#define SINGLE_TCS_EXECUTION_TIME_V1 1064000
|
||||
#define SINGLE_TCS_EXECUTION_TIME_V2 850000
|
||||
#define SINGLE_TCS_EXECUTION_TIME_V2 930000
|
||||
|
||||
#define RSC_MODE_INSTRUCTION_TIME 100
|
||||
#define RSC_MODE_THRESHOLD_OVERHEAD 2700
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012, 2017-2019, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __SDE_IO_UTIL_H__
|
||||
@@ -96,6 +96,7 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
|
||||
int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
|
||||
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
|
||||
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
|
||||
int msm_dss_single_clk_set_rate(struct dss_clk *clk);
|
||||
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
|
||||
|
||||
int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
|
||||
|
||||