Dataset columns: python_code (string, lengths 0 to 1.8M), repo_name (7 distinct values), file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: MIT /* * Copyright 2019 Intel Corporation. */ #include "i915_drv.h" #include "i915_utils.h" #include "intel_pch.h" /* Map PCH device id to PCH type, or PCH_NONE if unknown. */ static enum intel_pch intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) { switch (id) { case INTEL_PCH_IBX_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n"); drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5); return PCH_IBX; case INTEL_PCH_CPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n"); drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv)); return PCH_CPT; case INTEL_PCH_PPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n"); drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv)); /* PPT is CPT compatible */ return PCH_CPT; case INTEL_PCH_LPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); drm_WARN_ON(&dev_priv->drm, IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_WPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); drm_WARN_ON(&dev_priv->drm, IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)); /* WPT is LPT compatible */ return PCH_LPT; case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); drm_WARN_ON(&dev_priv->drm, !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv)); /* WPT is LPT compatible */ return PCH_LPT; case INTEL_PCH_SPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n"); drm_WARN_ON(&dev_priv->drm, !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv)); /* KBP is SPT compatible */ return PCH_SPT; case INTEL_PCH_CNP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n"); drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake LP PCH (CNP-LP)\n"); drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CMP_DEVICE_ID_TYPE: case INTEL_PCH_CMP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n"); drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv) && !IS_ROCKETLAKE(dev_priv)); /* CMP is CNP compatible */ return PCH_CNP; case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n"); 
drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv) && !IS_COMETLAKE(dev_priv)); /* CMP-V is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n"); drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))); /* MCC is TGP compatible */ return PCH_TGP; case INTEL_PCH_TGP_DEVICE_ID_TYPE: case INTEL_PCH_TGP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) && !IS_ROCKETLAKE(dev_priv) && !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))); /* JSP is ICP compatible */ return PCH_ICP; case INTEL_PCH_ADP_DEVICE_ID_TYPE: case INTEL_PCH_ADP2_DEVICE_ID_TYPE: case INTEL_PCH_ADP3_DEVICE_ID_TYPE: case INTEL_PCH_ADP4_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); return PCH_ADP; case INTEL_PCH_MTP_DEVICE_ID_TYPE: case INTEL_PCH_MTP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv)); return PCH_MTP; default: return PCH_NONE; } } static bool intel_is_virt_pch(unsigned short id, unsigned short svendor, unsigned short sdevice) { return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || id == INTEL_PCH_P3X_DEVICE_ID_TYPE || (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && sdevice == PCI_SUBDEVICE_ID_QEMU)); } static void intel_virt_detect_pch(const struct drm_i915_private *dev_priv, unsigned short *pch_id, enum intel_pch *pch_type) { unsigned short id = 0; /* * In a virtualized passthrough environment we can be in a * setup where the ISA bridge is not able to be passed through. * In this case, a south bridge can be emulated and we have to * make an educated guess as to which PCH is really there. 
*/ if (IS_METEORLAKE(dev_priv)) id = INTEL_PCH_MTP_DEVICE_ID_TYPE; else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) id = INTEL_PCH_ADP_DEVICE_ID_TYPE; else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) id = INTEL_PCH_MCC_DEVICE_ID_TYPE; else if (IS_ICELAKE(dev_priv)) id = INTEL_PCH_ICP_DEVICE_ID_TYPE; else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) id = INTEL_PCH_CNP_DEVICE_ID_TYPE; else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) id = INTEL_PCH_SPT_DEVICE_ID_TYPE; else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) id = INTEL_PCH_LPT_DEVICE_ID_TYPE; else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv)) id = INTEL_PCH_CPT_DEVICE_ID_TYPE; else if (GRAPHICS_VER(dev_priv) == 5) id = INTEL_PCH_IBX_DEVICE_ID_TYPE; if (id) drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id); else drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n"); *pch_type = intel_pch_type(dev_priv, id); /* Sanity check virtual PCH id */ if (drm_WARN_ON(&dev_priv->drm, id && *pch_type == PCH_NONE)) id = 0; *pch_id = id; } void intel_detect_pch(struct drm_i915_private *dev_priv) { struct pci_dev *pch = NULL; unsigned short id; enum intel_pch pch_type; /* DG1 has south engine display on the same PCI device */ if (IS_DG1(dev_priv)) { dev_priv->pch_type = PCH_DG1; return; } else if (IS_DG2(dev_priv)) { dev_priv->pch_type = PCH_DG2; return; } /* * The reason to probe ISA bridge instead of Dev31:Fun0 is to * make graphics device passthrough work easy for VMM, that only * need to expose ISA bridge to let driver know the real hardware * underneath. This is a requirement from virtualization team. * * In some virtualized environments (e.g. XEN), there is irrelevant * ISA bridge in the system. To work reliably, we should scan trhough * all the ISA bridge devices and check for the first match, instead * of only checking the first one. */ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor != PCI_VENDOR_ID_INTEL) continue; id = pch->device & INTEL_PCH_DEVICE_ID_MASK; pch_type = intel_pch_type(dev_priv, id); if (pch_type != PCH_NONE) { dev_priv->pch_type = pch_type; dev_priv->pch_id = id; break; } else if (intel_is_virt_pch(id, pch->subsystem_vendor, pch->subsystem_device)) { intel_virt_detect_pch(dev_priv, &id, &pch_type); dev_priv->pch_type = pch_type; dev_priv->pch_id = id; break; } } /* * Use PCH_NOP (PCH but no South Display) for PCH platforms without * display. */ if (pch && !HAS_DISPLAY(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "Display disabled, reverting to NOP PCH\n"); dev_priv->pch_type = PCH_NOP; dev_priv->pch_id = 0; } else if (!pch) { if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) { intel_virt_detect_pch(dev_priv, &id, &pch_type); dev_priv->pch_type = pch_type; dev_priv->pch_id = id; } else { drm_dbg_kms(&dev_priv->drm, "No PCH found.\n"); } } pci_dev_put(pch); }
linux-master
drivers/gpu/drm/i915/soc/intel_pch.c
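intel_pch_type() above maps the masked PCI device ID of the ISA bridge to a PCH generation, and intel_detect_pch() falls back to an educated virtual guess when no bridge matches. A minimal standalone C sketch of that masked-ID lookup pattern (the mask and device IDs below are placeholders, not the real i915 constants):

/* Illustrative sketch of the masked-ID lookup used by intel_pch_type() above.
 * The mask and the IDs below are placeholders, not the real i915 values. */
#include <stddef.h>
#include <stdio.h>

enum pch_type { PCH_NONE, PCH_CPT, PCH_LPT, PCH_SPT };

struct pch_entry {
	unsigned short id;	/* masked PCI device ID of the ISA bridge */
	enum pch_type type;
};

static enum pch_type lookup_pch(unsigned short device)
{
	static const struct pch_entry table[] = {
		{ 0x1c00, PCH_CPT },	/* placeholder IDs */
		{ 0x8c00, PCH_LPT },
		{ 0xa100, PCH_SPT },
	};
	unsigned short id = device & 0xff00;	/* placeholder mask */
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == id)
			return table[i].type;

	return PCH_NONE;	/* unknown bridge: caller may fall back to a virtual guess */
}

int main(void)
{
	printf("%d\n", lookup_pch(0x8c4e));	/* maps to PCH_LPT in this toy table */
	return 0;
}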
// SPDX-License-Identifier: MIT /* * Copyright(c) 2020 Intel Corporation. */ #include <linux/workqueue.h> #include "gem/i915_gem_context.h" #include "gt/intel_context.h" #include "gt/intel_gt.h" #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_irq.h" #include "intel_pxp_regs.h" #include "intel_pxp_session.h" #include "intel_pxp_tee.h" #include "intel_pxp_types.h" /** * DOC: PXP * * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms. * It allows execution and flip to display of protected (i.e. encrypted) * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig. * * Objects can opt-in to PXP encryption at creation time via the * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be * correctly protected they must be used in conjunction with a context created * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation * of those two uapi flags for details and restrictions. * * Protected objects are tied to a pxp session; currently we only support one * session, which i915 manages and whose index is available in the uapi * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting * protected objects. * The session is invalidated by the HW when certain events occur (e.g. * suspend/resume). When this happens, all the objects that were used with the * session are marked as invalid and all contexts marked as using protected * content are banned. Any further attempt at using them in an execbuf call is * rejected, while flips are converted to black frames. * * Some of the PXP setup operations are performed by the Management Engine, * which is handled by the mei driver; communication between i915 and mei is * performed via the mei_pxp component module. */ bool intel_pxp_is_supported(const struct intel_pxp *pxp) { return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp; } bool intel_pxp_is_enabled(const struct intel_pxp *pxp) { return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce; } bool intel_pxp_is_active(const struct intel_pxp *pxp) { return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid; } static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable) { u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) : _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES); intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val); } static void kcr_pxp_enable(const struct intel_pxp *pxp) { kcr_pxp_set_status(pxp, true); } static void kcr_pxp_disable(const struct intel_pxp *pxp) { kcr_pxp_set_status(pxp, false); } static int create_vcs_context(struct intel_pxp *pxp) { static struct lock_class_key pxp_lock; struct intel_gt *gt = pxp->ctrl_gt; struct intel_engine_cs *engine; struct intel_context *ce; int i; /* * Find the first VCS engine present. 
We're guaranteed there is one * if we're in this function due to the check in has_pxp */ for (i = 0, engine = NULL; !engine; i++) engine = gt->engine_class[VIDEO_DECODE_CLASS][i]; GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS); ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K, I915_GEM_HWS_PXP_ADDR, &pxp_lock, "pxp_context"); if (IS_ERR(ce)) { drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n"); return PTR_ERR(ce); } pxp->ce = ce; return 0; } static void destroy_vcs_context(struct intel_pxp *pxp) { if (pxp->ce) intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce)); } static void pxp_init_full(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; int ret; /* * we'll use the completion to check if there is a termination pending, * so we start it as completed and we reinit it when a termination * is triggered. */ init_completion(&pxp->termination); complete_all(&pxp->termination); if (pxp->ctrl_gt->type == GT_MEDIA) pxp->kcr_base = MTL_KCR_BASE; else pxp->kcr_base = GEN12_KCR_BASE; intel_pxp_session_management_init(pxp); ret = create_vcs_context(pxp); if (ret) return; if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) ret = intel_pxp_gsccs_init(pxp); else ret = intel_pxp_tee_component_init(pxp); if (ret) goto out_context; drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n"); return; out_context: destroy_vcs_context(pxp); } static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915) { /* * NOTE: Only certain platforms require PXP-tee-backend dependencies * for HuC authentication. For now, its limited to DG2. */ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) && intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc)) return to_gt(i915); return NULL; } static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915) { if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp) return NULL; /* * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine * on the media GT. NOTE: if we have a media-tile with a GSC-engine, * the VDBOX is already present so skip that check. We also have to * ensure the GSC and HUC firmware are coming online */ if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) && intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) && intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw)) return i915->media_gt; /* * Else we rely on mei-pxp module but only on legacy platforms * prior to having separate media GTs and has a valid VDBOX. */ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915))) return to_gt(i915); return NULL; } int intel_pxp_init(struct drm_i915_private *i915) { struct intel_gt *gt; bool is_full_feature = false; /* * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since * we still need it if PXP's backend tee transport is needed. 
*/ gt = find_gt_for_required_protected_content(i915); if (gt) is_full_feature = true; else gt = find_gt_for_required_teelink(i915); if (!gt) return -ENODEV; /* * At this point, we will either enable full featured PXP capabilities * including session and object management, or we will init the backend tee * channel for internal users such as HuC loading by GSC */ i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL); if (!i915->pxp) return -ENOMEM; /* init common info used by all feature-mode usages*/ i915->pxp->ctrl_gt = gt; mutex_init(&i915->pxp->tee_mutex); /* * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL * such as DG2, we can skip the init of the full PXP session/object management * and just init the tee channel. */ if (is_full_feature) pxp_init_full(i915->pxp); else intel_pxp_tee_component_init(i915->pxp); return 0; } void intel_pxp_fini(struct drm_i915_private *i915) { if (!i915->pxp) return; i915->pxp->arb_is_valid = false; if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0)) intel_pxp_gsccs_fini(i915->pxp); else intel_pxp_tee_component_fini(i915->pxp); destroy_vcs_context(i915->pxp); kfree(i915->pxp); i915->pxp = NULL; } void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp) { pxp->arb_is_valid = false; reinit_completion(&pxp->termination); } static void pxp_queue_termination(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; /* * We want to get the same effect as if we received a termination * interrupt, so just pretend that we did. */ spin_lock_irq(gt->irq_lock); intel_pxp_mark_termination_in_progress(pxp); pxp->session_events |= PXP_TERMINATION_REQUEST; queue_work(system_unbound_wq, &pxp->session_work); spin_unlock_irq(gt->irq_lock); } static bool pxp_component_bound(struct intel_pxp *pxp) { bool bound = false; mutex_lock(&pxp->tee_mutex); if (pxp->pxp_component) bound = true; mutex_unlock(&pxp->tee_mutex); return bound; } int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp) { if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) return GSCFW_MAX_ROUND_TRIP_LATENCY_MS; else return 250; } static int __pxp_global_teardown_final(struct intel_pxp *pxp) { int timeout; if (!pxp->arb_is_valid) return 0; /* * To ensure synchronous and coherent session teardown completion * in response to suspend or shutdown triggers, don't use a worker. */ intel_pxp_mark_termination_in_progress(pxp); intel_pxp_terminate(pxp, false); timeout = intel_pxp_get_backend_timeout_ms(pxp); if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) return -ETIMEDOUT; return 0; } static int __pxp_global_teardown_restart(struct intel_pxp *pxp) { int timeout; if (pxp->arb_is_valid) return 0; /* * The arb-session is currently inactive and we are doing a reset and restart * due to a runtime event. Use the worker that was designed for this. */ pxp_queue_termination(pxp); timeout = intel_pxp_get_backend_timeout_ms(pxp); if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) return -ETIMEDOUT; return 0; } void intel_pxp_end(struct intel_pxp *pxp) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; intel_wakeref_t wakeref; if (!intel_pxp_is_enabled(pxp)) return; wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_lock(&pxp->arb_mutex); if (__pxp_global_teardown_final(pxp)) drm_dbg(&i915->drm, "PXP end timed out\n"); mutex_unlock(&pxp->arb_mutex); intel_pxp_fini_hw(pxp); intel_runtime_pm_put(&i915->runtime_pm, wakeref); } /* * this helper is used by both intel_pxp_start and by * the GET_PARAM IOCTL that user space calls. 
Thus, the * return values here should match the UAPI spec. */ int intel_pxp_get_readiness_status(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return -ENODEV; if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) { if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250)) return 2; } else { if (wait_for(pxp_component_bound(pxp), 250)) return 2; } return 1; } /* * the arb session is restarted from the irq work when we receive the * termination completion interrupt */ int intel_pxp_start(struct intel_pxp *pxp) { int ret = 0; ret = intel_pxp_get_readiness_status(pxp); if (ret < 0) return ret; else if (ret > 1) return -EIO; /* per UAPI spec, user may retry later */ mutex_lock(&pxp->arb_mutex); ret = __pxp_global_teardown_restart(pxp); if (ret) goto unlock; /* make sure the compiler doesn't optimize the double access */ barrier(); if (!pxp->arb_is_valid) ret = -EIO; unlock: mutex_unlock(&pxp->arb_mutex); return ret; } void intel_pxp_init_hw(struct intel_pxp *pxp) { kcr_pxp_enable(pxp); intel_pxp_irq_enable(pxp); } void intel_pxp_fini_hw(struct intel_pxp *pxp) { kcr_pxp_disable(pxp); intel_pxp_irq_disable(pxp); } int intel_pxp_key_check(struct intel_pxp *pxp, struct drm_i915_gem_object *obj, bool assign) { if (!intel_pxp_is_active(pxp)) return -ENODEV; if (!i915_gem_object_is_protected(obj)) return -EINVAL; GEM_BUG_ON(!pxp->key_instance); /* * If this is the first time we're using this object, it's not * encrypted yet; it will be encrypted with the current key, so mark it * as such. If the object is already encrypted, check instead if the * used key is still valid. */ if (!obj->pxp_key_instance && assign) obj->pxp_key_instance = pxp->key_instance; if (obj->pxp_key_instance != pxp->key_instance) return -ENOEXEC; return 0; } void intel_pxp_invalidate(struct intel_pxp *pxp) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct i915_gem_context *ctx, *cn; /* ban all contexts marked as protected */ spin_lock_irq(&i915->gem.contexts.lock); list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { struct i915_gem_engines_iter it; struct intel_context *ce; if (!kref_get_unless_zero(&ctx->ref)) continue; if (likely(!i915_gem_context_uses_protected_content(ctx))) { i915_gem_context_put(ctx); continue; } spin_unlock_irq(&i915->gem.contexts.lock); /* * By the time we get here we are either going to suspend with * quiesced execution or the HW keys are already long gone and * in this case it is worthless to attempt to close the context * and wait for its execution. It will hang the GPU if it has * not already. So, as a fast mitigation, we can ban the * context as quick as we can. That might race with the * execbuffer, but currently this is the best that can be done. */ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) intel_context_ban(ce, NULL); i915_gem_context_unlock_engines(ctx); /* * The context has been banned, no need to keep the wakeref. * This is safe from races because the only other place this * is touched is context_release and we're holding a ctx ref */ if (ctx->pxp_wakeref) { intel_runtime_pm_put(&i915->runtime_pm, ctx->pxp_wakeref); ctx->pxp_wakeref = 0; } spin_lock_irq(&i915->gem.contexts.lock); list_safe_reset_next(ctx, cn, link); i915_gem_context_put(ctx); } spin_unlock_irq(&i915->gem.contexts.lock); }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp.c
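kcr_pxp_set_status() above programs KCR_INIT through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). On such masked registers the upper 16 bits of the written value act as a per-bit write enable and the lower 16 bits carry the new bit values, so a single write can flip one bit without a read-modify-write cycle. A small standalone model of that convention (not the kernel macros themselves):

/* Standalone model of the i915 "masked register" write convention used by
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() in kcr_pxp_set_status() above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t masked_bit_enable(uint32_t bit)  { return (bit << 16) | bit; }
static uint32_t masked_bit_disable(uint32_t bit) { return  bit << 16; }

/* What the hardware conceptually does with such a write: only bits whose
 * write-enable (upper half) is set are updated from the lower half. */
static uint32_t apply_masked_write(uint32_t reg, uint32_t wr)
{
	uint32_t mask = wr >> 16;

	return (reg & ~mask) | (wr & mask);
}

int main(void)
{
	uint32_t reg = 0x0003;	/* bits 0 and 1 already set */

	reg = apply_masked_write(reg, masked_bit_enable(1u << 5));
	printf("0x%08x\n", reg);	/* 0x00000023: bit 5 set, bits 0-1 untouched */

	reg = apply_masked_write(reg, masked_bit_disable(1u << 5));
	printf("0x%08x\n", reg);	/* 0x00000003: bit 5 cleared again */
	return 0;
}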
// SPDX-License-Identifier: MIT /* * Copyright(c) 2020 Intel Corporation. */ #include <linux/workqueue.h> #include "gt/intel_gt_irq.h" #include "gt/intel_gt_regs.h" #include "gt/intel_gt_types.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_pxp.h" #include "intel_pxp_irq.h" #include "intel_pxp_session.h" #include "intel_pxp_types.h" #include "intel_runtime_pm.h" /** * intel_pxp_irq_handler - Handles PXP interrupts. * @pxp: pointer to pxp struct * @iir: interrupt vector */ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) { struct intel_gt *gt; if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp))) return; gt = pxp->ctrl_gt; lockdep_assert_held(gt->irq_lock); if (unlikely(!iir)) return; if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) { /* immediately mark PXP as inactive on termination */ intel_pxp_mark_termination_in_progress(pxp); pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED; } if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT) pxp->session_events |= PXP_TERMINATION_COMPLETE; if (pxp->session_events) queue_work(system_unbound_wq, &pxp->session_work); } static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts) { struct intel_uncore *uncore = gt->uncore; const u32 mask = interrupts << 16; intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask); intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask); } static inline void pxp_irq_reset(struct intel_gt *gt) { spin_lock_irq(gt->irq_lock); gen11_gt_reset_one_iir(gt, 0, GEN11_KCR); spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_enable(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; spin_lock_irq(gt->irq_lock); if (!pxp->irq_enabled) WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR)); __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS); pxp->irq_enabled = true; spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_disable(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; /* * We always need to submit a global termination when we re-enable the * interrupts, so there is no need to make sure that the session state * makes sense at the end of this function. Just make sure this is not * called in a path were the driver consider the session as valid and * doesn't call a termination on restart. */ GEM_WARN_ON(intel_pxp_is_active(pxp)); spin_lock_irq(gt->irq_lock); pxp->irq_enabled = false; __pxp_set_interrupts(gt, 0); spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); pxp_irq_reset(gt); flush_work(&pxp->session_work); }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
// SPDX-License-Identifier: MIT /* * Copyright(c) 2020 Intel Corporation. */ #include <linux/component.h> #include <drm/i915_pxp_tee_interface.h> #include <drm/i915_component.h> #include "gem/i915_gem_lmem.h" #include "i915_drv.h" #include "gt/intel_gt.h" #include "intel_pxp.h" #include "intel_pxp_cmd_interface_42.h" #include "intel_pxp_huc.h" #include "intel_pxp_session.h" #include "intel_pxp_tee.h" #include "intel_pxp_types.h" static bool is_fw_err_platform_config(u32 type) { switch (type) { case PXP_STATUS_ERROR_API_VERSION: case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: case PXP_STATUS_PLATFCONFIG_KF1_BAD: return true; default: break; } return false; } static const char * fw_err_to_string(u32 type) { switch (type) { case PXP_STATUS_ERROR_API_VERSION: return "ERR_API_VERSION"; case PXP_STATUS_NOT_READY: return "ERR_NOT_READY"; case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: case PXP_STATUS_PLATFCONFIG_KF1_BAD: return "ERR_PLATFORM_CONFIG"; default: break; } return NULL; } static int intel_pxp_tee_io_message(struct intel_pxp *pxp, void *msg_in, u32 msg_in_size, void *msg_out, u32 msg_out_max_size, u32 *msg_out_rcv_size) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct i915_pxp_component *pxp_component = pxp->pxp_component; int ret = 0; mutex_lock(&pxp->tee_mutex); /* * The binding of the component is asynchronous from i915 probe, so we * can't be sure it has happened. */ if (!pxp_component) { ret = -ENODEV; goto unlock; } ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size); if (ret) { drm_err(&i915->drm, "Failed to send PXP TEE message\n"); goto unlock; } ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size); if (ret < 0) { drm_err(&i915->drm, "Failed to receive PXP TEE message\n"); goto unlock; } if (ret > msg_out_max_size) { drm_err(&i915->drm, "Failed to receive PXP TEE message due to unexpected output size\n"); ret = -ENOSPC; goto unlock; } if (msg_out_rcv_size) *msg_out_rcv_size = ret; ret = 0; unlock: mutex_unlock(&pxp->tee_mutex); return ret; } int intel_pxp_tee_stream_message(struct intel_pxp *pxp, u8 client_id, u32 fence_id, void *msg_in, size_t msg_in_len, void *msg_out, size_t msg_out_len) { /* TODO: for bigger objects we need to use a sg of 4k pages */ const size_t max_msg_size = PAGE_SIZE; struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct i915_pxp_component *pxp_component = pxp->pxp_component; unsigned int offset = 0; struct scatterlist *sg; int ret; if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) return -ENOSPC; mutex_lock(&pxp->tee_mutex); if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) { ret = -ENODEV; goto unlock; } GEM_BUG_ON(!pxp->stream_cmd.obj); sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset); memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len); ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id, fence_id, sg, msg_in_len, sg); if (ret < 0) drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n"); else memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len); unlock: mutex_unlock(&pxp->tee_mutex); return ret; } /** * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee * @i915_kdev: pointer to i915 kernel device * @tee_kdev: pointer to tee kernel device * @data: pointer to pxp_tee_master containing the function pointers * * This bind function is called during the system boot or resume from system sleep. * * Return: return 0 if successful. 
*/ static int i915_pxp_tee_component_bind(struct device *i915_kdev, struct device *tee_kdev, void *data) { struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915->pxp; struct intel_uc *uc = &pxp->ctrl_gt->uc; intel_wakeref_t wakeref; int ret = 0; if (!HAS_HECI_PXP(i915)) { pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS); if (drm_WARN_ON(&i915->drm, !pxp->dev_link)) return -ENODEV; } mutex_lock(&pxp->tee_mutex); pxp->pxp_component = data; pxp->pxp_component->tee_dev = tee_kdev; mutex_unlock(&pxp->tee_mutex); if (intel_uc_uses_huc(uc) && intel_huc_is_loaded_by_gsc(&uc->huc)) { with_intel_runtime_pm(&i915->runtime_pm, wakeref) { /* load huc via pxp */ ret = intel_huc_fw_load_and_auth_via_gsc(&uc->huc); if (ret < 0) drm_err(&i915->drm, "failed to load huc via gsc %d\n", ret); } } /* if we are suspended, the HW will be re-initialized on resume */ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) return 0; /* the component is required to fully start the PXP HW */ if (intel_pxp_is_enabled(pxp)) intel_pxp_init_hw(pxp); intel_runtime_pm_put(&i915->runtime_pm, wakeref); return ret; } static void i915_pxp_tee_component_unbind(struct device *i915_kdev, struct device *tee_kdev, void *data) { struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915->pxp; intel_wakeref_t wakeref; if (intel_pxp_is_enabled(pxp)) with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref) intel_pxp_fini_hw(pxp); mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; mutex_unlock(&pxp->tee_mutex); if (pxp->dev_link) { device_link_del(pxp->dev_link); pxp->dev_link = NULL; } } static const struct component_ops i915_pxp_tee_component_ops = { .bind = i915_pxp_tee_component_bind, .unbind = i915_pxp_tee_component_unbind, }; static int alloc_streaming_command(struct intel_pxp *pxp) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct drm_i915_gem_object *obj = NULL; void *cmd; int err; pxp->stream_cmd.obj = NULL; pxp->stream_cmd.vaddr = NULL; if (!IS_DGFX(i915)) return 0; /* allocate lmem object of one page for PXP command memory and store it */ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(obj)) { drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n"); return PTR_ERR(obj); } err = i915_gem_object_pin_pages_unlocked(obj); if (err) { drm_err(&i915->drm, "Failed to pin gsc message page!\n"); goto out_put; } /* map the lmem into the virtual memory pointer */ cmd = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(pxp->ctrl_gt, obj, true)); if (IS_ERR(cmd)) { drm_err(&i915->drm, "Failed to map gsc message page!\n"); err = PTR_ERR(cmd); goto out_unpin; } memset(cmd, 0, obj->base.size); pxp->stream_cmd.obj = obj; pxp->stream_cmd.vaddr = cmd; return 0; out_unpin: i915_gem_object_unpin_pages(obj); out_put: i915_gem_object_put(obj); return err; } static void free_streaming_command(struct intel_pxp *pxp) { struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj); if (!obj) return; i915_gem_object_unpin_map(obj); i915_gem_object_unpin_pages(obj); i915_gem_object_put(obj); } int intel_pxp_tee_component_init(struct intel_pxp *pxp) { int ret; struct intel_gt *gt = pxp->ctrl_gt; struct drm_i915_private *i915 = gt->i915; ret = alloc_streaming_command(pxp); if (ret) return ret; ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops, I915_COMPONENT_PXP); if (ret < 0) { drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret); goto 
out_free; } pxp->pxp_component_added = true; return 0; out_free: free_streaming_command(pxp); return ret; } void intel_pxp_tee_component_fini(struct intel_pxp *pxp) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; if (!pxp->pxp_component_added) return; component_del(i915->drm.dev, &i915_pxp_tee_component_ops); pxp->pxp_component_added = false; free_streaming_command(pxp); } int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, int arb_session_id) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct pxp42_create_arb_in msg_in = {0}; struct pxp42_create_arb_out msg_out = {0}; int ret; msg_in.header.api_version = PXP_APIVER(4, 2); msg_in.header.command_id = PXP42_CMDID_INIT_SESSION; msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); msg_in.protection_mode = PXP42_ARB_SESSION_MODE_HEAVY; msg_in.session_id = arb_session_id; ret = intel_pxp_tee_io_message(pxp, &msg_in, sizeof(msg_in), &msg_out, sizeof(msg_out), NULL); if (ret) { drm_err(&i915->drm, "Failed to send tee msg init arb session, ret=[%d]\n", ret); } else if (msg_out.header.status != 0) { if (is_fw_err_platform_config(msg_out.header.status)) { drm_info_once(&i915->drm, "PXP init-arb-session-%d failed due to BIOS/SOC:0x%08x:%s\n", arb_session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); } else { drm_dbg(&i915->drm, "PXP init-arb-session--%d failed 0x%08x:%st:\n", arb_session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", msg_in.header.command_id, msg_in.header.api_version); } } return ret; } void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct pxp42_inv_stream_key_in msg_in = {0}; struct pxp42_inv_stream_key_out msg_out = {0}; int ret, trials = 0; try_again: memset(&msg_in, 0, sizeof(msg_in)); memset(&msg_out, 0, sizeof(msg_out)); msg_in.header.api_version = PXP_APIVER(4, 2); msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); ret = intel_pxp_tee_io_message(pxp, &msg_in, sizeof(msg_in), &msg_out, sizeof(msg_out), NULL); /* Cleanup coherency between GT and Firmware is critical, so try again if it fails */ if ((ret || msg_out.header.status != 0x0) && ++trials < 3) goto try_again; if (ret) { drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%u, ret=[%d]\n", session_id, ret); } else if (msg_out.header.status != 0) { if (is_fw_err_platform_config(msg_out.header.status)) { drm_info_once(&i915->drm, "PXP inv-stream-key-%u failed due to BIOS/SOC :0x%08x:%s\n", session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); } else { drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n", session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", msg_in.header.command_id, msg_in.header.api_version); } } }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
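intel_pxp_tee_io_message() above treats a positive return from the component's recv() callback as the reply length and rejects replies that exceed the caller's buffer with -ENOSPC. A standalone sketch of that length check, with a stub standing in for the mei component callback:

/* Standalone sketch of the reply-length check in intel_pxp_tee_io_message();
 * fake_recv() stands in for the mei component's recv() callback. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int fake_recv(void *buf, unsigned int buf_size, unsigned int reply_len)
{
	/* a real callback copies at most buf_size bytes and returns the reply length */
	memset(buf, 0xab, reply_len < buf_size ? reply_len : buf_size);
	return (int)reply_len;
}

static int io_message(void *msg_out, unsigned int msg_out_max,
		      unsigned int reply_len, unsigned int *rcv_size)
{
	int ret = fake_recv(msg_out, msg_out_max, reply_len);

	if (ret < 0)
		return ret;
	if ((unsigned int)ret > msg_out_max)
		return -ENOSPC;	/* reply did not fit the caller's buffer */

	if (rcv_size)
		*rcv_size = ret;
	return 0;
}

int main(void)
{
	char out[16];
	unsigned int n = 0;

	printf("%d\n", io_message(out, sizeof(out), 8, &n));	/* 0, n == 8 */
	printf("%d\n", io_message(out, sizeof(out), 32, &n));	/* -ENOSPC */
	return 0;
}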
// SPDX-License-Identifier: MIT /* * Copyright(c) 2020, Intel Corporation. All rights reserved. */ #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "i915_trace.h" #include "intel_pxp.h" #include "intel_pxp_cmd.h" #include "intel_pxp_session.h" #include "intel_pxp_types.h" /* stall until prior PXP and MFX/HCP/HUC objects are cmopleted */ #define MFX_WAIT_PXP (MFX_WAIT | \ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG) static u32 *pxp_emit_session_selection(u32 *cs, u32 idx) { *cs++ = MFX_WAIT_PXP; /* pxp off */ *cs++ = MI_FLUSH_DW; *cs++ = 0; *cs++ = 0; /* select session */ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx); *cs++ = MFX_WAIT_PXP; /* pxp on */ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = 0; *cs++ = MFX_WAIT_PXP; return cs; } static u32 *pxp_emit_inline_termination(u32 *cs) { /* session inline termination */ *cs++ = CRYPTO_KEY_EXCHANGE; *cs++ = 0; return cs; } static u32 *pxp_emit_session_termination(u32 *cs, u32 idx) { cs = pxp_emit_session_selection(cs, idx); cs = pxp_emit_inline_termination(cs); return cs; } static u32 *pxp_emit_wait(u32 *cs) { /* wait for cmds to go through */ *cs++ = MFX_WAIT_PXP; *cs++ = 0; return cs; } /* * if we ever need to terminate more than one session, we can submit multiple * selections and terminations back-to-back with a single wait at the end */ #define SELECTION_LEN 10 #define TERMINATION_LEN 2 #define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x)) #define WAIT_LEN 2 static void pxp_request_commit(struct i915_request *rq) { struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; struct intel_timeline * const tl = i915_request_timeline(rq); lockdep_unpin_lock(&tl->mutex, rq->cookie); trace_i915_request_add(rq); __i915_request_commit(rq); __i915_request_queue(rq, &attr); mutex_unlock(&tl->mutex); } int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id) { struct i915_request *rq; struct intel_context *ce = pxp->ce; u32 *cs; int err = 0; if (!intel_pxp_is_enabled(pxp)) return 0; rq = i915_request_create(ce); if (IS_ERR(rq)) return PTR_ERR(rq); if (ce->engine->emit_init_breadcrumb) { err = ce->engine->emit_init_breadcrumb(rq); if (err) goto out_rq; } cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN); if (IS_ERR(cs)) { err = PTR_ERR(cs); goto out_rq; } cs = pxp_emit_session_termination(cs, id); cs = pxp_emit_wait(cs); intel_ring_advance(rq, cs); out_rq: i915_request_get(rq); if (unlikely(err)) i915_request_set_error_once(rq, err); pxp_request_commit(rq); if (!err && i915_request_wait(rq, 0, HZ / 5) < 0) err = -ETIME; i915_request_put(rq); return err; }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
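The pxp_emit_*() helpers above append dwords through a moving u32 pointer, while intel_ring_begin() reserves exactly SESSION_TERMINATION_LEN(1) + WAIT_LEN dwords up front, so each helper must emit precisely the count its *_LEN macro advertises. A toy standalone sketch of that invariant (the opcode values are placeholders, not real MI/MFX commands):

/* Toy model of the dword-emission pattern in intel_pxp_cmd.c: helpers append
 * to a buffer via a moving pointer and must match their advertised lengths. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SELECTION_LEN   10
#define TERMINATION_LEN 2

static uint32_t *emit_session_selection(uint32_t *cs, uint32_t idx)
{
	for (int i = 0; i < SELECTION_LEN - 1; i++)
		*cs++ = 0xf0000000u | i;	/* placeholder opcodes */
	*cs++ = idx;				/* placeholder "select session" dword */
	return cs;
}

static uint32_t *emit_inline_termination(uint32_t *cs)
{
	*cs++ = 0xf1000000u;			/* placeholder opcode */
	*cs++ = 0;
	return cs;
}

int main(void)
{
	uint32_t buf[SELECTION_LEN + TERMINATION_LEN];
	uint32_t *cs = buf;

	cs = emit_session_selection(cs, 0);
	cs = emit_inline_termination(cs);

	/* the ring-space reservation only works if the emitted count matches */
	assert(cs - buf == SELECTION_LEN + TERMINATION_LEN);
	printf("emitted %td dwords\n", cs - buf);
	return 0;
}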
// SPDX-License-Identifier: MIT /* * Copyright(c) 2023 Intel Corporation. */ #include "gem/i915_gem_internal.h" #include "gt/intel_context.h" #include "gt/intel_gt.h" #include "gt/uc/intel_gsc_fw.h" #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_cmd_interface_42.h" #include "intel_pxp_cmd_interface_43.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_types.h" static bool is_fw_err_platform_config(u32 type) { switch (type) { case PXP_STATUS_ERROR_API_VERSION: case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: case PXP_STATUS_PLATFCONFIG_KF1_BAD: return true; default: break; } return false; } static const char * fw_err_to_string(u32 type) { switch (type) { case PXP_STATUS_ERROR_API_VERSION: return "ERR_API_VERSION"; case PXP_STATUS_NOT_READY: return "ERR_NOT_READY"; case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: case PXP_STATUS_PLATFCONFIG_KF1_BAD: return "ERR_PLATFORM_CONFIG"; default: break; } return NULL; } static int gsccs_send_message(struct intel_pxp *pxp, void *msg_in, size_t msg_in_size, void *msg_out, size_t msg_out_size_max, size_t *msg_out_len, u64 *gsc_msg_handle_retry) { struct intel_gt *gt = pxp->ctrl_gt; struct drm_i915_private *i915 = gt->i915; struct gsccs_session_resources *exec_res = &pxp->gsccs_res; struct intel_gsc_mtl_header *header = exec_res->pkt_vaddr; struct intel_gsc_heci_non_priv_pkt pkt; size_t max_msg_size; u32 reply_size; int ret; if (!exec_res->ce) return -ENODEV; max_msg_size = PXP43_MAX_HECI_INOUT_SIZE - sizeof(*header); if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size) return -ENOSPC; if (!exec_res->pkt_vma || !exec_res->bb_vma) return -ENOENT; GEM_BUG_ON(exec_res->pkt_vma->size < (2 * PXP43_MAX_HECI_INOUT_SIZE)); mutex_lock(&pxp->tee_mutex); memset(header, 0, sizeof(*header)); intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_PXP, msg_in_size + sizeof(*header), exec_res->host_session_handle); /* check if this is a host-session-handle cleanup call (empty packet) */ if (!msg_in && !msg_out) header->flags |= GSC_INFLAG_MSG_CLEANUP; /* copy caller provided gsc message handle if this is polling for a prior msg completion */ header->gsc_message_handle = *gsc_msg_handle_retry; /* NOTE: zero size packets are used for session-cleanups */ if (msg_in && msg_in_size) memcpy(exec_res->pkt_vaddr + sizeof(*header), msg_in, msg_in_size); pkt.addr_in = i915_vma_offset(exec_res->pkt_vma); pkt.size_in = header->message_size; pkt.addr_out = pkt.addr_in + PXP43_MAX_HECI_INOUT_SIZE; pkt.size_out = msg_out_size_max + sizeof(*header); pkt.heci_pkt_vma = exec_res->pkt_vma; pkt.bb_vma = exec_res->bb_vma; /* * Before submitting, let's clear-out the validity marker on the reply offset. * We use offset PXP43_MAX_HECI_INOUT_SIZE for reply location so point header there. 
*/ header = exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE; header->validity_marker = 0; ret = intel_gsc_uc_heci_cmd_submit_nonpriv(&gt->uc.gsc, exec_res->ce, &pkt, exec_res->bb_vaddr, GSC_REPLY_LATENCY_MS); if (ret) { drm_err(&i915->drm, "failed to send gsc PXP msg (%d)\n", ret); goto unlock; } /* Response validity marker, status and busyness */ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) { drm_err(&i915->drm, "gsc PXP reply with invalid validity marker\n"); ret = -EINVAL; goto unlock; } if (header->status != 0) { drm_dbg(&i915->drm, "gsc PXP reply status has error = 0x%08x\n", header->status); ret = -EINVAL; goto unlock; } if (header->flags & GSC_OUTFLAG_MSG_PENDING) { drm_dbg(&i915->drm, "gsc PXP reply is busy\n"); /* * When the GSC firmware replies with pending bit, it means that the requested * operation has begun but the completion is pending and the caller needs * to re-request with the gsc_message_handle that was returned by the firmware. * until the pending bit is turned off. */ *gsc_msg_handle_retry = header->gsc_message_handle; ret = -EAGAIN; goto unlock; } reply_size = header->message_size - sizeof(*header); if (reply_size > msg_out_size_max) { drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%zu)\n", reply_size, msg_out_size_max); reply_size = msg_out_size_max; } if (msg_out) memcpy(msg_out, exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE + sizeof(*header), reply_size); if (msg_out_len) *msg_out_len = reply_size; unlock: mutex_unlock(&pxp->tee_mutex); return ret; } static int gsccs_send_message_retry_complete(struct intel_pxp *pxp, void *msg_in, size_t msg_in_size, void *msg_out, size_t msg_out_size_max, size_t *msg_out_len) { u64 gsc_session_retry = 0; int ret, tries = 0; /* * Keep sending request if GSC firmware was busy. Based on fw specs + * sw overhead (and testing) we expect a worst case pending-bit delay of * GSC_PENDING_RETRY_MAXCOUNT x GSC_PENDING_RETRY_PAUSE_MS millisecs. */ do { ret = gsccs_send_message(pxp, msg_in, msg_in_size, msg_out, msg_out_size_max, msg_out_len, &gsc_session_retry); /* Only try again if gsc says so */ if (ret != -EAGAIN) break; msleep(GSC_PENDING_RETRY_PAUSE_MS); } while (++tries < GSC_PENDING_RETRY_MAXCOUNT); return ret; } bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) { /* * GSC-fw loading, HuC-fw loading, HuC-fw authentication and * GSC-proxy init flow (requiring an mei component driver) * must all occur first before we can start requesting for PXP * sessions. Checking for completion on HuC authentication and * gsc-proxy init flow (the last set of dependencies that * are out of order) will suffice. 
*/ if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) && intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true)) return true; return false; } int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct pxp43_create_arb_in msg_in = {0}; struct pxp43_create_arb_out msg_out = {0}; int ret; msg_in.header.api_version = PXP_APIVER(4, 3); msg_in.header.command_id = PXP43_CMDID_INIT_SESSION; msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, arb_session_id) | FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) | FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0)); msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB; ret = gsccs_send_message_retry_complete(pxp, &msg_in, sizeof(msg_in), &msg_out, sizeof(msg_out), NULL); if (ret) { drm_err(&i915->drm, "Failed to init session %d, ret=[%d]\n", arb_session_id, ret); } else if (msg_out.header.status != 0) { if (is_fw_err_platform_config(msg_out.header.status)) { drm_info_once(&i915->drm, "PXP init-session-%d failed due to BIOS/SOC:0x%08x:%s\n", arb_session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); } else { drm_dbg(&i915->drm, "PXP init-session-%d failed 0x%08x:%st:\n", arb_session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", msg_in.header.command_id, msg_in.header.api_version); } } return ret; } void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; struct pxp42_inv_stream_key_in msg_in = {0}; struct pxp42_inv_stream_key_out msg_out = {0}; int ret = 0; /* * Stream key invalidation reuses the same version 4.2 input/output * command format but firmware requires 4.3 API interaction */ msg_in.header.api_version = PXP_APIVER(4, 3); msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); ret = gsccs_send_message_retry_complete(pxp, &msg_in, sizeof(msg_in), &msg_out, sizeof(msg_out), NULL); if (ret) { drm_err(&i915->drm, "Failed to inv-stream-key-%u, ret=[%d]\n", session_id, ret); } else if (msg_out.header.status != 0) { if (is_fw_err_platform_config(msg_out.header.status)) { drm_info_once(&i915->drm, "PXP inv-stream-key-%u failed due to BIOS/SOC :0x%08x:%s\n", session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); } else { drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n", session_id, msg_out.header.status, fw_err_to_string(msg_out.header.status)); drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", msg_in.header.command_id, msg_in.header.api_version); } } } static void gsccs_cleanup_fw_host_session_handle(struct intel_pxp *pxp) { struct drm_i915_private *i915 = pxp->ctrl_gt->i915; int ret; ret = gsccs_send_message_retry_complete(pxp, NULL, 0, NULL, 0, NULL); if (ret) drm_dbg(&i915->drm, "Failed to send gsccs msg host-session-cleanup: ret=[%d]\n", ret); } static void gsccs_destroy_execution_resource(struct intel_pxp *pxp) { struct gsccs_session_resources *exec_res = &pxp->gsccs_res; if (exec_res->host_session_handle) 
gsccs_cleanup_fw_host_session_handle(pxp); if (exec_res->ce) intel_context_put(exec_res->ce); if (exec_res->bb_vma) i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP); if (exec_res->pkt_vma) i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP); memset(exec_res, 0, sizeof(*exec_res)); } static int gsccs_create_buffer(struct intel_gt *gt, const char *bufname, size_t size, struct i915_vma **vma, void **map) { struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; int err = 0; obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) { drm_err(&i915->drm, "Failed to allocate gsccs backend %s.\n", bufname); err = PTR_ERR(obj); goto out_none; } *vma = i915_vma_instance(obj, gt->vm, NULL); if (IS_ERR(*vma)) { drm_err(&i915->drm, "Failed to vma-instance gsccs backend %s.\n", bufname); err = PTR_ERR(*vma); goto out_put; } /* return a virtual pointer */ *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true)); if (IS_ERR(*map)) { drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname); err = PTR_ERR(*map); goto out_put; } /* all PXP sessions commands are treated as non-privileged */ err = i915_vma_pin(*vma, 0, 0, PIN_USER); if (err) { drm_err(&i915->drm, "Failed to vma-pin gsccs backend %s.\n", bufname); goto out_unmap; } return 0; out_unmap: i915_gem_object_unpin_map(obj); out_put: i915_gem_object_put(obj); out_none: *vma = NULL; *map = NULL; return err; } static int gsccs_allocate_execution_resource(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; struct gsccs_session_resources *exec_res = &pxp->gsccs_res; struct intel_engine_cs *engine = gt->engine[GSC0]; struct intel_context *ce; int err = 0; /* * First, ensure the GSC engine is present. * NOTE: Backend would only be called with the correct gt. */ if (!engine) return -ENODEV; /* * Now, allocate, pin and map two objects, one for the heci message packet * and another for the batch buffer we submit into GSC engine (that includes the packet). * NOTE: GSC-CS backend is currently only supported on MTL, so we allocate shmem. */ err = gsccs_create_buffer(pxp->ctrl_gt, "Heci Packet", 2 * PXP43_MAX_HECI_INOUT_SIZE, &exec_res->pkt_vma, &exec_res->pkt_vaddr); if (err) return err; err = gsccs_create_buffer(pxp->ctrl_gt, "Batch Buffer", PAGE_SIZE, &exec_res->bb_vma, &exec_res->bb_vaddr); if (err) goto free_pkt; /* Finally, create an intel_context to be used during the submission */ ce = intel_context_create(engine); if (IS_ERR(ce)) { drm_err(&gt->i915->drm, "Failed creating gsccs backend ctx\n"); err = PTR_ERR(ce); goto free_batch; } i915_vm_put(ce->vm); ce->vm = i915_vm_get(pxp->ctrl_gt->vm); exec_res->ce = ce; /* initialize host-session-handle (for all i915-to-gsc-firmware PXP cmds) */ get_random_bytes(&exec_res->host_session_handle, sizeof(exec_res->host_session_handle)); return 0; free_batch: i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP); free_pkt: i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP); memset(exec_res, 0, sizeof(*exec_res)); return err; } void intel_pxp_gsccs_fini(struct intel_pxp *pxp) { intel_wakeref_t wakeref; gsccs_destroy_execution_resource(pxp); with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref) intel_pxp_fini_hw(pxp); } int intel_pxp_gsccs_init(struct intel_pxp *pxp) { int ret; intel_wakeref_t wakeref; ret = gsccs_allocate_execution_resource(pxp); if (!ret) { with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref) intel_pxp_init_hw(pxp); } return ret; }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
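gsccs_send_message_retry_complete() above keeps re-issuing a request while the GSC firmware reports it as still pending (-EAGAIN), pausing between attempts and giving up after a fixed retry budget. A minimal standalone model of that policy; the backend stub and the limit value are made up for illustration:

/* Standalone model of the pending/retry loop in
 * gsccs_send_message_retry_complete(); the backend and limit are placeholders. */
#include <errno.h>
#include <stdio.h>

#define RETRY_MAXCOUNT 40	/* placeholder for GSC_PENDING_RETRY_MAXCOUNT */

/* pretend backend: reports -EAGAIN until the operation completes */
static int fake_backend(int *remaining)
{
	return --(*remaining) > 0 ? -EAGAIN : 0;
}

static int send_with_retry(int remaining)
{
	int ret, tries = 0;

	do {
		ret = fake_backend(&remaining);
		if (ret != -EAGAIN)	/* only retry while the fw says "pending" */
			break;
		/* the real code sleeps GSC_PENDING_RETRY_PAUSE_MS here */
	} while (++tries < RETRY_MAXCOUNT);

	return ret;
}

int main(void)
{
	printf("%d\n", send_with_retry(3));	/* 0: completed within the budget */
	printf("%d\n", send_with_retry(100));	/* -EAGAIN: budget exhausted */
	return 0;
}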
// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */

#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
#include "intel_pxp_types.h"

void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
	if (!intel_pxp_is_enabled(pxp))
		return;

	intel_pxp_end(pxp);

	intel_pxp_invalidate(pxp);
}

void intel_pxp_suspend(struct intel_pxp *pxp)
{
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref) {
		intel_pxp_fini_hw(pxp);
		pxp->hw_state_invalidated = false;
	}
}

void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
	if (!intel_pxp_is_enabled(pxp))
		return;

	/*
	 * The PXP component gets automatically unbound when we go into S3 and
	 * re-bound after we come out, so in that scenario we can defer the
	 * hw init to the bind call.
	 * NOTE: GSC-CS backend doesn't rely on components.
	 */
	if (!HAS_ENGINE(pxp->ctrl_gt, GSC0) && !pxp->pxp_component)
		return;

	intel_pxp_init_hw(pxp);
}

void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
{
	if (!intel_pxp_is_enabled(pxp))
		return;

	pxp->arb_is_valid = false;

	intel_pxp_fini_hw(pxp);

	pxp->hw_state_invalidated = false;
}
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <linux/debugfs.h> #include <linux/string_helpers.h> #include <drm/drm_print.h> #include "gt/intel_gt_debugfs.h" #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_debugfs.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_irq.h" #include "intel_pxp_types.h" static int pxp_info_show(struct seq_file *m, void *data) { struct intel_pxp *pxp = m->private; struct drm_printer p = drm_seq_file_printer(m); if (!intel_pxp_is_enabled(pxp)) { drm_printf(&p, "pxp disabled\n"); return 0; } drm_printf(&p, "active: %s\n", str_yes_no(intel_pxp_is_active(pxp))); drm_printf(&p, "instance counter: %u\n", pxp->key_instance); return 0; } DEFINE_SHOW_ATTRIBUTE(pxp_info); static int pxp_terminate_get(void *data, u64 *val) { /* nothing to read */ return -EPERM; } static int pxp_terminate_set(void *data, u64 val) { struct intel_pxp *pxp = data; struct intel_gt *gt = pxp->ctrl_gt; int timeout_ms; if (!intel_pxp_is_active(pxp)) return -ENODEV; /* simulate a termination interrupt */ spin_lock_irq(gt->irq_lock); intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT); spin_unlock_irq(gt->irq_lock); timeout_ms = intel_pxp_get_backend_timeout_ms(pxp); if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout_ms))) return -ETIMEDOUT; return 0; } DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n"); void intel_pxp_debugfs_register(struct intel_pxp *pxp) { struct drm_minor *minor; struct dentry *pxproot; if (!intel_pxp_is_supported(pxp)) return; minor = pxp->ctrl_gt->i915->drm.primary; if (!minor->debugfs_root) return; pxproot = debugfs_create_dir("pxp", minor->debugfs_root); if (IS_ERR(pxproot)) return; debugfs_create_file("info", 0444, pxproot, pxp, &pxp_info_fops); debugfs_create_file("terminate_state", 0644, pxproot, pxp, &pxp_terminate_fops); }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
// SPDX-License-Identifier: MIT /* * Copyright(c) 2020, Intel Corporation. All rights reserved. */ #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_cmd.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_session.h" #include "intel_pxp_tee.h" #include "intel_pxp_types.h" #include "intel_pxp_regs.h" #define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */ static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id) { struct intel_uncore *uncore = pxp->ctrl_gt->uncore; intel_wakeref_t wakeref; u32 sip = 0; /* if we're suspended the session is considered off */ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) sip = intel_uncore_read(uncore, KCR_SIP(pxp->kcr_base)); return sip & BIT(id); } static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play) { struct intel_uncore *uncore = pxp->ctrl_gt->uncore; intel_wakeref_t wakeref; u32 mask = BIT(id); int ret; /* if we're suspended the session is considered off */ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm); if (!wakeref) return in_play ? -ENODEV : 0; ret = intel_wait_for_register(uncore, KCR_SIP(pxp->kcr_base), mask, in_play ? mask : 0, 250); intel_runtime_pm_put(uncore->rpm, wakeref); return ret; } static int pxp_create_arb_session(struct intel_pxp *pxp) { struct intel_gt *gt = pxp->ctrl_gt; int ret; pxp->arb_is_valid = false; if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) { drm_err(&gt->i915->drm, "arb session already in play at creation time\n"); return -EEXIST; } if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) ret = intel_pxp_gsccs_create_session(pxp, ARB_SESSION); else ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION); if (ret) { drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n"); return ret; } ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true); if (ret) { drm_dbg(&gt->i915->drm, "arb session failed to go in play\n"); return ret; } drm_dbg(&gt->i915->drm, "PXP ARB session is alive\n"); if (!++pxp->key_instance) ++pxp->key_instance; pxp->arb_is_valid = true; return 0; } static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) { int ret; struct intel_gt *gt = pxp->ctrl_gt; /* must mark termination in progress calling this function */ GEM_WARN_ON(pxp->arb_is_valid); /* terminate the hw sessions */ ret = intel_pxp_terminate_session(pxp, ARB_SESSION); if (ret) { drm_err(&gt->i915->drm, "Failed to submit session termination\n"); return ret; } ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false); if (ret) { drm_err(&gt->i915->drm, "Session state did not clear\n"); return ret; } intel_uncore_write(gt->uncore, KCR_GLOBAL_TERMINATE(pxp->kcr_base), 1); if (HAS_ENGINE(gt, GSC0)) intel_pxp_gsccs_end_arb_fw_session(pxp, ARB_SESSION); else intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); return ret; } void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) { int ret; pxp->hw_state_invalidated = post_invalidation_needs_restart; /* * if we fail to submit the termination there is no point in waiting for * it to complete. PXP will be marked as non-active until the next * termination is issued. 
*/ ret = pxp_terminate_arb_session_and_global(pxp); if (ret) complete_all(&pxp->termination); } static void pxp_terminate_complete(struct intel_pxp *pxp) { /* Re-create the arb session after teardown handle complete */ if (fetch_and_zero(&pxp->hw_state_invalidated)) pxp_create_arb_session(pxp); complete_all(&pxp->termination); } static void pxp_session_work(struct work_struct *work) { struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work); struct intel_gt *gt = pxp->ctrl_gt; intel_wakeref_t wakeref; u32 events = 0; spin_lock_irq(gt->irq_lock); events = fetch_and_zero(&pxp->session_events); spin_unlock_irq(gt->irq_lock); if (!events) return; if (events & PXP_INVAL_REQUIRED) intel_pxp_invalidate(pxp); /* * If we're processing an event while suspending then don't bother, * we're going to re-init everything on resume anyway. */ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm); if (!wakeref) return; if (events & PXP_TERMINATION_REQUEST) { events &= ~PXP_TERMINATION_COMPLETE; intel_pxp_terminate(pxp, true); } if (events & PXP_TERMINATION_COMPLETE) pxp_terminate_complete(pxp); intel_runtime_pm_put(gt->uncore->rpm, wakeref); } void intel_pxp_session_management_init(struct intel_pxp *pxp) { mutex_init(&pxp->arb_mutex); INIT_WORK(&pxp->session_work, pxp_session_work); }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
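As a rough, standalone illustration of the session bookkeeping in intel_pxp_session.c above: the session-in-play register is treated as a bitmask where bit N tracks hardware session N, a caller either tests the bit or polls until it reaches the expected state. The sketch below is not driver code; every name in it (sip_read, session_is_in_play, wait_for_session_state) is made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARB_SESSION_ID 0u  /* the default/arbitration session occupies bit 0 */

/* Stand-in for the MMIO read of the session-in-play register. */
static uint32_t sip_read(const uint32_t *sip_reg)
{
	return *sip_reg;
}

static bool session_is_in_play(const uint32_t *sip_reg, unsigned int id)
{
	return sip_read(sip_reg) & (1u << id);
}

/* Poll until bit 'id' reaches 'in_play'; 0 on success, -1 if it never does. */
static int wait_for_session_state(const uint32_t *sip_reg, unsigned int id,
				  bool in_play, unsigned int max_polls)
{
	while (max_polls--) {
		if (session_is_in_play(sip_reg, id) == in_play)
			return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t sip = 1u << ARB_SESSION_ID;  /* pretend the hardware set bit 0 */

	printf("in play: %d, wait result: %d\n",
	       session_is_in_play(&sip, ARB_SESSION_ID),
	       wait_for_session_state(&sip, ARB_SESSION_ID, true, 4));
	return 0;
}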
// SPDX-License-Identifier: MIT /* * Copyright(c) 2021-2022, Intel Corporation. All rights reserved. */ #include "i915_drv.h" #include "gem/i915_gem_region.h" #include "gt/intel_gt.h" #include "intel_pxp.h" #include "intel_pxp_huc.h" #include "intel_pxp_tee.h" #include "intel_pxp_types.h" #include "intel_pxp_cmd_interface_43.h" int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp) { struct intel_gt *gt; struct intel_huc *huc; struct pxp43_start_huc_auth_in huc_in = {0}; struct pxp43_huc_auth_out huc_out = {0}; dma_addr_t huc_phys_addr; u8 client_id = 0; u8 fence_id = 0; int err; if (!pxp || !pxp->pxp_component) return -ENODEV; gt = pxp->ctrl_gt; huc = &gt->uc.huc; huc_phys_addr = i915_gem_object_get_dma_address(huc->fw.obj, 0); /* write the PXP message into the lmem (the sg list) */ huc_in.header.api_version = PXP_APIVER(4, 3); huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH; huc_in.header.status = 0; huc_in.header.buffer_len = sizeof(huc_in.huc_base_address); huc_in.huc_base_address = cpu_to_le64(huc_phys_addr); err = intel_pxp_tee_stream_message(pxp, client_id, fence_id, &huc_in, sizeof(huc_in), &huc_out, sizeof(huc_out)); if (err < 0) { drm_err(&gt->i915->drm, "Failed to send HuC load and auth command to GSC [%d]!\n", err); return err; } /* * HuC does sometimes survive suspend/resume (it depends on how "deep" * a sleep state the device reaches) so we can end up here on resume * with HuC already loaded, in which case the GSC will return * PXP_STATUS_OP_NOT_PERMITTED. We can therefore consider the GuC * correctly transferred in this scenario; if the same error is ever * returned with HuC not loaded we'll still catch it when we check the * authentication bit later. */ if (huc_out.header.status != PXP_STATUS_SUCCESS && huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) { drm_err(&gt->i915->drm, "HuC load failed with GSC error = 0x%x\n", huc_out.header.status); return -EPROTO; } return 0; }
linux-master
drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
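A self-contained sketch of the message layout used by intel_pxp_huc.c above: a small fixed header (API version, command id, status, payload length) followed by the HuC image base address in little-endian byte order. The struct and field names here are illustrative stand-ins, not the driver's pxp43 definitions, and the version/opcode encodings are assumptions.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>

struct huc_auth_msg {
	uint32_t api_version;    /* assumed encoding: (major << 16) | minor */
	uint32_t command_id;     /* "start HuC auth" opcode (placeholder) */
	uint32_t status;         /* written back by firmware on return */
	uint32_t buffer_len;     /* length of the payload that follows */
	uint8_t  huc_base_le[8]; /* 64-bit DMA address, little-endian on the wire */
};

static void put_le64(uint8_t out[8], uint64_t v)
{
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	struct huc_auth_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.api_version = (4u << 16) | 3u;          /* "4.3", assumed encoding */
	msg.command_id  = 0x42;                     /* placeholder opcode */
	msg.buffer_len  = sizeof(msg.huc_base_le);
	put_le64(msg.huc_base_le, 0x100000000ull);  /* example DMA address */

	printf("api=%08" PRIx32 " cmd=%08" PRIx32 " len=%" PRIu32 " addr[0]=%02x\n",
	       msg.api_version, msg.command_id, msg.buffer_len, msg.huc_base_le[0]);
	return 0;
}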
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <[email protected]> * * Contributors: * Bing Niu <[email protected]> * Xu Han <[email protected]> * Ping Gao <[email protected]> * Xiaoguang Chen <[email protected]> * Yang Liu <[email protected]> * Tina Zhang <[email protected]> * */ #include <uapi/drm/drm_fourcc.h> #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" #include "i915_reg.h" #define PRIMARY_FORMAT_NUM 16 struct pixel_format { int drm_format; /* Pixel format in DRM definition */ int bpp; /* Bits per pixel, 0 indicates invalid */ const char *desc; /* The description */ }; static const struct pixel_format bdw_pixel_formats[] = { {DRM_FORMAT_C8, 8, "8-bit Indexed"}, {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, /* non-supported format has bpp default to 0 */ {0, 0, NULL}, }; static const struct pixel_format skl_pixel_formats[] = { {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"}, {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"}, {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"}, {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"}, {DRM_FORMAT_C8, 8, "8-bit Indexed"}, {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"}, {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"}, {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, /* non-supported format has bpp default to 0 */ {0, 0, NULL}, }; static int bdw_format_to_drm(int format) { int bdw_pixel_formats_index = 6; switch (format) { case DISP_FORMAT_8BPP: bdw_pixel_formats_index = 0; break; case DISP_FORMAT_BGRX565: bdw_pixel_formats_index = 1; break; case DISP_FORMAT_BGRX888: bdw_pixel_formats_index = 2; break; case DISP_FORMAT_RGBX101010: bdw_pixel_formats_index = 3; break; case DISP_FORMAT_BGRX101010: bdw_pixel_formats_index = 4; break; case DISP_FORMAT_RGBX888: 
bdw_pixel_formats_index = 5; break; default: break; } return bdw_pixel_formats_index; } static int skl_format_to_drm(int format, bool rgb_order, bool alpha, int yuv_order) { int skl_pixel_formats_index = 12; switch (format) { case PLANE_CTL_FORMAT_INDEXED: skl_pixel_formats_index = 4; break; case PLANE_CTL_FORMAT_RGB_565: skl_pixel_formats_index = 5; break; case PLANE_CTL_FORMAT_XRGB_8888: if (rgb_order) skl_pixel_formats_index = alpha ? 6 : 7; else skl_pixel_formats_index = alpha ? 8 : 9; break; case PLANE_CTL_FORMAT_XRGB_2101010: skl_pixel_formats_index = rgb_order ? 10 : 11; break; case PLANE_CTL_FORMAT_YUV422: skl_pixel_formats_index = yuv_order >> 16; if (skl_pixel_formats_index > 3) return -EINVAL; break; default: break; } return skl_pixel_formats_index; } static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 tiled, int stride_mask, int bpp) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; if (GRAPHICS_VER(dev_priv) >= 9) { switch (tiled) { case PLANE_CTL_TILED_LINEAR: stride = stride_reg * 64; break; case PLANE_CTL_TILED_X: stride = stride_reg * 512; break; case PLANE_CTL_TILED_Y: stride = stride_reg * 128; break; case PLANE_CTL_TILED_YF: if (bpp == 8) stride = stride_reg * 64; else if (bpp == 16 || bpp == 32 || bpp == 64) stride = stride_reg * 128; else gvt_dbg_core("skl: unsupported bpp:%d\n", bpp); break; default: gvt_dbg_core("skl: unsupported tile format:%x\n", tiled); } } return stride; } static int get_active_pipe(struct intel_vgpu *vgpu) { int i; for (i = 0; i < I915_MAX_PIPES; i++) if (pipe_is_enabled(vgpu, i)) break; return i; } /** * intel_vgpu_decode_primary_plane - Decode primary plane * @vgpu: input vgpu * @plane: primary plane to save decoded info * This function is called for decoding plane * * Returns: * 0 on success, non-zero if failed. */ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, struct intel_vgpu_primary_plane_format *plane) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 val, fmt; int pipe; pipe = get_active_pipe(vgpu); if (pipe >= I915_MAX_PIPES) return -ENODEV; val = vgpu_vreg_t(vgpu, DSPCNTR(pipe)); plane->enabled = !!(val & DISP_ENABLE); if (!plane->enabled) return -ENODEV; if (GRAPHICS_VER(dev_priv) >= 9) { plane->tiled = val & PLANE_CTL_TILED_MASK; fmt = skl_format_to_drm( val & PLANE_CTL_FORMAT_MASK_SKL, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK, val & PLANE_CTL_YUV422_ORDER_MASK); if (fmt >= ARRAY_SIZE(skl_pixel_formats)) { gvt_vgpu_err("Out-of-bounds pixel format index\n"); return -EINVAL; } plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { plane->tiled = val & DISP_TILED; fmt = bdw_format_to_drm(val & DISP_FORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; } if (!plane->bpp) { gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt); return -EINVAL; } plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", plane->base); return -EINVAL; } plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, (GRAPHICS_VER(dev_priv) >= 9) ? 
(_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >> _PIPE_H_SRCSZ_SHIFT; plane->width += 1; plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT; plane->height += 1; /* raw height is one minus the real value */ val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe)); plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >> _PRI_PLANE_X_OFF_SHIFT; plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >> _PRI_PLANE_Y_OFF_SHIFT; return 0; } #define CURSOR_FORMAT_NUM (1 << 6) struct cursor_mode_format { int drm_format; /* Pixel format in DRM definition */ u8 bpp; /* Bits per pixel; 0 indicates invalid */ u32 width; /* In pixel */ u32 height; /* In lines */ const char *desc; /* The description */ }; static const struct cursor_mode_format cursor_pixel_formats[] = { {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, /* non-supported format has bpp default to 0 */ {0, 0, 0, 0, NULL}, }; static int cursor_mode_to_drm(int mode) { int cursor_pixel_formats_index = 4; switch (mode) { case MCURSOR_MODE_128_ARGB_AX: cursor_pixel_formats_index = 0; break; case MCURSOR_MODE_256_ARGB_AX: cursor_pixel_formats_index = 1; break; case MCURSOR_MODE_64_ARGB_AX: cursor_pixel_formats_index = 2; break; case MCURSOR_MODE_64_32B_AX: cursor_pixel_formats_index = 3; break; default: break; } return cursor_pixel_formats_index; } /** * intel_vgpu_decode_cursor_plane - Decode sprite plane * @vgpu: input vgpu * @plane: cursor plane to save decoded info * This function is called for decoding plane * * Returns: * 0 on success, non-zero if failed. 
*/ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, struct intel_vgpu_cursor_plane_format *plane) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 val, mode, index; u32 alpha_plane, alpha_force; int pipe; pipe = get_active_pipe(vgpu); if (pipe >= I915_MAX_PIPES) return -ENODEV; val = vgpu_vreg_t(vgpu, CURCNTR(pipe)); mode = val & MCURSOR_MODE_MASK; plane->enabled = (mode != MCURSOR_MODE_DISABLE); if (!plane->enabled) return -ENODEV; index = cursor_mode_to_drm(mode); if (!cursor_pixel_formats[index].bpp) { gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode); return -EINVAL; } plane->mode = mode; plane->bpp = cursor_pixel_formats[index].bpp; plane->drm_format = cursor_pixel_formats[index].drm_format; plane->width = cursor_pixel_formats[index].width; plane->height = cursor_pixel_formats[index].height; alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >> _CURSOR_ALPHA_PLANE_SHIFT; alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >> _CURSOR_ALPHA_FORCE_SHIFT; if (alpha_plane || alpha_force) gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n", alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", plane->base); return -EINVAL; } val = vgpu_vreg_t(vgpu, CURPOS(pipe)); plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT; plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT; plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT; plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)); plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)); return 0; } #define SPRITE_FORMAT_NUM (1 << 3) static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"}, [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"}, [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"}, [0x4] = {DRM_FORMAT_AYUV, 32, "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"}, }; /** * intel_vgpu_decode_sprite_plane - Decode sprite plane * @vgpu: input vgpu * @plane: sprite plane to save decoded info * This function is called for decoding plane * * Returns: * 0 on success, non-zero if failed. 
*/ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, struct intel_vgpu_sprite_plane_format *plane) { u32 val, fmt; u32 color_order, yuv_order; int drm_format; int pipe; pipe = get_active_pipe(vgpu); if (pipe >= I915_MAX_PIPES) return -ENODEV; val = vgpu_vreg_t(vgpu, SPRCTL(pipe)); plane->enabled = !!(val & SPRITE_ENABLE); if (!plane->enabled) return -ENODEV; plane->tiled = !!(val & SPRITE_TILED); color_order = !!(val & SPRITE_RGB_ORDER_RGBX); yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT; if (!sprite_pixel_formats[fmt].bpp) { gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt); return -EINVAL; } plane->hw_format = fmt; plane->bpp = sprite_pixel_formats[fmt].bpp; drm_format = sprite_pixel_formats[fmt].drm_format; /* Order of RGB values in an RGBxxx buffer may be ordered RGB or * BGR depending on the state of the color_order field */ if (!color_order) { if (drm_format == DRM_FORMAT_XRGB2101010) drm_format = DRM_FORMAT_XBGR2101010; else if (drm_format == DRM_FORMAT_XRGB8888) drm_format = DRM_FORMAT_XBGR8888; } if (drm_format == DRM_FORMAT_YUV422) { switch (yuv_order) { case 0: drm_format = DRM_FORMAT_YUYV; break; case 1: drm_format = DRM_FORMAT_UYVY; break; case 2: drm_format = DRM_FORMAT_YVYU; break; case 3: drm_format = DRM_FORMAT_VYUY; break; default: /* yuv_order has only 2 bits */ break; } } plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", plane->base); return -EINVAL; } plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) & _SPRITE_STRIDE_MASK; val = vgpu_vreg_t(vgpu, SPRSIZE(pipe)); plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >> _SPRITE_SIZE_HEIGHT_SHIFT; plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >> _SPRITE_SIZE_WIDTH_SHIFT; plane->height += 1; /* raw height is one minus the real value */ plane->width += 1; /* raw width is one minus the real value */ val = vgpu_vreg_t(vgpu, SPRPOS(pipe)); plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT; plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT; val = vgpu_vreg_t(vgpu, SPROFFSET(pipe)); plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >> _SPRITE_OFFSET_START_X_SHIFT; plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >> _SPRITE_OFFSET_START_Y_SHIFT; return 0; }
linux-master
drivers/gpu/drm/i915/gvt/fb_decoder.c
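A standalone sketch of the SKL+ stride decode performed in fb_decoder.c above: the stride register holds a count of tile rows (or 64-byte units for linear surfaces), so the byte stride is the register value times a per-tiling unit size, with the YF unit depending on bpp. The enum names are local stand-ins for the PLANE_CTL_TILED_* values; this is illustration, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

enum tiling { TILE_LINEAR, TILE_X, TILE_Y, TILE_YF };

static uint32_t skl_stride_bytes(enum tiling tiled, uint32_t stride_reg, int bpp)
{
	switch (tiled) {
	case TILE_LINEAR:
		return stride_reg * 64;   /* linear: 64-byte units */
	case TILE_X:
		return stride_reg * 512;  /* X-tile: 512-byte tile rows */
	case TILE_Y:
		return stride_reg * 128;  /* Y-tile: 128-byte tile rows */
	case TILE_YF:
		if (bpp == 8)
			return stride_reg * 64;
		if (bpp == 16 || bpp == 32 || bpp == 64)
			return stride_reg * 128;
		return 0;                 /* unsupported bpp */
	}
	return 0;
}

int main(void)
{
	/* e.g. a 1920-wide XRGB8888 X-tiled surface: 1920 * 4 = 7680 bytes = 15 * 512 */
	printf("%" PRIu32 "\n", skl_stride_bytes(TILE_X, 15, 32));
	return 0;
}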
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Ke Yu * Zhiyuan Lv <[email protected]> * * Contributors: * Terrence Xu <[email protected]> * Changbin Du <[email protected]> * Bing Niu <[email protected]> * Zhi Wang <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #include "display/intel_display.h" #include "display/intel_dpio_phy.h" static int get_edp_pipe(struct intel_vgpu *vgpu) { u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP); int pipe = -1; switch (data & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: case TRANS_DDI_EDP_INPUT_A_ONOFF: pipe = PIPE_A; break; case TRANS_DDI_EDP_INPUT_B_ONOFF: pipe = PIPE_B; break; case TRANS_DDI_EDP_INPUT_C_ONOFF: pipe = PIPE_C; break; } return pipe; } static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE)) return 0; if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) return 0; return 1; } int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (drm_WARN_ON(&dev_priv->drm, pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE) return 1; if (edp_pipe_is_enabled(vgpu) && get_edp_pipe(vgpu) == pipe) return 1; return 0; } static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { { /* EDID with 1024x768 as its resolution */ /*Header*/ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, /* Vendor & Product Identification */ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, /* Version & Revision */ 0x01, 0x04, /* Basic Display Parameters & Features */ 0xa5, 0x34, 0x20, 0x78, 0x23, /* Color Characteristics */ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, /* Established Timings: maximum resolution is 1024x768 */ 0x21, 0x08, 0x00, /* Standard Timings. 
All invalid */ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, /* 18 Byte Data Blocks 1: invalid */ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, /* 18 Byte Data Blocks 2: invalid */ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, /* 18 Byte Data Blocks 3: invalid */ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, /* 18 Byte Data Blocks 4: invalid */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, /* Extension Block Count */ 0x00, /* Checksum */ 0xef, }, { /* EDID with 1920x1200 as its resolution */ /*Header*/ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, /* Vendor & Product Identification */ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, /* Version & Revision */ 0x01, 0x04, /* Basic Display Parameters & Features */ 0xa5, 0x34, 0x20, 0x78, 0x23, /* Color Characteristics */ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, /* Established Timings: maximum resolution is 1024x768 */ 0x21, 0x08, 0x00, /* * Standard Timings. * below new resolutions can be supported: * 1920x1080, 1280x720, 1280x960, 1280x1024, * 1440x900, 1600x1200, 1680x1050 */ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, /* 18 Byte Data Blocks 2: invalid */ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, /* 18 Byte Data Blocks 3: invalid */ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, /* 18 Byte Data Blocks 4: invalid */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, /* Extension Block Count */ 0x00, /* Checksum */ 0x45, }, }; #define DPCD_HEADER_SIZE 0xb /* let the virtual display supports DP1.2 */ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { 0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; int pipe; if (IS_BROXTON(dev_priv)) { enum transcoder trans; enum port port; /* Clear PIPE, DDI, PHY, HPD before setting new */ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &= ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE); vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); } vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); for (port = PORT_A; port <= PORT_C; port++) { vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &= ~BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |= 
(BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK); vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &= ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | PORT_PLL_REF_SEL | PORT_PLL_LOCK | PORT_PLL_ENABLE); vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &= ~(DDI_INIT_DISPLAY_DETECTED | DDI_BUF_CTL_ENABLE); vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; } vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK); /* No hpd_invert set in vgpu vbt, need to clear invert mask */ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK; vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; /* * Only 1 PIPE enabled in current vGPU display and PIPE_A is * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, * TRANSCODER_A can be enabled. PORT_x depends on the input of * setup_virtual_dp_monitor. */ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; /* * Golden M/N are calculated based on: * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. 
*/ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; /* Enable per-DDI/PORT vreg */ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |= BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK); vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |= (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | PORT_PLL_REF_SEL | PORT_PLL_LOCK | PORT_PLL_ENABLE); vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED); vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTA_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK); vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |= (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | PORT_PLL_REF_SEL | PORT_PLL_LOCK | PORT_PLL_ENABLE); vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTB_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK); vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |= (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | PORT_PLL_REF_SEL | PORT_PLL_LOCK | PORT_PLL_ENABLE); vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTC_HOTPLUG_ENABLE; vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); } return; } vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || 
IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) { vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= SKL_FUSE_DOWNLOAD_STATUS | SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); /* * Only 1 PIPE enabled in current vGPU display and PIPE_A is * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, * TRANSCODER_A can be enabled. PORT_x depends on the input of * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x * so we fixed to DPLL0 here. * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode */ vgpu_vreg_t(vgpu, DPLL_CTRL1) = DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); vgpu_vreg_t(vgpu, DPLL_CTRL1) |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); vgpu_vreg_t(vgpu, LCPLL1_CTL) = LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); /* * Golden M/N are calculated based on: * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. */ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &= ~PORT_CLK_SEL_MASK; vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |= PORT_CLK_SEL_LCPLL_810; } vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &= ~PORT_CLK_SEL_MASK; vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |= PORT_CLK_SEL_LCPLL_810; } vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, 
PORT_D); vgpu_vreg_t(vgpu, DPLL_CTRL2) |= DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &= ~PORT_CLK_SEL_MASK; vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |= PORT_CLK_SEL_LCPLL_810; } vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); else vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; } /* Clear host CRT status, so guest couldn't detect this host CRT. */ if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; /* Disable Primary/Sprite/Cursor plane */ for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); kfree(port->edid); port->edid = NULL; kfree(port->dpcd); port->dpcd = NULL; } static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data) { struct intel_vgpu_vblank_timer *vblank_timer; struct intel_vgpu *vgpu; vblank_timer = container_of(data, struct intel_vgpu_vblank_timer, timer); vgpu = container_of(vblank_timer, struct intel_vgpu, vblank_timer); /* Set vblank emulation request per-vGPU bit */ intel_gvt_request_service(vgpu->gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id); hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period); return HRTIMER_RESTART; } static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, int type, unsigned int resolution) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer; if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM)) return -EINVAL; port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); if (!port->edid) return -ENOMEM; port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL); if (!port->dpcd) { kfree(port->edid); return -ENOMEM; } memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], EDID_SIZE); port->edid->data_valid = true; memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); port->dpcd->data_valid = true; port->dpcd->data[DPCD_SINK_COUNT] = 0x1; port->type = type; port->id = resolution; port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC; vgpu->display.port_num = port_num; /* Init hrtimer based on default refresh rate */ hrtimer_init(&vblank_timer->timer, 
CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vblank_timer->timer.function = vblank_timer_fn; vblank_timer->vrefresh_k = port->vrefresh_k; vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k); emulate_monitor_status_change(vgpu); return 0; } /** * vgpu_update_vblank_emulation - Update per-vGPU vblank_timer * @vgpu: vGPU operated * @turnon: Turn ON/OFF vblank_timer * * This function is used to turn on/off or update the per-vGPU vblank_timer * when TRANSCONF is enabled or disabled. vblank_timer period is also updated * if guest changed the refresh rate. * */ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon) { struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, vgpu->display.port_num); if (turnon) { /* * Skip the re-enable if already active and vrefresh unchanged. * Otherwise, stop timer if already active and restart with new * period. */ if (vblank_timer->vrefresh_k != port->vrefresh_k || !hrtimer_active(&vblank_timer->timer)) { /* Stop timer before start with new period if active */ if (hrtimer_active(&vblank_timer->timer)) hrtimer_cancel(&vblank_timer->timer); /* Make sure new refresh rate updated to timer period */ vblank_timer->vrefresh_k = port->vrefresh_k; vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k); hrtimer_start(&vblank_timer->timer, ktime_add_ns(ktime_get(), vblank_timer->period), HRTIMER_MODE_ABS); } } else { /* Caller request to stop vblank */ hrtimer_cancel(&vblank_timer->timer); } } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_vgpu_irq *irq = &vgpu->irq; int vblank_event[] = { [PIPE_A] = PIPE_A_VBLANK, [PIPE_B] = PIPE_B_VBLANK, [PIPE_C] = PIPE_C_VBLANK, }; int event; if (pipe < PIPE_A || pipe > PIPE_C) return; for_each_set_bit(event, irq->flip_done_event[pipe], INTEL_GVT_EVENT_MAX) { clear_bit(event, irq->flip_done_event[pipe]); if (!pipe_is_enabled(vgpu, pipe)) continue; intel_vgpu_trigger_virtual_event(vgpu, event); } if (pipe_is_enabled(vgpu, pipe)) { vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++; intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]); } } void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu) { int pipe; mutex_lock(&vgpu->vgpu_lock); for_each_pipe(vgpu->gvt->gt->i915, pipe) emulate_vblank_on_pipe(vgpu, pipe); mutex_unlock(&vgpu->vgpu_lock); } /** * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU * @vgpu: a vGPU * @connected: link state * * This function is used to trigger hotplug interrupt for vGPU * */ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; /* TODO: add more platforms support */ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { if (connected) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; } else { vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDID_DETECTED; vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT; } vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTD_HOTPLUG_STATUS_MASK; intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG); } else if (IS_BROXTON(i915)) { if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { if (connected) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } else { 
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~PORTA_HOTPLUG_STATUS_MASK; vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTA_HOTPLUG_LONG_DETECT; intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { if (connected) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; } else { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; } vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~PORTB_HOTPLUG_STATUS_MASK; vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTB_HOTPLUG_LONG_DETECT; intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { if (connected) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } else { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; } vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~PORTC_HOTPLUG_STATUS_MASK; vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTC_HOTPLUG_LONG_DETECT; intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG); } } } /** * intel_vgpu_clean_display - clean vGPU virtual display emulation * @vgpu: a vGPU * * This function is used to clean vGPU virtual display emulation stuffs * */ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); vgpu_update_vblank_emulation(vgpu, false); } /** * intel_vgpu_init_display- initialize vGPU virtual display emulation * @vgpu: a vGPU * @resolution: resolution index for intel_vgpu_edid * * This function is used to initialize vGPU virtual display emulation stuffs * * Returns: * Zero on success, negative error code if failed. * */ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; intel_vgpu_init_i2c_edid(vgpu); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, resolution); else return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, resolution); } /** * intel_vgpu_reset_display- reset vGPU virtual display emulation * @vgpu: a vGPU * * This function is used to reset vGPU virtual display emulation stuffs * */ void intel_vgpu_reset_display(struct intel_vgpu *vgpu) { emulate_monitor_status_change(vgpu); }
linux-master
drivers/gpu/drm/i915/gvt/display.c
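A self-contained sketch of the vblank period computation used by display.c above: the refresh rate is kept in millihertz (vrefresh_k = Hz * 1000) and the hrtimer period is NSEC_PER_SEC * MSEC_PER_SEC / vrefresh_k, rounded to the nearest nanosecond. The constant names mirror the kernel's but are defined locally here; this is not driver code.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000ull
#define MSEC_PER_SEC 1000ull

static uint64_t vblank_period_ns(uint64_t vrefresh_k)
{
	/* round-to-nearest division, as DIV64_U64_ROUND_CLOSEST does */
	return (NSEC_PER_SEC * MSEC_PER_SEC + vrefresh_k / 2) / vrefresh_k;
}

int main(void)
{
	/* 60 Hz -> vrefresh_k = 60000 -> ~16666667 ns between emulated vblanks */
	printf("%" PRIu64 "\n", vblank_period_ns(60 * MSEC_PER_SEC));
	return 0;
}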
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <[email protected]> * Zhi Wang <[email protected]> * * Contributors: * Min he <[email protected]> * */ #include <linux/eventfd.h> #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #include "trace.h" /* common offset among interrupt control registers */ #define regbase_to_isr(base) (base) #define regbase_to_imr(base) (base + 0x4) #define regbase_to_iir(base) (base + 0x8) #define regbase_to_ier(base) (base + 0xC) #define iir_to_regbase(iir) (iir - 0x8) #define ier_to_regbase(ier) (ier - 0xC) #define get_event_virt_handler(irq, e) (irq->events[e].v_handler) #define get_irq_info(irq, e) (irq->events[e].info) #define irq_to_gvt(irq) \ container_of(irq, struct intel_gvt, irq) static void update_upstream_irq(struct intel_vgpu *vgpu, struct intel_gvt_irq_info *info); static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT", [RCS_DEBUG] = "Render EU debug from SVG", [RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status", [RCS_CMD_STREAMER_ERR] = "Render CS error interrupt", [RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify", [RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded", [RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults", [RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt", [VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT", [VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status", [VCS_CMD_STREAMER_ERR] = "Video CS error interrupt", [VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify", [VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded", [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults", [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt", [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT", [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify", [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt", [BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT", [BCS_MMIO_SYNC_FLUSH] = "Billter MMIO sync flush status", [BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt", [BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify", [BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults", [BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt", [VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify", [VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt", [PIPE_A_FIFO_UNDERRUN] = 
"Pipe A FIFO underrun", [PIPE_A_CRC_ERR] = "Pipe A CRC error", [PIPE_A_CRC_DONE] = "Pipe A CRC done", [PIPE_A_VSYNC] = "Pipe A vsync", [PIPE_A_LINE_COMPARE] = "Pipe A line compare", [PIPE_A_ODD_FIELD] = "Pipe A odd field", [PIPE_A_EVEN_FIELD] = "Pipe A even field", [PIPE_A_VBLANK] = "Pipe A vblank", [PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun", [PIPE_B_CRC_ERR] = "Pipe B CRC error", [PIPE_B_CRC_DONE] = "Pipe B CRC done", [PIPE_B_VSYNC] = "Pipe B vsync", [PIPE_B_LINE_COMPARE] = "Pipe B line compare", [PIPE_B_ODD_FIELD] = "Pipe B odd field", [PIPE_B_EVEN_FIELD] = "Pipe B even field", [PIPE_B_VBLANK] = "Pipe B vblank", [PIPE_C_VBLANK] = "Pipe C vblank", [DPST_PHASE_IN] = "DPST phase in event", [DPST_HISTOGRAM] = "DPST histogram event", [GSE] = "GSE", [DP_A_HOTPLUG] = "DP A Hotplug", [AUX_CHANNEL_A] = "AUX Channel A", [PERF_COUNTER] = "Performance counter", [POISON] = "Poison", [GTT_FAULT] = "GTT fault", [PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done", [PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done", [PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done", [SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done", [SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done", [SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done", [PCU_THERMAL] = "PCU Thermal Event", [PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event", [FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A", [AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A", [AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A", [FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B", [AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B", [AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B", [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C", [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C", [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C", [ERR_AND_DBG] = "South Error and Debug Interrupts Combined", [GMBUS] = "Gmbus", [SDVO_B_HOTPLUG] = "SDVO B hotplug", [CRT_HOTPLUG] = "CRT Hotplug", [DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug", [DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug", [DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug", [AUX_CHANNEL_B] = "AUX Channel B", [AUX_CHANNEL_C] = "AUX Channel C", [AUX_CHANNEL_D] = "AUX Channel D", [AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B", [AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C", [AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D", [INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!", }; static inline struct intel_gvt_irq_info *regbase_to_irq_info( struct intel_gvt *gvt, unsigned int reg) { struct intel_gvt_irq *irq = &gvt->irq; int i; for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) { if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg) return irq->info[i]; } return NULL; } /** * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler * @vgpu: a vGPU * @reg: register offset written by guest * @p_data: register data written by guest * @bytes: register data length * * This function is used to emulate the generic IMR register bit change * behavior. * * Returns: * Zero on success, negative error code if failed. 
* */ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 imr = *(u32 *)p_data; trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg), (vgpu_vreg(vgpu, reg) ^ imr)); vgpu_vreg(vgpu, reg) = imr; ops->check_pending_irq(vgpu); return 0; } /** * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler * @vgpu: a vGPU * @reg: register offset written by guest * @p_data: register data written by guest * @bytes: register data length * * This function is used to emulate the master IRQ register on gen8+. * * Returns: * Zero on success, negative error code if failed. * */ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 ier = *(u32 *)p_data; u32 virtual_ier = vgpu_vreg(vgpu, reg); trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier, (virtual_ier ^ ier)); /* * GEN8_MASTER_IRQ is a special irq register, * only bit 31 is allowed to be modified * and treated as an IER bit. */ ier &= GEN8_MASTER_IRQ_CONTROL; virtual_ier &= GEN8_MASTER_IRQ_CONTROL; vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; vgpu_vreg(vgpu, reg) |= ier; ops->check_pending_irq(vgpu); return 0; } /** * intel_vgpu_reg_ier_handler - Generic IER write emulation handler * @vgpu: a vGPU * @reg: register offset written by guest * @p_data: register data written by guest * @bytes: register data length * * This function is used to emulate the generic IER register behavior. * * Returns: * Zero on success, negative error code if failed. * */ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; const struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_info *info; u32 ier = *(u32 *)p_data; trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg), (vgpu_vreg(vgpu, reg) ^ ier)); vgpu_vreg(vgpu, reg) = ier; info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); if (drm_WARN_ON(&i915->drm, !info)) return -EINVAL; if (info->has_upstream_irq) update_upstream_irq(vgpu, info); ops->check_pending_irq(vgpu); return 0; } /** * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler * @vgpu: a vGPU * @reg: register offset written by guest * @p_data: register data written by guest * @bytes: register data length * * This function is used to emulate the generic IIR register behavior. * * Returns: * Zero on success, negative error code if failed. 
* */ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt, iir_to_regbase(reg)); u32 iir = *(u32 *)p_data; trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg), (vgpu_vreg(vgpu, reg) ^ iir)); if (drm_WARN_ON(&i915->drm, !info)) return -EINVAL; vgpu_vreg(vgpu, reg) &= ~iir; if (info->has_upstream_irq) update_upstream_irq(vgpu, info); return 0; } static struct intel_gvt_irq_map gen8_irq_map[] = { { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff }, { INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 }, { INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff }, { INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 }, { INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff }, { INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff }, { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 }, { INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 }, { -1, -1, ~0 }, }; static void update_upstream_irq(struct intel_vgpu *vgpu, struct intel_gvt_irq_info *info) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt_irq *irq = &vgpu->gvt->irq; struct intel_gvt_irq_map *map = irq->irq_map; struct intel_gvt_irq_info *up_irq_info = NULL; u32 set_bits = 0; u32 clear_bits = 0; int bit; u32 val = vgpu_vreg(vgpu, regbase_to_iir(i915_mmio_reg_offset(info->reg_base))) & vgpu_vreg(vgpu, regbase_to_ier(i915_mmio_reg_offset(info->reg_base))); if (!info->has_upstream_irq) return; for (map = irq->irq_map; map->up_irq_bit != -1; map++) { if (info->group != map->down_irq_group) continue; if (!up_irq_info) up_irq_info = irq->info[map->up_irq_group]; else drm_WARN_ON(&i915->drm, up_irq_info != irq->info[map->up_irq_group]); bit = map->up_irq_bit; if (val & map->down_irq_bitmask) set_bits |= (1 << bit); else clear_bits |= (1 << bit); } if (drm_WARN_ON(&i915->drm, !up_irq_info)) return; if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base); vgpu_vreg(vgpu, isr) &= ~clear_bits; vgpu_vreg(vgpu, isr) |= set_bits; } else { u32 iir = regbase_to_iir( i915_mmio_reg_offset(up_irq_info->reg_base)); u32 imr = regbase_to_imr( i915_mmio_reg_offset(up_irq_info->reg_base)); vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr)); } if (up_irq_info->has_upstream_irq) update_upstream_irq(vgpu, up_irq_info); } static void init_irq_map(struct intel_gvt_irq *irq) { struct intel_gvt_irq_map *map; struct intel_gvt_irq_info *up_info, *down_info; int up_bit; for (map = irq->irq_map; map->up_irq_bit != -1; map++) { up_info = irq->info[map->up_irq_group]; up_bit = map->up_irq_bit; down_info = irq->info[map->down_irq_group]; set_bit(up_bit, up_info->downstream_irq_bitmap); down_info->has_upstream_irq = true; gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n", up_info->group, up_bit, down_info->group, map->down_irq_bitmask); } } /* =======================vEvent injection===================== */ #define MSI_CAP_CONTROL(offset) (offset + 2) #define MSI_CAP_ADDRESS(offset) 
(offset + 4) #define MSI_CAP_DATA(offset) (offset + 8) #define MSI_CAP_EN 0x1 static int inject_virtual_interrupt(struct intel_vgpu *vgpu) { unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; u16 control, data; u32 addr; control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); /* Do not generate MSI if MSIEN is disabled */ if (!(control & MSI_CAP_EN)) return 0; if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) return -EINVAL; trace_inject_msi(vgpu->id, addr, data); /* * When guest is powered off, msi_trigger is set to NULL, but vgpu's * config and mmio register isn't restored to default during guest * poweroff. If this vgpu is still used in next vm, this vgpu's pipe * may be enabled, then once this vgpu is active, it will get inject * vblank interrupt request. But msi_trigger is null until msi is * enabled by guest. so if msi_trigger is null, success is still * returned and don't inject interrupt into guest. */ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) return -EFAULT; return 0; } static void propagate_event(struct intel_gvt_irq *irq, enum intel_gvt_event_type event, struct intel_vgpu *vgpu) { struct intel_gvt_irq_info *info; unsigned int reg_base; int bit; info = get_irq_info(irq, event); if (WARN_ON(!info)) return; reg_base = i915_mmio_reg_offset(info->reg_base); bit = irq->events[event].bit; if (!test_bit(bit, (void *)&vgpu_vreg(vgpu, regbase_to_imr(reg_base)))) { trace_propagate_event(vgpu->id, irq_name[event], bit); set_bit(bit, (void *)&vgpu_vreg(vgpu, regbase_to_iir(reg_base))); } } /* =======================vEvent Handlers===================== */ static void handle_default_event_virt(struct intel_gvt_irq *irq, enum intel_gvt_event_type event, struct intel_vgpu *vgpu) { if (!vgpu->irq.irq_warn_once[event]) { gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n", vgpu->id, event, irq_name[event]); vgpu->irq.irq_warn_once[event] = true; } propagate_event(irq, event, vgpu); } /* =====================GEN specific logic======================= */ /* GEN8 interrupt routines. */ #define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \ static struct intel_gvt_irq_info gen8_##regname##_info = { \ .name = #regname"-IRQ", \ .reg_base = (regbase), \ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \ INTEL_GVT_EVENT_RESERVED}, \ } DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C)); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR); DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ); static struct intel_gvt_irq_info gvt_base_pch_info = { .name = "PCH-IRQ", .reg_base = SDEISR, .bit_to_event = {[0 ... 
INTEL_GVT_IRQ_BITWIDTH-1] = INTEL_GVT_EVENT_RESERVED}, }; static void gen8_check_pending_irq(struct intel_vgpu *vgpu) { struct intel_gvt_irq *irq = &vgpu->gvt->irq; int i; if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) & GEN8_MASTER_IRQ_CONTROL)) return; for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) { struct intel_gvt_irq_info *info = irq->info[i]; u32 reg_base; if (!info->has_upstream_irq) continue; reg_base = i915_mmio_reg_offset(info->reg_base); if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base)) & vgpu_vreg(vgpu, regbase_to_ier(reg_base)))) update_upstream_irq(vgpu, info); } if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) & ~GEN8_MASTER_IRQ_CONTROL) inject_virtual_interrupt(vgpu); } static void gen8_init_irq( struct intel_gvt_irq *irq) { struct intel_gvt *gvt = irq_to_gvt(irq); #define SET_BIT_INFO(s, b, e, i) \ do { \ s->events[e].bit = b; \ s->events[e].info = s->info[i]; \ s->info[i]->bit_to_event[b] = e;\ } while (0) #define SET_IRQ_GROUP(s, g, i) \ do { \ s->info[g] = i; \ (i)->group = g; \ set_bit(g, s->irq_info_bitmap); \ } while (0) SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info); SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info); /* GEN8 level 2 interrupts. 
*/ /* GEN8 interrupt GT0 events */ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); /* GEN8 interrupt GT1 events */ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); if (HAS_ENGINE(gvt->gt, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); } /* GEN8 interrupt GT3 events */ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C); /* GEN8 interrupt DE PORT events */ SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT); /* GEN8 interrupt DE MISC events */ SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC); /* PCH events */ SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); if (IS_BROADWELL(gvt->gt->i915)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } else if (GRAPHICS_VER(gvt->gt->i915) >= 9) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU); SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU); } static const struct intel_gvt_irq_ops gen8_irq_ops = { .init_irq = gen8_init_irq, .check_pending_irq = gen8_check_pending_irq, 
}; /** * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU * @vgpu: a vGPU * @event: interrupt event * * This function is used to trigger a virtual interrupt event for vGPU. * The caller provides the event to be triggered, the framework itself * will emulate the IRQ register bit change. * */ void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, enum intel_gvt_event_type event) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq *irq = &gvt->irq; gvt_event_virt_handler_t handler; const struct intel_gvt_irq_ops *ops = gvt->irq.ops; handler = get_event_virt_handler(irq, event); drm_WARN_ON(&i915->drm, !handler); handler(irq, event, vgpu); ops->check_pending_irq(vgpu); } static void init_events( struct intel_gvt_irq *irq) { int i; for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) { irq->events[i].info = NULL; irq->events[i].v_handler = handle_default_event_virt; } } /** * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem * @gvt: a GVT device * * This function is called at driver loading stage, to initialize the GVT-g IRQ * emulation subsystem. * * Returns: * Zero on success, negative error code if failed. */ int intel_gvt_init_irq(struct intel_gvt *gvt) { struct intel_gvt_irq *irq = &gvt->irq; gvt_dbg_core("init irq framework\n"); irq->ops = &gen8_irq_ops; irq->irq_map = gen8_irq_map; /* common event initialization */ init_events(irq); /* gen specific initialization */ irq->ops->init_irq(irq); init_irq_map(irq); return 0; }
linux-master
drivers/gpu/drm/i915/gvt/interrupt.c
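The interrupt.c record above is table driven: update_upstream_irq() reads a downstream group's pending-and-enabled bits (IIR & IER) and then sets or clears the mapped bit one level up until the master register is reached, after which gen8_check_pending_irq() may inject an MSI. The following is a minimal user-space sketch of that single propagation step, not part of the kernel file; all identifiers in it (vreg_level, irq_route, propagate_up) are hypothetical.

/*
 * Illustrative sketch only: a simplified model of the bit propagation
 * that update_upstream_irq() performs on the vGPU's virtual registers.
 */
#include <stdint.h>
#include <stdio.h>

struct vreg_level {
	uint32_t iir;   /* pending interrupt bits of one group */
	uint32_t ier;   /* enabled interrupt bits of one group */
};

struct irq_route {
	int down;        /* index of the downstream level               */
	uint32_t mask;   /* which downstream bits feed this route       */
	int up_bit;      /* bit to raise in the upstream (master) IIR   */
};

/* Raise/clear master IIR bits based on each downstream group's pending+enabled bits. */
static void propagate_up(const struct vreg_level *levels, struct vreg_level *master,
			 const struct irq_route *map, int nr_routes)
{
	for (int i = 0; i < nr_routes; i++) {
		uint32_t pending = levels[map[i].down].iir & levels[map[i].down].ier;

		if (pending & map[i].mask)
			master->iir |= 1u << map[i].up_bit;     /* set upstream bit      */
		else
			master->iir &= ~(1u << map[i].up_bit);  /* clear it otherwise    */
	}
}

int main(void)
{
	struct vreg_level gt0 = { .iir = 0x1, .ier = 0x1 };   /* one pending, enabled event */
	struct vreg_level master = { 0 };
	const struct irq_route map[] = {
		{ .down = 0, .mask = 0xffff, .up_bit = 0 },   /* GT0 low half -> master bit 0 */
	};

	propagate_up(&gt0, &master, map, 1);
	printf("master IIR = 0x%x\n", (unsigned)master.iir);  /* prints 0x1 */
	return 0;
}

The real code additionally masks second-level registers with IMR and recurses through info->has_upstream_irq; the sketch keeps only the single hop to the master level.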
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Ke Yu * Zhiyuan Lv <[email protected]> * * Contributors: * Terrence Xu <[email protected]> * Changbin Du <[email protected]> * Bing Niu <[email protected]> * Zhi Wang <[email protected]> * */ #include "display/intel_dp_aux_regs.h" #include "display/intel_gmbus_regs.h" #include "gvt.h" #include "i915_drv.h" #include "i915_reg.h" #define GMBUS1_TOTAL_BYTES_SHIFT 16 #define GMBUS1_TOTAL_BYTES_MASK 0x1ff #define gmbus1_total_byte_count(v) (((v) >> \ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK) #define gmbus1_slave_addr(v) (((v) & 0xff) >> 1) #define gmbus1_slave_index(v) (((v) >> 8) & 0xff) #define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7) /* GMBUS0 bits definitions */ #define _GMBUS_PIN_SEL_MASK (0x7) static unsigned char edid_get_byte(struct intel_vgpu *vgpu) { struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid; unsigned char chr = 0; if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } if (edid->current_edid_read >= EDID_SIZE) { gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); return 0; } if (!edid->edid_available) { gvt_vgpu_err("Reading EDID but EDID is not available!\n"); return 0; } if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) { struct intel_vgpu_edid_data *edid_data = intel_vgpu_port(vgpu, edid->port)->edid; chr = edid_data->edid_block[edid->current_edid_read]; edid->current_edid_read++; } else { gvt_vgpu_err("No EDID available during the reading?\n"); } return chr; } static inline int cnp_get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; if (port_select == GMBUS_PIN_1_BXT) port = PORT_B; else if (port_select == GMBUS_PIN_2_BXT) port = PORT_C; else if (port_select == GMBUS_PIN_3_BXT) port = PORT_D; else if (port_select == GMBUS_PIN_4_CNP) port = PORT_E; return port; } static inline int bxt_get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; if (port_select == GMBUS_PIN_1_BXT) port = PORT_B; else if (port_select == GMBUS_PIN_2_BXT) port = PORT_C; else if (port_select == GMBUS_PIN_3_BXT) port = PORT_D; return port; } static inline int get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; if (port_select == GMBUS_PIN_VGADDC) port = PORT_E; else if (port_select 
== GMBUS_PIN_DPC) port = PORT_C; else if (port_select == GMBUS_PIN_DPB) port = PORT_B; else if (port_select == GMBUS_PIN_DPD) port = PORT_D; return port; } static void reset_gmbus_controller(struct intel_vgpu *vgpu) { vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY; if (!vgpu->display.i2c_edid.edid_available) vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE; } /* GMBUS0 */ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int port, pin_select; memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK; intel_vgpu_init_i2c_edid(vgpu); if (pin_select == 0) return 0; if (IS_BROXTON(i915)) port = bxt_get_port_from_gmbus0(pin_select); else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) port = cnp_get_port_from_gmbus0(pin_select); else port = get_port_from_gmbus0(pin_select); if (drm_WARN_ON(&i915->drm, port < 0)) return 0; vgpu->display.i2c_edid.state = I2C_GMBUS; vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE; vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE; if (intel_vgpu_has_monitor_on_port(vgpu, port) && !intel_vgpu_port_is_dp(vgpu, port)) { vgpu->display.i2c_edid.port = port; vgpu->display.i2c_edid.edid_available = true; vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER; } else vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; return 0; } static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid; u32 slave_addr; u32 wvalue = *(u32 *)p_data; if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) { if (!(wvalue & GMBUS_SW_CLR_INT)) { vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT; reset_gmbus_controller(vgpu); } /* * TODO: "This bit is cleared to zero when an event * causes the HW_RDY bit transition to occur " */ } else { /* * per bspec setting this bit can cause: * 1) INT status bit cleared * 2) HW_RDY bit asserted */ if (wvalue & GMBUS_SW_CLR_INT) { vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT; vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY; } /* For virtualization, we suppose that HW is always ready, * so GMBUS_SW_RDY should always be cleared */ if (wvalue & GMBUS_SW_RDY) wvalue &= ~GMBUS_SW_RDY; i2c_edid->gmbus.total_byte_count = gmbus1_total_byte_count(wvalue); slave_addr = gmbus1_slave_addr(wvalue); /* vgpu gmbus only support EDID */ if (slave_addr == EDID_ADDR) { i2c_edid->slave_selected = true; } else if (slave_addr != 0) { gvt_dbg_dpy( "vgpu%d: unsupported gmbus slave addr(0x%x)\n" " gmbus operations will be ignored.\n", vgpu->id, slave_addr); } if (wvalue & GMBUS_CYCLE_INDEX) i2c_edid->current_edid_read = gmbus1_slave_index(wvalue); i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue); switch (gmbus1_bus_cycle(wvalue)) { case GMBUS_NOCYCLE: break; case GMBUS_STOP: /* From spec: * This can only cause a STOP to be generated * if a GMBUS cycle is generated, the GMBUS is * currently in a data/wait/idle phase, or it is in a * WAIT phase */ if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset)) != GMBUS_NOCYCLE) { intel_vgpu_init_i2c_edid(vgpu); /* After the 'stop' cycle, hw state would become * 'stop phase' and then 'idle phase' after a * few milliseconds. 
In emulation, we just set * it as 'idle phase' ('stop phase' is not * visible in gmbus interface) */ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE; vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; } break; case NIDX_NS_W: case IDX_NS_W: case NIDX_STOP: case IDX_STOP: /* From hw spec the GMBUS phase * transition like this: * START (-->INDEX) -->DATA */ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE; vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; break; default: gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n"); break; } /* * From hw spec the WAIT state will be * cleared: * (1) in a new GMBUS cycle * (2) by generating a stop */ vgpu_vreg(vgpu, offset) = wvalue; } return 0; } static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; drm_WARN_ON(&i915->drm, 1); return 0; } static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { int i; unsigned char byte_data; struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid; int byte_left = i2c_edid->gmbus.total_byte_count - i2c_edid->current_edid_read; int byte_count = byte_left; u32 reg_data = 0; /* Data can only be recevied if previous settings correct */ if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) { if (byte_left <= 0) { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); return 0; } if (byte_count > 4) byte_count = 4; for (i = 0; i < byte_count; i++) { byte_data = edid_get_byte(vgpu); reg_data |= (byte_data << (i << 3)); } memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count); memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); if (byte_left <= 4) { switch (i2c_edid->gmbus.cycle_type) { case NIDX_STOP: case IDX_STOP: i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE; break; case NIDX_NS_W: case IDX_NS_W: default: i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE; break; } intel_vgpu_init_i2c_edid(vgpu); } /* * Read GMBUS3 during send operation, * return the latest written value */ } else { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); gvt_vgpu_err("warning: gmbus3 read with nothing returned\n"); } return 0; } static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 value = vgpu_vreg(vgpu, offset); if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE)) vgpu_vreg(vgpu, offset) |= GMBUS_INUSE; memcpy(p_data, (void *)&value, bytes); return 0; } static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 wvalue = *(u32 *)p_data; if (wvalue & GMBUS_INUSE) vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE; /* All other bits are read-only */ return 0; } /** * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read * @vgpu: a vGPU * @offset: reg offset * @p_data: data return buffer * @bytes: access data length * * This function is used to emulate gmbus register mmio read * * Returns: * Zero on success, negative error code if failed. 
* */ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1)))) return -EINVAL; if (offset == i915_mmio_reg_offset(PCH_GMBUS2)) return gmbus2_mmio_read(vgpu, offset, p_data, bytes); else if (offset == i915_mmio_reg_offset(PCH_GMBUS3)) return gmbus3_mmio_read(vgpu, offset, p_data, bytes); memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); return 0; } /** * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write * @vgpu: a vGPU * @offset: reg offset * @p_data: data return buffer * @bytes: access data length * * This function is used to emulate gmbus register mmio write * * Returns: * Zero on success, negative error code if failed. * */ int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1)))) return -EINVAL; if (offset == i915_mmio_reg_offset(PCH_GMBUS0)) return gmbus0_mmio_write(vgpu, offset, p_data, bytes); else if (offset == i915_mmio_reg_offset(PCH_GMBUS1)) return gmbus1_mmio_write(vgpu, offset, p_data, bytes); else if (offset == i915_mmio_reg_offset(PCH_GMBUS2)) return gmbus2_mmio_write(vgpu, offset, p_data, bytes); else if (offset == i915_mmio_reg_offset(PCH_GMBUS3)) return gmbus3_mmio_write(vgpu, offset, p_data, bytes); memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); return 0; } enum { AUX_CH_CTL = 0, AUX_CH_DATA1, AUX_CH_DATA2, AUX_CH_DATA3, AUX_CH_DATA4, AUX_CH_DATA5 }; static inline int get_aux_ch_reg(unsigned int offset) { int reg; switch (offset & 0xff) { case 0x10: reg = AUX_CH_CTL; break; case 0x14: reg = AUX_CH_DATA1; break; case 0x18: reg = AUX_CH_DATA2; break; case 0x1c: reg = AUX_CH_DATA3; break; case 0x20: reg = AUX_CH_DATA4; break; case 0x24: reg = AUX_CH_DATA5; break; default: reg = -1; break; } return reg; } /** * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write * @vgpu: a vGPU * @port_idx: port index * @offset: reg offset * @p_data: write ptr * * This function is used to emulate AUX channel register write * */ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, int port_idx, unsigned int offset, void *p_data) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid; int msg_length, ret_msg_size; int msg, addr, ctrl, op; u32 value = *(u32 *)p_data; int aux_data_for_write = 0; int reg = get_aux_ch_reg(offset); if (reg != AUX_CH_CTL) { vgpu_vreg(vgpu, offset) = value; return; } msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; ctrl = (msg >> 24) & 0xff; op = ctrl >> 4; if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) { /* The ctl write to clear some states */ return; } /* Always set the wanted value for vms. */ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 
2 : 1); vgpu_vreg(vgpu, offset) = DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size); if (msg_length == 3) { if (!(op & GVT_AUX_I2C_MOT)) { /* stop */ intel_vgpu_init_i2c_edid(vgpu); } else { /* start or restart */ i2c_edid->aux_ch.i2c_over_aux_ch = true; i2c_edid->aux_ch.aux_ch_mot = true; if (addr == 0) { /* reset the address */ intel_vgpu_init_i2c_edid(vgpu); } else if (addr == EDID_ADDR) { i2c_edid->state = I2C_AUX_CH; i2c_edid->port = port_idx; i2c_edid->slave_selected = true; if (intel_vgpu_has_monitor_on_port(vgpu, port_idx) && intel_vgpu_port_is_dp(vgpu, port_idx)) i2c_edid->edid_available = true; } } } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { /* TODO * We only support EDID reading from I2C_over_AUX. And * we do not expect the index mode to be used. Right now * the WRITE operation is ignored. It is good enough to * support the gfx driver to do EDID access. */ } else { if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ)) return; if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; if (i2c_edid->edid_available && i2c_edid->slave_selected) { unsigned char val = edid_get_byte(vgpu); aux_data_for_write = (val << 16); } else aux_data_for_write = (0xff << 16); } /* write the return value in AUX_CH_DATA reg which includes: * ACK of I2C_WRITE * returned byte if it is READ */ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24; vgpu_vreg(vgpu, offset + 4) = aux_data_for_write; } /** * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation * @vgpu: a vGPU * * This function is used to initialize vGPU i2c edid emulation stuffs * */ void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu) { struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid; edid->state = I2C_NOT_SPECIFIED; edid->port = -1; edid->slave_selected = false; edid->edid_available = false; edid->current_edid_read = 0; memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus)); edid->aux_ch.i2c_over_aux_ch = false; edid->aux_ch.aux_ch_mot = false; }
linux-master
drivers/gpu/drm/i915/gvt/edid.c
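In the edid.c record above, gmbus3_mmio_read() returns EDID bytes four at a time, packing them little-endian into the 32-bit data register with reg_data |= byte << (i << 3). The standalone sketch below models just that packing; it is not part of the kernel file, and edid_stream and gmbus3_pack are hypothetical names.

/*
 * Illustrative sketch only: how a GMBUS3-style data register packs up to
 * four EDID bytes, little-endian, on each read.
 */
#include <stdint.h>
#include <stdio.h>

struct edid_stream {
	const uint8_t *edid;
	int size;
	int pos;        /* equivalent of current_edid_read */
};

/* Pack the next (up to) four bytes of the EDID into one 32-bit word. */
static uint32_t gmbus3_pack(struct edid_stream *s)
{
	uint32_t reg = 0;

	for (int i = 0; i < 4 && s->pos < s->size; i++, s->pos++)
		reg |= (uint32_t)s->edid[s->pos] << (i * 8);

	return reg;
}

int main(void)
{
	static const uint8_t edid[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	struct edid_stream s = { .edid = edid, .size = sizeof(edid) };

	printf("first word:  0x%08x\n", (unsigned)gmbus3_pack(&s));  /* 0xffffff00 */
	printf("second word: 0x%08x\n", (unsigned)gmbus3_pack(&s));  /* 0x00ffffff */
	return 0;
}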
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Anhua Xu * Kevin Tian <[email protected]> * * Contributors: * Min He <[email protected]> * Bing Niu <[email protected]> * Zhi Wang <[email protected]> * */ #include "i915_drv.h" #include "gvt.h" static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) { enum intel_engine_id i; struct intel_engine_cs *engine; for_each_engine(engine, vgpu->gvt->gt, i) { if (!list_empty(workload_q_head(vgpu, engine))) return true; } return false; } /* We give 2 seconds higher prio for vGPU during start */ #define GVT_SCHED_VGPU_PRI_TIME 2 struct vgpu_sched_data { struct list_head lru_list; struct intel_vgpu *vgpu; bool active; bool pri_sched; ktime_t pri_time; ktime_t sched_in_time; ktime_t sched_time; ktime_t left_ts; ktime_t allocated_ts; struct vgpu_sched_ctl sched_ctl; }; struct gvt_sched_data { struct intel_gvt *gvt; struct hrtimer timer; unsigned long period; struct list_head lru_runq_head; ktime_t expire_time; }; static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) { ktime_t delta_ts; struct vgpu_sched_data *vgpu_data; if (!vgpu || vgpu == vgpu->gvt->idle_vgpu) return; vgpu_data = vgpu->sched_data; delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time); vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts); vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts); vgpu_data->sched_in_time = cur_time; } #define GVT_TS_BALANCE_PERIOD_MS 100 #define GVT_TS_BALANCE_STAGE_NUM 10 static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; static u64 stage_check; int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is * allocated again without adding previous debt. 
*/ if (stage == 0) { int total_weight = 0; ktime_t fair_timeslice; list_for_each(pos, &sched_data->lru_runq_head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); total_weight += vgpu_data->sched_ctl.weight; } list_for_each(pos, &sched_data->lru_runq_head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS), total_weight) * vgpu_data->sched_ctl.weight; vgpu_data->allocated_ts = fair_timeslice; vgpu_data->left_ts = vgpu_data->allocated_ts; } } else { list_for_each(pos, &sched_data->lru_runq_head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); /* timeslice for next 100ms should add the left/debt * slice of previous stages. */ vgpu_data->left_ts += vgpu_data->allocated_ts; } } } static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id i; struct intel_engine_cs *engine; struct vgpu_sched_data *vgpu_data; ktime_t cur_time; /* no need to schedule if next_vgpu is the same with current_vgpu, * let scheduler chose next_vgpu again by setting it to NULL. */ if (scheduler->next_vgpu == scheduler->current_vgpu) { scheduler->next_vgpu = NULL; return; } /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu */ scheduler->need_reschedule = true; /* still have uncompleted workload? */ for_each_engine(engine, gvt->gt, i) { if (scheduler->current_workload[engine->id]) return; } cur_time = ktime_get(); vgpu_update_timeslice(scheduler->current_vgpu, cur_time); vgpu_data = scheduler->next_vgpu->sched_data; vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ scheduler->current_vgpu = scheduler->next_vgpu; scheduler->next_vgpu = NULL; scheduler->need_reschedule = false; /* wake up workload dispatch thread */ for_each_engine(engine, gvt->gt, i) wake_up(&scheduler->waitq[engine->id]); } static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) { struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; struct list_head *head = &sched_data->lru_runq_head; struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); if (!vgpu_has_pending_workload(vgpu_data->vgpu)) continue; if (vgpu_data->pri_sched) { if (ktime_before(ktime_get(), vgpu_data->pri_time)) { vgpu = vgpu_data->vgpu; break; } else vgpu_data->pri_sched = false; } /* Return the vGPU only if it has time slice left */ if (vgpu_data->left_ts > 0) { vgpu = vgpu_data->vgpu; break; } } return vgpu; } /* in nanosecond */ #define GVT_DEFAULT_TIME_SLICE 1000000 static void tbs_sched_func(struct gvt_sched_data *sched_data) { struct intel_gvt *gvt = sched_data->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; /* no active vgpu or has already had a target */ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) goto out; vgpu = find_busy_vgpu(sched_data); if (vgpu) { scheduler->next_vgpu = vgpu; vgpu_data = vgpu->sched_data; if (!vgpu_data->pri_sched) { /* Move the last used vGPU to the tail of lru_list */ list_del_init(&vgpu_data->lru_list); list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); } } else { scheduler->next_vgpu = gvt->idle_vgpu; } out: if (scheduler->next_vgpu) try_to_schedule_next_vgpu(gvt); } void intel_gvt_schedule(struct intel_gvt *gvt) { struct 
gvt_sched_data *sched_data = gvt->scheduler.sched_data; ktime_t cur_time; mutex_lock(&gvt->sched_lock); cur_time = ktime_get(); if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request)) { if (cur_time >= sched_data->expire_time) { gvt_balance_timeslice(sched_data); sched_data->expire_time = ktime_add_ms( cur_time, GVT_TS_BALANCE_PERIOD_MS); } } clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request); vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time); tbs_sched_func(sched_data); mutex_unlock(&gvt->sched_lock); } static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) { struct gvt_sched_data *data; data = container_of(timer_data, struct gvt_sched_data, timer); intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED); hrtimer_add_expires_ns(&data->timer, data->period); return HRTIMER_RESTART; } static int tbs_sched_init(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct gvt_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; INIT_LIST_HEAD(&data->lru_runq_head); hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; data->gvt = gvt; scheduler->sched_data = data; return 0; } static void tbs_sched_clean(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct gvt_sched_data *data = scheduler->sched_data; hrtimer_cancel(&data->timer); kfree(data); scheduler->sched_data = NULL; } static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { struct vgpu_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->sched_ctl.weight = vgpu->sched_ctl.weight; data->vgpu = vgpu; INIT_LIST_HEAD(&data->lru_list); vgpu->sched_data = data; return 0; } static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; kfree(vgpu->sched_data); vgpu->sched_data = NULL; /* this vgpu id has been removed */ if (idr_is_empty(&gvt->vgpu_idr)) hrtimer_cancel(&sched_data->timer); } static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; struct vgpu_sched_data *vgpu_data = vgpu->sched_data; ktime_t now; if (!list_empty(&vgpu_data->lru_list)) return; now = ktime_get(); vgpu_data->pri_time = ktime_add(now, ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); vgpu_data->pri_sched = true; list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), sched_data->period), HRTIMER_MODE_ABS); vgpu_data->active = true; } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { struct vgpu_sched_data *vgpu_data = vgpu->sched_data; list_del_init(&vgpu_data->lru_list); vgpu_data->active = false; } static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = { .init = tbs_sched_init, .clean = tbs_sched_clean, .init_vgpu = tbs_sched_init_vgpu, .clean_vgpu = tbs_sched_clean_vgpu, .start_schedule = tbs_sched_start_schedule, .stop_schedule = tbs_sched_stop_schedule, }; int intel_gvt_init_sched_policy(struct intel_gvt *gvt) { int ret; mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops = &tbs_schedule_ops; ret = gvt->scheduler.sched_ops->init(gvt); mutex_unlock(&gvt->sched_lock); return ret; } void intel_gvt_clean_sched_policy(struct intel_gvt *gvt) { 
mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops->clean(gvt); mutex_unlock(&gvt->sched_lock); } /* for per-vgpu scheduler policy, there are 2 per-vgpu data: * sched_data, and sched_ctl. We see these 2 data as part of * the global scheduler which are proteced by gvt->sched_lock. * Caller should make their decision if the vgpu_lock should * be hold outside. */ int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu) { int ret; mutex_lock(&vgpu->gvt->sched_lock); ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu); mutex_unlock(&vgpu->gvt->sched_lock); return ret; } void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) { mutex_lock(&vgpu->gvt->sched_lock); vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu); mutex_unlock(&vgpu->gvt->sched_lock); } void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { struct vgpu_sched_data *vgpu_data = vgpu->sched_data; mutex_lock(&vgpu->gvt->sched_lock); if (!vgpu_data->active) { gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } mutex_unlock(&vgpu->gvt->sched_lock); } void intel_gvt_kick_schedule(struct intel_gvt *gvt) { mutex_lock(&gvt->sched_lock); intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->sched_lock); } void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; struct vgpu_sched_data *vgpu_data = vgpu->sched_data; struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; if (!vgpu_data->active) return; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); mutex_lock(&vgpu->gvt->sched_lock); scheduler->sched_ops->stop_schedule(vgpu); if (scheduler->next_vgpu == vgpu) scheduler->next_vgpu = NULL; if (scheduler->current_vgpu == vgpu) { /* stop workload dispatching */ scheduler->need_reschedule = true; scheduler->current_vgpu = NULL; } intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for_each_engine(engine, vgpu->gvt->gt, id) { if (scheduler->engine_owner[engine->id] == vgpu) { intel_gvt_switch_mmio(vgpu, NULL, engine); scheduler->engine_owner[engine->id] = NULL; } } spin_unlock_bh(&scheduler->mmio_context_lock); intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); mutex_unlock(&vgpu->gvt->sched_lock); }
linux-master
drivers/gpu/drm/i915/gvt/sched_policy.c
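The sched_policy.c record above rebalances every 100 ms: at stage 0 of gvt_balance_timeslice(), each vGPU receives (period / total_weight) * weight of run time, and later stages add the unused or overdrawn remainder back into left_ts. The stage-0 arithmetic is shown below as a plain user-space sketch; SLICE_PERIOD_NS and fair_slice_ns are hypothetical names, not kernel symbols.

/*
 * Illustrative sketch only: the weighted time-slice split performed at
 * stage 0 of the balance period, expressed with plain integers.
 */
#include <stdint.h>
#include <stdio.h>

#define SLICE_PERIOD_NS (100 * 1000 * 1000LL)   /* 100 ms balance period */

/* Share of the period a vGPU with 'weight' gets out of 'total_weight'. */
static int64_t fair_slice_ns(int weight, int total_weight)
{
	return (SLICE_PERIOD_NS / total_weight) * weight;
}

int main(void)
{
	int weights[] = { 4, 8, 4 };   /* three vGPUs with different scheduling weights */
	int total = 4 + 8 + 4;

	for (int i = 0; i < 3; i++)
		printf("vgpu%d: %lld ns per 100 ms\n",
		       i, (long long)fair_slice_ns(weights[i], total));
	return 0;
}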
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <[email protected]> * Dexuan Cui * * Contributors: * Pei Zhang <[email protected]> * Min He <[email protected]> * Niu Bing <[email protected]> * Yulei Zhang <[email protected]> * Zhenyu Wang <[email protected]> * Zhi Wang <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "gt/intel_ggtt_fencing.h" #include "gvt.h" static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; struct intel_gt *gt = gvt->gt; unsigned int flags; u64 start, end, size; struct drm_mm_node *node; int ret; if (high_gm) { node = &vgpu->gm.high_gm_node; size = vgpu_hidden_sz(vgpu); start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); flags = PIN_HIGH; } else { node = &vgpu->gm.low_gm_node; size = vgpu_aperture_sz(vgpu); start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); flags = PIN_MAPPABLE; } mutex_lock(&gt->ggtt->vm.mutex); mmio_hw_access_pre(gt); ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); mmio_hw_access_post(gt); mutex_unlock(&gt->ggtt->vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", high_gm ? 
"high" : "low"); return ret; } static int alloc_vgpu_gm(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct intel_gt *gt = gvt->gt; int ret; ret = alloc_gm(vgpu, false); if (ret) return ret; ret = alloc_gm(vgpu, true); if (ret) goto out_free_aperture; gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id, vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu)); gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id, vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu)); return 0; out_free_aperture: mutex_lock(&gt->ggtt->vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); mutex_unlock(&gt->ggtt->vm.mutex); return ret; } static void free_vgpu_gm(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct intel_gt *gt = gvt->gt; mutex_lock(&gt->ggtt->vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); drm_mm_remove_node(&vgpu->gm.high_gm_node); mutex_unlock(&gt->ggtt->vm.mutex); } /** * intel_vgpu_write_fence - write fence registers owned by a vGPU * @vgpu: vGPU instance * @fence: vGPU fence register number * @value: Fence register value to be written * * This function is used to write fence registers owned by a vGPU. The vGPU * fence register number will be translated into HW fence register number. * */ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; i915_reg_t fence_reg_lo, fence_reg_hi; assert_rpm_wakelock_held(uncore->rpm); if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu))) return; reg = vgpu->fence.regs[fence]; if (drm_WARN_ON(&i915->drm, !reg)) return; fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); intel_uncore_write(uncore, fence_reg_lo, 0); intel_uncore_posting_read(uncore, fence_reg_lo); intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value)); intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value)); intel_uncore_posting_read(uncore, fence_reg_lo); } static void _clear_vgpu_fence(struct intel_vgpu *vgpu) { int i; for (i = 0; i < vgpu_fence_sz(vgpu); i++) intel_vgpu_write_fence(vgpu, i, 0); } static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; intel_wakeref_t wakeref; u32 i; if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu))) return; wakeref = intel_runtime_pm_get(uncore->rpm); mutex_lock(&gvt->gt->ggtt->vm.mutex); _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } mutex_unlock(&gvt->gt->ggtt->vm.mutex); intel_runtime_pm_put(uncore->rpm, wakeref); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; intel_wakeref_t wakeref; int i; wakeref = intel_runtime_pm_get(uncore->rpm); /* Request fences from host */ mutex_lock(&gvt->gt->ggtt->vm.mutex); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = i915_reserve_fence(gvt->gt->ggtt); if (IS_ERR(reg)) goto out_free_fence; vgpu->fence.regs[i] = reg; } _clear_vgpu_fence(vgpu); mutex_unlock(&gvt->gt->ggtt->vm.mutex); intel_runtime_pm_put(uncore->rpm, wakeref); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); /* Return fences to host, if fail */ for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = 
vgpu->fence.regs[i]; if (!reg) continue; i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } mutex_unlock(&gvt->gt->ggtt->vm.mutex); intel_runtime_pm_put_unchecked(uncore->rpm); return -ENOSPC; } static void free_resource(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu); gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu); gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu); } static int alloc_resource(struct intel_vgpu *vgpu, const struct intel_vgpu_config *conf) { struct intel_gvt *gvt = vgpu->gvt; unsigned long request, avail, max, taken; const char *item; if (!conf->low_mm || !conf->high_mm || !conf->fence) { gvt_vgpu_err("Invalid vGPU creation params\n"); return -EINVAL; } item = "low GM space"; max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; taken = gvt->gm.vgpu_allocated_low_gm_size; avail = max - taken; request = conf->low_mm; if (request > avail) goto no_enough_resource; vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); item = "high GM space"; max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; taken = gvt->gm.vgpu_allocated_high_gm_size; avail = max - taken; request = conf->high_mm; if (request > avail) goto no_enough_resource; vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); item = "fence"; max = gvt_fence_sz(gvt) - HOST_FENCE; taken = gvt->fence.vgpu_allocated_fence_num; avail = max - taken; request = conf->fence; if (request > avail) goto no_enough_resource; vgpu_fence_sz(vgpu) = request; gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm; gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm; gvt->fence.vgpu_allocated_fence_num += conf->fence; return 0; no_enough_resource: gvt_err("fail to allocate resource %s\n", item); gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n", BYTES_TO_MB(request), BYTES_TO_MB(avail), BYTES_TO_MB(max), BYTES_TO_MB(taken)); return -ENOSPC; } /** * intel_vgpu_free_resource() - free HW resource owned by a vGPU * @vgpu: a vGPU * * This function is used to free the HW resource owned by a vGPU. * */ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) { free_vgpu_gm(vgpu); free_vgpu_fence(vgpu); free_resource(vgpu); } /** * intel_vgpu_reset_resource - reset resource state owned by a vGPU * @vgpu: a vGPU * * This function is used to reset resource state owned by a vGPU. * */ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; intel_wakeref_t wakeref; with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref) _clear_vgpu_fence(vgpu); } /** * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU * @vgpu: vGPU * @conf: vGPU creation params * * This function is used to allocate HW resource for a vGPU. User specifies * the resource configuration through the creation params. * * Returns: * zero on success, negative error code if failed. * */ int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, const struct intel_vgpu_config *conf) { int ret; ret = alloc_resource(vgpu, conf); if (ret) return ret; ret = alloc_vgpu_gm(vgpu); if (ret) goto out_free_resource; ret = alloc_vgpu_fence(vgpu); if (ret) goto out_free_vgpu_gm; return 0; out_free_vgpu_gm: free_vgpu_gm(vgpu); out_free_resource: free_resource(vgpu); return ret; }
linux-master
drivers/gpu/drm/i915/gvt/aperture_gm.c
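In the aperture_gm.c record above, alloc_resource() checks the requested low GM, high GM and fence counts against what remains after the host share and earlier vGPUs, and only bumps the gvt-wide counters once all three checks pass. The sketch below is a simplified model of that availability check (it commits per pool rather than after all checks succeed); pool and claim are hypothetical names.

/*
 * Illustrative sketch only: check-then-commit accounting against a shared
 * resource pool, in the spirit of alloc_resource().
 */
#include <stdbool.h>
#include <stdio.h>

struct pool {
	const char *name;
	unsigned long max;     /* total available to vGPUs (host share removed) */
	unsigned long taken;   /* already handed out to existing vGPUs          */
};

/* Reserve 'request' units from the pool, or report how much was available. */
static bool claim(struct pool *p, unsigned long request)
{
	if (request > p->max - p->taken) {
		printf("not enough %s: request %lu, avail %lu\n",
		       p->name, request, p->max - p->taken);
		return false;
	}
	p->taken += request;
	return true;
}

int main(void)
{
	struct pool low  = { .name = "low GM",  .max = 512,  .taken = 256 };
	struct pool high = { .name = "high GM", .max = 2048, .taken = 1024 };

	if (claim(&low, 128) && claim(&high, 512))
		printf("vGPU resources reserved\n");
	return 0;
}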
/* * KVMGT - the implementation of Intel mediated pass-through framework for KVM * * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <[email protected]> * Jike Song <[email protected]> * Xiaoguang Chen <[email protected]> * Eddie Dong <[email protected]> * * Contributors: * Niu Bing <[email protected]> * Zhi Wang <[email protected]> */ #include <linux/init.h> #include <linux/mm.h> #include <linux/kthread.h> #include <linux/sched/mm.h> #include <linux/types.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/eventfd.h> #include <linux/mdev.h> #include <linux/debugfs.h> #include <linux/nospec.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "intel_gvt.h" #include "gvt.h" MODULE_IMPORT_NS(DMA_BUF); MODULE_IMPORT_NS(I915_GVT); /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) #define EDID_BLOB_OFFSET (PAGE_SIZE/2) #define OPREGION_SIGNATURE "IntelGraphicsMem" struct vfio_region; struct intel_vgpu_regops { size_t (*rw)(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite); void (*release)(struct intel_vgpu *vgpu, struct vfio_region *region); }; struct vfio_region { u32 type; u32 subtype; size_t size; u32 flags; const struct intel_vgpu_regops *ops; void *data; }; struct vfio_edid_region { struct vfio_region_gfx_edid vfio_edid_regs; void *edid_blob; }; struct kvmgt_pgfn { gfn_t gfn; struct hlist_node hnode; }; struct gvt_dma { struct intel_vgpu *vgpu; struct rb_node gfn_node; struct rb_node dma_addr_node; gfn_t gfn; dma_addr_t dma_addr; unsigned long size; struct kref ref; }; #define vfio_dev_to_vgpu(vfio_dev) \ container_of((vfio_dev), struct intel_vgpu, vfio_device) static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node); static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, struct kvm_page_track_notifier_node *node); static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf) { struct intel_vgpu_type *type = container_of(mtype, struct intel_vgpu_type, type); return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" "fence: %d\nresolution: %s\n" "weight: %d\n", 
BYTES_TO_MB(type->conf->low_mm), BYTES_TO_MB(type->conf->high_mm), type->conf->fence, vgpu_edid_str(type->conf->edid), type->conf->weight); } static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, DIV_ROUND_UP(size, PAGE_SIZE)); } /* Pin a normal or compound guest page for dma. */ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { int total_pages = DIV_ROUND_UP(size, PAGE_SIZE); struct page *base_page = NULL; int npage; int ret; /* * We pin the pages one-by-one to avoid allocating a big arrary * on stack to hold pfns. */ for (npage = 0; npage < total_pages; npage++) { dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; struct page *cur_page; ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1, IOMMU_READ | IOMMU_WRITE, &cur_page); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n", &cur_iova, ret); goto err; } if (npage == 0) base_page = cur_page; else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) { ret = -EINVAL; npage++; goto err; } } *page = base_page; return 0; err: if (npage) gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); return ret; } static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t *dma_addr, unsigned long size) { struct device *dev = vgpu->gvt->gt->i915->drm.dev; struct page *page = NULL; int ret; ret = gvt_pin_guest_page(vgpu, gfn, size, &page); if (ret) return ret; /* Setup DMA mapping. */ *dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) { gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n", page_to_pfn(page), ret); gvt_unpin_guest_page(vgpu, gfn, size); return -ENOMEM; } return 0; } static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) { struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL); gvt_unpin_guest_page(vgpu, gfn, size); } static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct rb_node *node = vgpu->dma_addr_cache.rb_node; struct gvt_dma *itr; while (node) { itr = rb_entry(node, struct gvt_dma, dma_addr_node); if (dma_addr < itr->dma_addr) node = node->rb_left; else if (dma_addr > itr->dma_addr) node = node->rb_right; else return itr; } return NULL; } static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) { struct rb_node *node = vgpu->gfn_cache.rb_node; struct gvt_dma *itr; while (node) { itr = rb_entry(node, struct gvt_dma, gfn_node); if (gfn < itr->gfn) node = node->rb_left; else if (gfn > itr->gfn) node = node->rb_right; else return itr; } return NULL; } static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, dma_addr_t dma_addr, unsigned long size) { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); if (!new) return -ENOMEM; new->vgpu = vgpu; new->gfn = gfn; new->dma_addr = dma_addr; new->size = size; kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. */ link = &vgpu->gfn_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, gfn_node); if (gfn < itr->gfn) link = &parent->rb_left; else link = &parent->rb_right; } rb_link_node(&new->gfn_node, parent, link); rb_insert_color(&new->gfn_node, &vgpu->gfn_cache); /* dma_addr_cache maps dma addr to struct gvt_dma. 
*/ parent = NULL; link = &vgpu->dma_addr_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, dma_addr_node); if (dma_addr < itr->dma_addr) link = &parent->rb_left; else link = &parent->rb_right; } rb_link_node(&new->dma_addr_node, parent, link); rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache); vgpu->nr_cache_entries++; return 0; } static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, struct gvt_dma *entry) { rb_erase(&entry->gfn_node, &vgpu->gfn_cache); rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache); kfree(entry); vgpu->nr_cache_entries--; } static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; for (;;) { mutex_lock(&vgpu->cache_lock); node = rb_first(&vgpu->gfn_cache); if (!node) { mutex_unlock(&vgpu->cache_lock); break; } dma = rb_entry(node, struct gvt_dma, gfn_node); gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); mutex_unlock(&vgpu->cache_lock); } } static void gvt_cache_init(struct intel_vgpu *vgpu) { vgpu->gfn_cache = RB_ROOT; vgpu->dma_addr_cache = RB_ROOT; vgpu->nr_cache_entries = 0; mutex_init(&vgpu->cache_lock); } static void kvmgt_protect_table_init(struct intel_vgpu *info) { hash_init(info->ptable); } static void kvmgt_protect_table_destroy(struct intel_vgpu *info) { struct kvmgt_pgfn *p; struct hlist_node *tmp; int i; hash_for_each_safe(info->ptable, i, tmp, p, hnode) { hash_del(&p->hnode); kfree(p); } } static struct kvmgt_pgfn * __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p, *res = NULL; lockdep_assert_held(&info->vgpu_lock); hash_for_each_possible(info->ptable, p, hnode, gfn) { if (gfn == p->gfn) { res = p; break; } } return res; } static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; p = __kvmgt_protect_table_find(info, gfn); return !!p; } static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; if (kvmgt_gfn_is_write_protected(info, gfn)) return; p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC); if (WARN(!p, "gfn: 0x%llx\n", gfn)) return; p->gfn = gfn; hash_add(info->ptable, &p->hnode, gfn); } static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; p = __kvmgt_protect_table_find(info, gfn); if (p) { hash_del(&p->hnode); kfree(p); } } static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; void *base = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos >= vgpu->region[i].size || iswrite) { gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); return -EINVAL; } count = min(count, (size_t)(vgpu->region[i].size - pos)); memcpy(buf, base + pos, count); return count; } static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu, struct vfio_region *region) { } static const struct intel_vgpu_regops intel_vgpu_regops_opregion = { .rw = intel_vgpu_reg_rw_opregion, .release = intel_vgpu_reg_release_opregion, }; static int handle_edid_regs(struct intel_vgpu *vgpu, struct vfio_edid_region *region, char *buf, size_t count, u16 offset, bool is_write) { struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs; unsigned int data; if (offset + count > sizeof(*regs)) return -EINVAL; if (count != 4) return -EINVAL; if (is_write) { data = *((unsigned int *)buf); switch (offset) { case 
offsetof(struct vfio_region_gfx_edid, link_state): if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) { if (!drm_edid_block_valid( (u8 *)region->edid_blob, 0, true, NULL)) { gvt_vgpu_err("invalid EDID blob\n"); return -EINVAL; } intel_vgpu_emulate_hotplug(vgpu, true); } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) intel_vgpu_emulate_hotplug(vgpu, false); else { gvt_vgpu_err("invalid EDID link state %d\n", regs->link_state); return -EINVAL; } regs->link_state = data; break; case offsetof(struct vfio_region_gfx_edid, edid_size): if (data > regs->edid_max_size) { gvt_vgpu_err("EDID size is bigger than %d!\n", regs->edid_max_size); return -EINVAL; } regs->edid_size = data; break; default: /* read-only regs */ gvt_vgpu_err("write read-only EDID region at offset %d\n", offset); return -EPERM; } } else { memcpy(buf, (char *)regs + offset, count); } return count; } static int handle_edid_blob(struct vfio_edid_region *region, char *buf, size_t count, u16 offset, bool is_write) { if (offset + count > region->vfio_edid_regs.edid_size) return -EINVAL; if (is_write) memcpy(region->edid_blob + offset, buf, count); else memcpy(buf, region->edid_blob + offset, count); return count; } static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { int ret; unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; struct vfio_edid_region *region = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos < region->vfio_edid_regs.edid_offset) { ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite); } else { pos -= EDID_BLOB_OFFSET; ret = handle_edid_blob(region, buf, count, pos, iswrite); } if (ret < 0) gvt_vgpu_err("failed to access EDID region\n"); return ret; } static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu, struct vfio_region *region) { kfree(region->data); } static const struct intel_vgpu_regops intel_vgpu_regops_edid = { .rw = intel_vgpu_reg_rw_edid, .release = intel_vgpu_reg_release_edid, }; static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, unsigned int type, unsigned int subtype, const struct intel_vgpu_regops *ops, size_t size, u32 flags, void *data) { struct vfio_region *region; region = krealloc(vgpu->region, (vgpu->num_regions + 1) * sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; vgpu->region = region; vgpu->region[vgpu->num_regions].type = type; vgpu->region[vgpu->num_regions].subtype = subtype; vgpu->region[vgpu->num_regions].ops = ops; vgpu->region[vgpu->num_regions].size = size; vgpu->region[vgpu->num_regions].flags = flags; vgpu->region[vgpu->num_regions].data = data; vgpu->num_regions++; return 0; } int intel_gvt_set_opregion(struct intel_vgpu *vgpu) { void *base; int ret; /* Each vgpu has its own opregion, although VFIO would create another * one later. This one is used to expose opregion to VFIO. And the * other one created by VFIO later, is used by guest actually. 
*/ base = vgpu_opregion(vgpu)->va; if (!base) return -ENOMEM; if (memcmp(base, OPREGION_SIGNATURE, 16)) { memunmap(base); return -EINVAL; } ret = intel_vgpu_register_reg(vgpu, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &intel_vgpu_regops_opregion, OPREGION_SIZE, VFIO_REGION_INFO_FLAG_READ, base); return ret; } int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; int ret; base = kzalloc(sizeof(*base), GFP_KERNEL); if (!base) return -ENOMEM; /* TODO: Add multi-port and EDID extension block support */ base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET; base->vfio_edid_regs.edid_max_size = EDID_SIZE; base->vfio_edid_regs.edid_size = EDID_SIZE; base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id); base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id); base->edid_blob = port->edid->edid_block; ret = intel_vgpu_register_reg(vgpu, VFIO_REGION_TYPE_GFX, VFIO_REGION_SUBTYPE_GFX_EDID, &intel_vgpu_regops_edid, EDID_SIZE, VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE | VFIO_REGION_INFO_FLAG_CAPS, base); return ret; } static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova, u64 length) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); struct gvt_dma *entry; u64 iov_pfn = iova >> PAGE_SHIFT; u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE; mutex_lock(&vgpu->cache_lock); for (; iov_pfn < end_iov_pfn; iov_pfn++) { entry = __gvt_cache_find_gfn(vgpu, iov_pfn); if (!entry) continue; gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr, entry->size); __gvt_cache_remove_entry(vgpu, entry); } mutex_unlock(&vgpu->cache_lock); } static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) { struct intel_vgpu *itr; int id; bool ret = false; mutex_lock(&vgpu->gvt->lock); for_each_active_vgpu(vgpu->gvt, itr, id) { if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status)) continue; if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) { ret = true; goto out; } } out: mutex_unlock(&vgpu->gvt->lock); return ret; } static int intel_vgpu_open_device(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); int ret; if (__kvmgt_vgpu_exist(vgpu)) return -EEXIST; vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region; ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); if (ret) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return ret; } set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, &vgpu->nr_cache_entries); intel_gvt_activate_vgpu(vgpu); return 0; } static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) { struct eventfd_ctx *trigger; trigger = vgpu->msi_trigger; if (trigger) { eventfd_ctx_put(trigger); vgpu->msi_trigger = NULL; } } static void intel_vgpu_close_device(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); intel_gvt_release_vgpu(vgpu); clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status); debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs); kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm, &vgpu->track_node); kvmgt_protect_table_destroy(vgpu); gvt_cache_destroy(vgpu); WARN_ON(vgpu->nr_cache_entries); vgpu->gfn_cache = RB_ROOT; vgpu->dma_addr_cache = RB_ROOT; intel_vgpu_release_msi_eventfd_ctx(vgpu); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu 
*vgpu, int bar) { u32 start_lo, start_hi; u32 mem_type; start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) & PCI_BASE_ADDRESS_MEM_MASK; mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_64: start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar + 4)); break; case PCI_BASE_ADDRESS_MEM_TYPE_32: case PCI_BASE_ADDRESS_MEM_TYPE_1M: /* 1M mem BAR treated as 32-bit BAR */ default: /* mem unknown type treated as 32-bit BAR */ start_hi = 0; break; } return ((u64)start_hi << 32) | start_lo; } static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, void *buf, unsigned int count, bool is_write) { u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar); int ret; if (is_write) ret = intel_vgpu_emulate_mmio_write(vgpu, bar_start + off, buf, count); else ret = intel_vgpu_emulate_mmio_read(vgpu, bar_start + off, buf, count); return ret; } static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) { return off >= vgpu_aperture_offset(vgpu) && off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu); } static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, void *buf, unsigned long count, bool is_write) { void __iomem *aperture_va; if (!intel_vgpu_in_aperture(vgpu, off) || !intel_vgpu_in_aperture(vgpu, off + count)) { gvt_vgpu_err("Invalid aperture offset %llu\n", off); return -EINVAL; } aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap, ALIGN_DOWN(off, PAGE_SIZE), count + offset_in_page(off)); if (!aperture_va) return -EIO; if (is_write) memcpy_toio(aperture_va + offset_in_page(off), buf, count); else memcpy_fromio(buf, aperture_va + offset_in_page(off), count); io_mapping_unmap(aperture_va); return 0; } static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool is_write) { unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) { gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } switch (index) { case VFIO_PCI_CONFIG_REGION_INDEX: if (is_write) ret = intel_vgpu_emulate_cfg_write(vgpu, pos, buf, count); else ret = intel_vgpu_emulate_cfg_read(vgpu, pos, buf, count); break; case VFIO_PCI_BAR0_REGION_INDEX: ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos, buf, count, is_write); break; case VFIO_PCI_BAR2_REGION_INDEX: ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write); break; case VFIO_PCI_BAR1_REGION_INDEX: case VFIO_PCI_BAR3_REGION_INDEX: case VFIO_PCI_BAR4_REGION_INDEX: case VFIO_PCI_BAR5_REGION_INDEX: case VFIO_PCI_VGA_REGION_INDEX: case VFIO_PCI_ROM_REGION_INDEX: break; default: if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) return -EINVAL; index -= VFIO_PCI_NUM_REGIONS; return vgpu->region[index].ops->rw(vgpu, buf, count, ppos, is_write); } return ret == 0 ? count : ret; } static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) { unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); struct intel_gvt *gvt = vgpu->gvt; int offset; /* Only allow MMIO GGTT entry access */ if (index != PCI_BASE_ADDRESS_0) return false; offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) - intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0); return (offset >= gvt->device_info.gtt_start_offset && offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ? 
true : false; } static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf, size_t count, loff_t *ppos) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; while (count) { size_t filled; /* Only support GGTT entry 8 bytes read */ if (count >= 8 && !(*ppos % 8) && gtt_entry(vgpu, ppos)) { u64 val; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; if (copy_to_user(buf, &val, sizeof(val))) goto read_err; filled = 8; } else if (count >= 4 && !(*ppos % 4)) { u32 val; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; if (copy_to_user(buf, &val, sizeof(val))) goto read_err; filled = 4; } else if (count >= 2 && !(*ppos % 2)) { u16 val; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; if (copy_to_user(buf, &val, sizeof(val))) goto read_err; filled = 2; } else { u8 val; ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; if (copy_to_user(buf, &val, sizeof(val))) goto read_err; filled = 1; } count -= filled; done += filled; *ppos += filled; buf += filled; } return done; read_err: return -EFAULT; } static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev, const char __user *buf, size_t count, loff_t *ppos) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; while (count) { size_t filled; /* Only support GGTT entry 8 bytes write */ if (count >= 8 && !(*ppos % 8) && gtt_entry(vgpu, ppos)) { u64 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; filled = 8; } else if (count >= 4 && !(*ppos % 4)) { u32 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; filled = 4; } else if (count >= 2 && !(*ppos % 2)) { u16 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; filled = 2; } else { u8 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; filled = 1; } count -= filled; done += filled; *ppos += filled; buf += filled; } return done; write_err: return -EFAULT; } static int intel_vgpu_mmap(struct vfio_device *vfio_dev, struct vm_area_struct *vma) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int index; u64 virtaddr; unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); if (index >= VFIO_PCI_ROM_REGION_INDEX) return -EINVAL; if (vma->vm_end < vma->vm_start) return -EINVAL; if ((vma->vm_flags & VM_SHARED) == 0) return -EINVAL; if (index != VFIO_PCI_BAR2_REGION_INDEX) return -EINVAL; pg_prot = vma->vm_page_prot; virtaddr = vma->vm_start; req_size = vma->vm_end - vma->vm_start; pgoff = vma->vm_pgoff & ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); req_start = pgoff << PAGE_SHIFT; if (!intel_vgpu_in_aperture(vgpu, req_start)) return -EINVAL; if (req_start + req_size > vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) return -EINVAL; pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type) { if 
(type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX) return 1; return 0; } static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, u32 flags, void *data) { return 0; } static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, u32 flags, void *data) { return 0; } static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, u32 flags, void *data) { return 0; } static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, u32 flags, void *data) { struct eventfd_ctx *trigger; if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int fd = *(int *)data; trigger = eventfd_ctx_fdget(fd); if (IS_ERR(trigger)) { gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } vgpu->msi_trigger = trigger; } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) intel_vgpu_release_msi_eventfd_ctx(vgpu); return 0; } static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, unsigned int index, unsigned int start, unsigned int count, void *data) { int (*func)(struct intel_vgpu *vgpu, unsigned int index, unsigned int start, unsigned int count, u32 flags, void *data) = NULL; switch (index) { case VFIO_PCI_INTX_IRQ_INDEX: switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK: func = intel_vgpu_set_intx_mask; break; case VFIO_IRQ_SET_ACTION_UNMASK: func = intel_vgpu_set_intx_unmask; break; case VFIO_IRQ_SET_ACTION_TRIGGER: func = intel_vgpu_set_intx_trigger; break; } break; case VFIO_PCI_MSI_IRQ_INDEX: switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK: case VFIO_IRQ_SET_ACTION_UNMASK: /* XXX Need masking support exported */ break; case VFIO_IRQ_SET_ACTION_TRIGGER: func = intel_vgpu_set_msi_trigger; break; } break; } if (!func) return -ENOTTY; return func(vgpu, index, start, count, flags, data); } static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd, unsigned long arg) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); if (cmd == VFIO_DEVICE_GET_INFO) { struct vfio_device_info info; minsz = offsetofend(struct vfio_device_info, num_irqs); if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; if (info.argsz < minsz) return -EINVAL; info.flags = VFIO_DEVICE_FLAGS_PCI; info.flags |= VFIO_DEVICE_FLAGS_RESET; info.num_regions = VFIO_PCI_NUM_REGIONS + vgpu->num_regions; info.num_irqs = VFIO_PCI_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { struct vfio_region_info info; struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; unsigned int i; int ret; struct vfio_region_info_cap_sparse_mmap *sparse = NULL; int nr_areas = 1; int cap_type_id; minsz = offsetofend(struct vfio_region_info, offset); if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; if (info.argsz < minsz) return -EINVAL; switch (info.index) { case VFIO_PCI_CONFIG_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = vgpu->gvt->device_info.cfg_space_size; info.flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; break; case VFIO_PCI_BAR0_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = vgpu->cfg_space.bar[info.index].size; if (!info.size) { info.flags = 0; break; } info.flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; break; case VFIO_PCI_BAR1_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = 0; info.flags = 0; break; case VFIO_PCI_BAR2_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = VFIO_REGION_INFO_FLAG_CAPS | VFIO_REGION_INFO_FLAG_MMAP | VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; info.size = gvt_aperture_sz(vgpu->gvt); sparse = kzalloc(struct_size(sparse, areas, nr_areas), GFP_KERNEL); if (!sparse) return -ENOMEM; sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; sparse->header.version = 1; sparse->nr_areas = nr_areas; cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; sparse->areas[0].offset = PAGE_ALIGN(vgpu_aperture_offset(vgpu)); sparse->areas[0].size = vgpu_aperture_sz(vgpu); break; case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = 0; info.flags = 0; gvt_dbg_core("get region info bar:%d\n", info.index); break; case VFIO_PCI_ROM_REGION_INDEX: case VFIO_PCI_VGA_REGION_INDEX: info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = 0; info.flags = 0; gvt_dbg_core("get region info index:%d\n", info.index); break; default: { struct vfio_region_info_cap_type cap_type = { .header.id = VFIO_REGION_INFO_CAP_TYPE, .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) return -EINVAL; info.index = array_index_nospec(info.index, VFIO_PCI_NUM_REGIONS + vgpu->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.size = vgpu->region[i].size; info.flags = vgpu->region[i].flags; cap_type.type = vgpu->region[i].type; cap_type.subtype = vgpu->region[i].subtype; ret = vfio_info_add_capability(&caps, &cap_type.header, sizeof(cap_type)); if (ret) return ret; } } if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) { switch (cap_type_id) { case VFIO_REGION_INFO_CAP_SPARSE_MMAP: ret = vfio_info_add_capability(&caps, &sparse->header, struct_size(sparse, areas, sparse->nr_areas)); if (ret) { kfree(sparse); return ret; } break; default: kfree(sparse); return -EINVAL; } } if (caps.size) { info.flags |= VFIO_REGION_INFO_FLAG_CAPS; if (info.argsz < sizeof(info) + caps.size) { info.argsz = sizeof(info) + caps.size; info.cap_offset = 0; } else { vfio_info_cap_shift(&caps, sizeof(info)); if (copy_to_user((void __user *)arg + sizeof(info), caps.buf, caps.size)) { kfree(caps.buf); kfree(sparse); return -EFAULT; } info.cap_offset = sizeof(info); } kfree(caps.buf); } kfree(sparse); return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { struct vfio_irq_info info; minsz = offsetofend(struct vfio_irq_info, count); if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) return -EINVAL; switch (info.index) { case VFIO_PCI_INTX_IRQ_INDEX: case VFIO_PCI_MSI_IRQ_INDEX: break; default: return -EINVAL; } info.flags = VFIO_IRQ_INFO_EVENTFD; info.count = intel_vgpu_get_irq_count(vgpu, info.index); if (info.index == VFIO_PCI_INTX_IRQ_INDEX) info.flags |= (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED); else info.flags |= VFIO_IRQ_INFO_NORESIZE; return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } else if (cmd == VFIO_DEVICE_SET_IRQS) { struct vfio_irq_set hdr; u8 *data = NULL; int ret = 0; size_t data_size = 0; minsz = offsetofend(struct vfio_irq_set, count); if (copy_from_user(&hdr, (void __user *)arg, minsz)) return -EFAULT; if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { int max = intel_vgpu_get_irq_count(vgpu, hdr.index); ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS, &data_size); if (ret) { gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); return -EINVAL; } if (data_size) { data = memdup_user((void __user *)(arg + minsz), data_size); if (IS_ERR(data)) return PTR_ERR(data); } } ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index, hdr.start, hdr.count, data); kfree(data); return ret; } else if (cmd == VFIO_DEVICE_RESET) { intel_gvt_reset_vgpu(vgpu); return 0; } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { struct vfio_device_gfx_plane_info dmabuf; int ret = 0; minsz = offsetofend(struct vfio_device_gfx_plane_info, dmabuf_id); if (copy_from_user(&dmabuf, (void __user *)arg, minsz)) return -EFAULT; if (dmabuf.argsz < minsz) return -EINVAL; ret = intel_vgpu_query_plane(vgpu, &dmabuf); if (ret != 0) return ret; return copy_to_user((void __user *)arg, &dmabuf, minsz) ? 
-EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { __u32 dmabuf_id; if (get_user(dmabuf_id, (__u32 __user *)arg)) return -EFAULT; return intel_vgpu_get_dmabuf(vgpu, dmabuf_id); } return -ENOTTY; } static ssize_t vgpu_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_vgpu *vgpu = dev_get_drvdata(dev); return sprintf(buf, "%d\n", vgpu->id); } static DEVICE_ATTR_RO(vgpu_id); static struct attribute *intel_vgpu_attrs[] = { &dev_attr_vgpu_id.attr, NULL }; static const struct attribute_group intel_vgpu_group = { .name = "intel_vgpu", .attrs = intel_vgpu_attrs, }; static const struct attribute_group *intel_vgpu_groups[] = { &intel_vgpu_group, NULL, }; static int intel_vgpu_init_dev(struct vfio_device *vfio_dev) { struct mdev_device *mdev = to_mdev_device(vfio_dev->dev); struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); struct intel_vgpu_type *type = container_of(mdev->type, struct intel_vgpu_type, type); int ret; vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt; ret = intel_gvt_create_vgpu(vgpu, type->conf); if (ret) return ret; kvmgt_protect_table_init(vgpu); gvt_cache_init(vgpu); return 0; } static void intel_vgpu_release_dev(struct vfio_device *vfio_dev) { struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); intel_gvt_destroy_vgpu(vgpu); } static const struct vfio_device_ops intel_vgpu_dev_ops = { .init = intel_vgpu_init_dev, .release = intel_vgpu_release_dev, .open_device = intel_vgpu_open_device, .close_device = intel_vgpu_close_device, .read = intel_vgpu_read, .write = intel_vgpu_write, .mmap = intel_vgpu_mmap, .ioctl = intel_vgpu_ioctl, .dma_unmap = intel_vgpu_dma_unmap, .bind_iommufd = vfio_iommufd_emulated_bind, .unbind_iommufd = vfio_iommufd_emulated_unbind, .attach_ioas = vfio_iommufd_emulated_attach_ioas, .detach_ioas = vfio_iommufd_emulated_detach_ioas, }; static int intel_vgpu_probe(struct mdev_device *mdev) { struct intel_vgpu *vgpu; int ret; vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev, &intel_vgpu_dev_ops); if (IS_ERR(vgpu)) { gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); return PTR_ERR(vgpu); } dev_set_drvdata(&mdev->dev, vgpu); ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device); if (ret) goto out_put_vdev; gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); return 0; out_put_vdev: vfio_put_device(&vgpu->vfio_device); return ret; } static void intel_vgpu_remove(struct mdev_device *mdev) { struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev); vfio_unregister_group_dev(&vgpu->vfio_device); vfio_put_device(&vgpu->vfio_device); } static unsigned int intel_vgpu_get_available(struct mdev_type *mtype) { struct intel_vgpu_type *type = container_of(mtype, struct intel_vgpu_type, type); struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt; unsigned int low_gm_avail, high_gm_avail, fence_avail; mutex_lock(&gvt->lock); low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; mutex_unlock(&gvt->lock); return min3(low_gm_avail / type->conf->low_mm, high_gm_avail / type->conf->high_mm, fence_avail / type->conf->fence); } static struct mdev_driver intel_vgpu_mdev_driver = { .device_api = VFIO_DEVICE_API_PCI_STRING, .driver = { .name = "intel_vgpu_mdev", .owner = THIS_MODULE, .dev_groups = intel_vgpu_groups, }, .probe = intel_vgpu_probe, .remove = 
intel_vgpu_remove, .get_available = intel_vgpu_get_available, .show_description = intel_vgpu_show_description, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { int r; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; if (kvmgt_gfn_is_write_protected(info, gfn)) return 0; r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn); if (r) return r; kvmgt_protect_table_add(info, gfn); return 0; } int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { int r; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status)) return -ESRCH; if (!kvmgt_gfn_is_write_protected(info, gfn)) return 0; r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn); if (r) return r; kvmgt_protect_table_del(info, gfn); return 0; } static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node) { struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); mutex_lock(&info->vgpu_lock); if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT)) intel_vgpu_page_track_handler(info, gpa, (void *)val, len); mutex_unlock(&info->vgpu_lock); } static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, struct kvm_page_track_notifier_node *node) { unsigned long i; struct intel_vgpu *info = container_of(node, struct intel_vgpu, track_node); mutex_lock(&info->vgpu_lock); for (i = 0; i < nr_pages; i++) { if (kvmgt_gfn_is_write_protected(info, gfn + i)) kvmgt_protect_table_del(info, gfn + i); } mutex_unlock(&info->vgpu_lock); } void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; if (!vgpu->region) return; for (i = 0; i < vgpu->num_regions; i++) if (vgpu->region[i].ops->release) vgpu->region[i].ops->release(vgpu, &vgpu->region[i]); vgpu->num_regions = 0; kfree(vgpu->region); vgpu->region = NULL; } int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct gvt_dma *entry; int ret; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -EINVAL; mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); if (!entry) { ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else if (entry->size != size) { /* the same gfn with different size: unmap and re-map */ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); __gvt_cache_remove_entry(vgpu, entry); ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else { kref_get(&entry->ref); *dma_addr = entry->dma_addr; } mutex_unlock(&vgpu->cache_lock); return 0; err_unmap: gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: mutex_unlock(&vgpu->cache_lock); return ret; } int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { struct gvt_dma *entry; int ret = 0; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -EINVAL; mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; mutex_unlock(&vgpu->cache_lock); return ret; } static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, entry->size); __gvt_cache_remove_entry(entry->vgpu, entry); } void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) 
{ struct gvt_dma *entry; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return; mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_put(&entry->ref, __gvt_dma_release); mutex_unlock(&vgpu->cache_lock); } static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); info->max_support_vgpus = 8; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; info->mmio_size = 2 * 1024 * 1024; info->mmio_bar = 0; info->gtt_start_offset = 8 * 1024 * 1024; info->gtt_entry_size = 8; info->gtt_entry_size_shift = 3; info->gmadr_bytes_in_cmd = 8; info->max_surface_size = 36 * 1024 * 1024; info->msi_cap_offset = pdev->msi_cap; } static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) { struct intel_vgpu *vgpu; int id; mutex_lock(&gvt->lock); idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, (void *)&gvt->service_request)) { if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) intel_vgpu_emulate_vblank(vgpu); } } mutex_unlock(&gvt->lock); } static int gvt_service_thread(void *data) { struct intel_gvt *gvt = (struct intel_gvt *)data; int ret; gvt_dbg_core("service thread start\n"); while (!kthread_should_stop()) { ret = wait_event_interruptible(gvt->service_thread_wq, kthread_should_stop() || gvt->service_request); if (kthread_should_stop()) break; if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) continue; intel_gvt_test_and_emulate_vblank(gvt); if (test_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request) || test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request)) { intel_gvt_schedule(gvt); } } return 0; } static void clean_service_thread(struct intel_gvt *gvt) { kthread_stop(gvt->service_thread); } static int init_service_thread(struct intel_gvt *gvt) { init_waitqueue_head(&gvt->service_thread_wq); gvt->service_thread = kthread_run(gvt_service_thread, gvt, "gvt_service_thread"); if (IS_ERR(gvt->service_thread)) { gvt_err("fail to start service thread.\n"); return PTR_ERR(gvt->service_thread); } return 0; } /** * intel_gvt_clean_device - clean a GVT device * @i915: i915 private * * This function is called at the driver unloading stage, to free the * resources owned by a GVT device. * */ static void intel_gvt_clean_device(struct drm_i915_private *i915) { struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); if (drm_WARN_ON(&i915->drm, !gvt)) return; mdev_unregister_parent(&gvt->parent); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); intel_gvt_debugfs_clean(gvt); clean_service_thread(gvt); intel_gvt_clean_cmd_parser(gvt); intel_gvt_clean_sched_policy(gvt); intel_gvt_clean_workload_scheduler(gvt); intel_gvt_clean_gtt(gvt); intel_gvt_free_firmware(gvt); intel_gvt_clean_mmio_info(gvt); idr_destroy(&gvt->vgpu_idr); kfree(i915->gvt); } /** * intel_gvt_init_device - initialize a GVT device * @i915: drm i915 private data * * This function is called at the initialization stage, to initialize * necessary GVT components. * * Returns: * Zero on success, negative error code if failed. 
* */ static int intel_gvt_init_device(struct drm_i915_private *i915) { struct intel_gvt *gvt; struct intel_vgpu *vgpu; int ret; if (drm_WARN_ON(&i915->drm, i915->gvt)) return -EEXIST; gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); if (!gvt) return -ENOMEM; gvt_dbg_core("init gvt device\n"); idr_init_base(&gvt->vgpu_idr, 1); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); mutex_init(&gvt->sched_lock); gvt->gt = to_gt(i915); i915->gvt = gvt; init_device_info(gvt); ret = intel_gvt_setup_mmio_info(gvt); if (ret) goto out_clean_idr; intel_gvt_init_engine_mmio_context(gvt); ret = intel_gvt_load_firmware(gvt); if (ret) goto out_clean_mmio_info; ret = intel_gvt_init_irq(gvt); if (ret) goto out_free_firmware; ret = intel_gvt_init_gtt(gvt); if (ret) goto out_free_firmware; ret = intel_gvt_init_workload_scheduler(gvt); if (ret) goto out_clean_gtt; ret = intel_gvt_init_sched_policy(gvt); if (ret) goto out_clean_workload_scheduler; ret = intel_gvt_init_cmd_parser(gvt); if (ret) goto out_clean_sched_policy; ret = init_service_thread(gvt); if (ret) goto out_clean_cmd_parser; ret = intel_gvt_init_vgpu_types(gvt); if (ret) goto out_clean_thread; vgpu = intel_gvt_create_idle_vgpu(gvt); if (IS_ERR(vgpu)) { ret = PTR_ERR(vgpu); gvt_err("failed to create idle vgpu\n"); goto out_clean_types; } gvt->idle_vgpu = vgpu; intel_gvt_debugfs_init(gvt); ret = mdev_register_parent(&gvt->parent, i915->drm.dev, &intel_vgpu_mdev_driver, gvt->mdev_types, gvt->num_types); if (ret) goto out_destroy_idle_vgpu; gvt_dbg_core("gvt device initialization is done\n"); return 0; out_destroy_idle_vgpu: intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_debugfs_clean(gvt); out_clean_types: intel_gvt_clean_vgpu_types(gvt); out_clean_thread: clean_service_thread(gvt); out_clean_cmd_parser: intel_gvt_clean_cmd_parser(gvt); out_clean_sched_policy: intel_gvt_clean_sched_policy(gvt); out_clean_workload_scheduler: intel_gvt_clean_workload_scheduler(gvt); out_clean_gtt: intel_gvt_clean_gtt(gvt); out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); out_clean_idr: idr_destroy(&gvt->vgpu_idr); kfree(gvt); i915->gvt = NULL; return ret; } static void intel_gvt_pm_resume(struct drm_i915_private *i915) { struct intel_gvt *gvt = i915->gvt; intel_gvt_restore_fence(gvt); intel_gvt_restore_mmio(gvt); intel_gvt_restore_ggtt(gvt); } static const struct intel_vgpu_ops intel_gvt_vgpu_ops = { .init_device = intel_gvt_init_device, .clean_device = intel_gvt_clean_device, .pm_resume = intel_gvt_pm_resume, }; static int __init kvmgt_init(void) { int ret; ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops); if (ret) return ret; ret = mdev_register_driver(&intel_vgpu_mdev_driver); if (ret) intel_gvt_clear_ops(&intel_gvt_vgpu_ops); return ret; } static void __exit kvmgt_exit(void) { mdev_unregister_driver(&intel_vgpu_mdev_driver); intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } module_init(kvmgt_init); module_exit(kvmgt_exit); MODULE_LICENSE("GPL and additional rights"); MODULE_AUTHOR("Intel Corporation");
linux-master
drivers/gpu/drm/i915/gvt/kvmgt.c
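kvmgt.c's intel_vgpu_read()/intel_vgpu_write() split each userspace access into naturally aligned 8/4/2/1-byte chunks, reserving the 8-byte case for GGTT entries detected by gtt_entry(). Below is a minimal standalone sketch of just that size-selection loop; the is_ggtt flag stands in for gtt_entry() and the offsets are made-up example values, so it demonstrates the splitting rule only, not any real device access.

/* Illustrative sketch: how an access at (pos, count) is split into
 * naturally aligned chunks, mirroring intel_vgpu_read()/intel_vgpu_write().
 * The "is_ggtt" flag stands in for gtt_entry(); it is an assumption here.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

static size_t pick_chunk(uint64_t pos, size_t count, bool is_ggtt)
{
	if (count >= 8 && !(pos % 8) && is_ggtt)
		return 8;	/* only GGTT entries are accessed 8 bytes at a time */
	if (count >= 4 && !(pos % 4))
		return 4;
	if (count >= 2 && !(pos % 2))
		return 2;
	return 1;
}

int main(void)
{
	uint64_t pos = 0x103;	/* hypothetical unaligned start offset */
	size_t count = 13;	/* hypothetical access length */

	while (count) {
		size_t filled = pick_chunk(pos, count, false);

		printf("access %zu byte(s) at 0x%llx\n",
		       filled, (unsigned long long)pos);
		pos += filled;
		count -= filled;
	}
	return 0;
}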
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/acpi.h> #include "i915_drv.h" #include "gvt.h" /* * Note: Only for GVT-g virtual VBT generation, other usage must * not do like this. */ #define _INTEL_BIOS_PRIVATE #include "display/intel_vbt_defs.h" #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_VBT (1<<3) /* device handle */ #define DEVICE_TYPE_CRT 0x01 #define DEVICE_TYPE_EFP1 0x04 #define DEVICE_TYPE_EFP2 0x40 #define DEVICE_TYPE_EFP3 0x20 #define DEVICE_TYPE_EFP4 0x10 struct opregion_header { u8 signature[16]; u32 size; u32 opregion_ver; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; u32 mboxes; u32 driver_model; u32 pcon; u8 dver[32]; u8 rsvd[124]; } __packed; struct bdb_data_header { u8 id; u16 size; /* data size */ } __packed; /* For supporting windows guest with opregion, here hardcode the emulated * bdb header version as '186', and the corresponding child_device_config * length should be '33' but not '38'. 
*/ struct efp_child_device_config { u16 handle; u16 device_type; u16 device_class; u8 i2c_speed; u8 dp_onboard_redriver; /* 158 */ u8 dp_ondock_redriver; /* 158 */ u8 hdmi_level_shifter_value:4; /* 169 */ u8 hdmi_max_data_rate:4; /* 204 */ u16 dtd_buf_ptr; /* 161 */ u8 edidless_efp:1; /* 161 */ u8 compression_enable:1; /* 198 */ u8 compression_method:1; /* 198 */ u8 ganged_edp:1; /* 202 */ u8 skip0:4; u8 compression_structure_index:4; /* 198 */ u8 skip1:4; u8 slave_port; /* 202 */ u8 skip2; u8 dvo_port; u8 i2c_pin; /* for add-in card */ u8 slave_addr; /* for add-in card */ u8 ddc_pin; u16 edid_ptr; u8 dvo_config; u8 efp_docked_port:1; /* 158 */ u8 lane_reversal:1; /* 184 */ u8 onboard_lspcon:1; /* 192 */ u8 iboost_enable:1; /* 196 */ u8 hpd_invert:1; /* BXT 196 */ u8 slip3:3; u8 hdmi_compat:1; u8 dp_compat:1; u8 tmds_compat:1; u8 skip4:5; u8 aux_channel; u8 dongle_detect; u8 pipe_cap:2; u8 sdvo_stall:1; /* 158 */ u8 hpd_status:2; u8 integrated_encoder:1; u8 skip5:2; u8 dvo_wiring; u8 mipi_bridge_type; /* 171 */ u16 device_class_ext; u8 dvo_function; } __packed; struct vbt { /* header->bdb_offset point to bdb_header offset */ struct vbt_header header; struct bdb_header bdb_header; struct bdb_data_header general_features_header; struct bdb_general_features general_features; struct bdb_data_header general_definitions_header; struct bdb_general_definitions general_definitions; struct efp_child_device_config child0; struct efp_child_device_config child1; struct efp_child_device_config child2; struct efp_child_device_config child3; struct bdb_data_header driver_features_header; struct bdb_driver_features driver_features; }; static void virt_vbt_generation(struct vbt *v) { int num_child; memset(v, 0, sizeof(struct vbt)); v->header.signature[0] = '$'; v->header.signature[1] = 'V'; v->header.signature[2] = 'B'; v->header.signature[3] = 'T'; /* there's features depending on version! 
*/ v->header.version = 155; v->header.header_size = sizeof(v->header); v->header.vbt_size = sizeof(struct vbt); v->header.bdb_offset = offsetof(struct vbt, bdb_header); strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); v->bdb_header.version = 186; /* child_dev_size = 33 */ v->bdb_header.header_size = sizeof(v->bdb_header); v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header); /* general features */ v->general_features_header.id = BDB_GENERAL_FEATURES; v->general_features_header.size = sizeof(struct bdb_general_features); v->general_features.int_crt_support = 0; v->general_features.int_tv_support = 0; /* child device */ num_child = 4; /* each port has one child */ v->general_definitions.child_dev_size = sizeof(struct efp_child_device_config); v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; /* size will include child devices */ v->general_definitions_header.size = sizeof(struct bdb_general_definitions) + num_child * v->general_definitions.child_dev_size; /* portA */ v->child0.handle = DEVICE_TYPE_EFP1; v->child0.device_type = DEVICE_TYPE_DP; v->child0.dvo_port = DVO_PORT_DPA; v->child0.aux_channel = DP_AUX_A; v->child0.dp_compat = true; v->child0.integrated_encoder = true; /* portB */ v->child1.handle = DEVICE_TYPE_EFP2; v->child1.device_type = DEVICE_TYPE_DP; v->child1.dvo_port = DVO_PORT_DPB; v->child1.aux_channel = DP_AUX_B; v->child1.dp_compat = true; v->child1.integrated_encoder = true; /* portC */ v->child2.handle = DEVICE_TYPE_EFP3; v->child2.device_type = DEVICE_TYPE_DP; v->child2.dvo_port = DVO_PORT_DPC; v->child2.aux_channel = DP_AUX_C; v->child2.dp_compat = true; v->child2.integrated_encoder = true; /* portD */ v->child3.handle = DEVICE_TYPE_EFP4; v->child3.device_type = DEVICE_TYPE_DP; v->child3.dvo_port = DVO_PORT_DPD; v->child3.aux_channel = DP_AUX_D; v->child3.dp_compat = true; v->child3.integrated_encoder = true; /* driver features */ v->driver_features_header.id = BDB_DRIVER_FEATURES; v->driver_features_header.size = sizeof(struct bdb_driver_features); v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS; } /** * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) { u8 *buf; struct opregion_header *header; struct vbt v; const char opregion_signature[16] = OPREGION_SIGNATURE; gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) { gvt_err("fail to get memory for vgpu virt opregion\n"); return -ENOMEM; } /* emulated opregion with VBT mailbox only */ buf = (u8 *)vgpu_opregion(vgpu)->va; header = (struct opregion_header *)buf; memcpy(header->signature, opregion_signature, sizeof(opregion_signature)); header->size = 0x8; header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; /* for unknown reason, the value in LID field is incorrect * which block the windows guest, so workaround it by force * setting it to "OPEN" */ buf[INTEL_GVT_OPREGION_CLID] = 0x3; /* emulated vbt from virt vbt generation */ virt_vbt_generation(&v); memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt)); return 0; } /** * intel_vgpu_opregion_base_write_handler - Opregion base register write handler * * @vgpu: a vGPU * @gpa: guest physical address of opregion * * Returns: * Zero on success, negative error code if failed. 
*/ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) { int i; gvt_dbg_core("emulate opregion from kernel\n"); for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; return 0; } /** * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion * @vgpu: a vGPU * */ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) { gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id); if (!vgpu_opregion(vgpu)->va) return; /* Guest opregion is released by VFIO */ free_pages((unsigned long)vgpu_opregion(vgpu)->va, get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } #define GVT_OPREGION_FUNC(scic) \ ({ \ u32 __ret; \ __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \ OPREGION_SCIC_FUNC_SHIFT; \ __ret; \ }) #define GVT_OPREGION_SUBFUNC(scic) \ ({ \ u32 __ret; \ __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \ OPREGION_SCIC_SUBFUNC_SHIFT; \ __ret; \ }) static const char *opregion_func_name(u32 func) { const char *name = NULL; switch (func) { case 0 ... 3: case 5: case 7 ... 15: name = "Reserved"; break; case 4: name = "Get BIOS Data"; break; case 6: name = "System BIOS Callbacks"; break; default: name = "Unknown"; break; } return name; } static const char *opregion_subfunc_name(u32 subfunc) { const char *name = NULL; switch (subfunc) { case 0: name = "Supported Calls"; break; case 1: name = "Requested Callbacks"; break; case 2 ... 3: case 8 ... 9: name = "Reserved"; break; case 5: name = "Boot Display"; break; case 6: name = "TV-Standard/Video-Connector"; break; case 7: name = "Internal Graphics"; break; case 10: name = "Spread Spectrum Clocks"; break; case 11: name = "Get AKSV"; break; default: name = "Unknown"; break; } return name; }; static bool querying_capabilities(u32 scic) { u32 func, subfunc; func = GVT_OPREGION_FUNC(scic); subfunc = GVT_OPREGION_SUBFUNC(scic); if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA && subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS) || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA && subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS) || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS && subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) { return true; } return false; } /** * intel_vgpu_emulate_opregion_request - emulating OpRegion request * @vgpu: a vGPU * @swsci: SWSCI request * * Returns: * Zero on success, negative error code if failed */ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) { u32 scic, parm; u32 func, subfunc; u64 scic_pa = 0, parm_pa = 0; int ret; scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_SCIC; parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + INTEL_GVT_OPREGION_PARM; ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } if (!(swsci & SWSCI_SCI_SELECT)) { gvt_vgpu_err("requesting SMI service\n"); return 0; } /* ignore non 0->1 trasitions */ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI] & SWSCI_SCI_TRIGGER) || !(swsci & SWSCI_SCI_TRIGGER)) { return 0; } func = GVT_OPREGION_FUNC(scic); subfunc = GVT_OPREGION_SUBFUNC(scic); if (!querying_capabilities(scic)) { gvt_vgpu_err("requesting runtime service: func \"%s\"," " subfunc \"%s\"\n", opregion_func_name(func), 
opregion_subfunc_name(subfunc)); /* * emulate exit status of function call, '0' means * "failure, generic, unsupported or unknown cause" */ scic &= ~OPREGION_SCIC_EXIT_MASK; goto out; } scic = 0; parm = 0; out: ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm)); if (ret) { gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", ret, scic_pa, sizeof(scic)); return ret; } return 0; }
linux-master
drivers/gpu/drm/i915/gvt/opregion.c
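opregion.c above emulates only the VBT mailbox of the OpRegion, and kvmgt.c's intel_gvt_set_opregion() refuses to expose the buffer to VFIO unless its first 16 bytes match OPREGION_SIGNATURE. A minimal userspace sketch of that handshake follows; the demo struct is a trimmed stand-in for opregion_header (not the full layout) and carries only the fields the emulation actually writes.

/* Illustrative sketch (userspace): fill in the OpRegion header fields that
 * intel_vgpu_init_opregion() writes, then run the same signature check
 * intel_gvt_set_opregion() performs before registering the VFIO region.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT (1 << 3)

struct demo_opregion_header {	/* trimmed stand-in for opregion_header */
	uint8_t signature[16];
	uint32_t size;
	uint32_t opregion_ver;
	uint32_t mboxes;
} __attribute__((packed));

int main(void)
{
	uint8_t buf[8192] = { 0 };
	struct demo_opregion_header *h = (struct demo_opregion_header *)buf;

	memcpy(h->signature, OPREGION_SIGNATURE, 16);
	h->size = 0x8;			/* value written by intel_vgpu_init_opregion() */
	h->opregion_ver = 0x02000000;
	h->mboxes = MBOX_VBT;		/* only the VBT mailbox is emulated */

	/* Same acceptance test as intel_gvt_set_opregion(). */
	if (memcmp(buf, OPREGION_SIGNATURE, 16) != 0) {
		fprintf(stderr, "bad OpRegion signature\n");
		return 1;
	}
	printf("OpRegion accepted, mboxes=0x%x\n", h->mboxes);
	return 0;
}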
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eddie Dong <[email protected]> * Kevin Tian <[email protected]> * * Contributors: * Ping Gao <[email protected]> * Zhi Wang <[email protected]> * Bing Niu <[email protected]> * */ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" void populate_pvinfo_page(struct intel_vgpu *vgpu) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; /* setup the ballooning information */ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0; vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0; vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT; vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) = vgpu_aperture_sz(vgpu); vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) = vgpu_hidden_gmadr_base(vgpu); vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) = vgpu_hidden_sz(vgpu); vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX; vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX; gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n", vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu)); gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu)); drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } /* * vGPU type name is defined as GVTg_Vx_y which contains the physical GPU * generation type (e.g V4 as BDW server, V5 as SKL server). * * Depening on the physical SKU resource, we might see vGPU types like * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of * vGPU on same physical GPU depending on available resource. Each vGPU * type will have a different number of avail_instance to indicate how * many vGPU instance can be created for this type. 
*/ #define VGPU_MAX_WEIGHT 16 #define VGPU_WEIGHT(vgpu_num) \ (VGPU_MAX_WEIGHT / (vgpu_num)) static const struct intel_vgpu_config intel_vgpu_configs[] = { { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" }, { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" }, { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" }, }; /** * intel_gvt_init_vgpu_types - initialize vGPU type list * @gvt : GVT device * * Initialize vGPU type list based on available resource. * */ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) { unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs); unsigned int i; gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type), GFP_KERNEL); if (!gvt->types) return -ENOMEM; gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types), GFP_KERNEL); if (!gvt->mdev_types) goto out_free_types; for (i = 0; i < num_types; ++i) { const struct intel_vgpu_config *conf = &intel_vgpu_configs[i]; if (low_avail / conf->low_mm == 0) break; if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT) goto out_free_mdev_types; sprintf(gvt->types[i].name, "GVTg_V%u_%s", GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name); gvt->types[i].conf = conf; gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", i, gvt->types[i].name, min(low_avail / conf->low_mm, high_avail / conf->high_mm), conf->low_mm, conf->high_mm, conf->fence, conf->weight, vgpu_edid_str(conf->edid)); gvt->mdev_types[i] = &gvt->types[i].type; gvt->mdev_types[i]->sysfs_name = gvt->types[i].name; } gvt->num_types = i; return 0; out_free_mdev_types: kfree(gvt->mdev_types); out_free_types: kfree(gvt->types); return -EINVAL; } void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) { kfree(gvt->mdev_types); kfree(gvt->types); } /** * intel_gvt_activate_vgpu - activate a virtual GPU * @vgpu: virtual GPU * * This function is called when user wants to activate a virtual GPU. * */ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) { set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); } /** * intel_gvt_deactivate_vgpu - deactivate a virtual GPU * @vgpu: virtual GPU * * This function is called when user wants to deactivate a virtual GPU. * The virtual GPU will be stopped. * */ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { mutex_lock(&vgpu->vgpu_lock); clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); if (atomic_read(&vgpu->submission.running_workload_num)) { mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_stop_schedule(vgpu); mutex_unlock(&vgpu->vgpu_lock); } /** * intel_gvt_release_vgpu - release a virtual GPU * @vgpu: virtual GPU * * This function is called when user wants to release a virtual GPU. * The virtual GPU will be stopped and all runtime information will be * destroyed. * */ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) { intel_gvt_deactivate_vgpu(vgpu); mutex_lock(&vgpu->vgpu_lock); vgpu->d3_entered = false; intel_vgpu_clean_workloads(vgpu, ALL_ENGINES); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); } /** * intel_gvt_destroy_vgpu - destroy a virtual GPU * @vgpu: virtual GPU * * This function is called when user wants to destroy a virtual GPU. 
* */ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status), "vGPU is still active!\n"); /* * remove idr first so later clean can judge if need to stop * service if no active vgpu. */ mutex_lock(&gvt->lock); idr_remove(&gvt->vgpu_idr, vgpu->id); mutex_unlock(&gvt->lock); mutex_lock(&vgpu->vgpu_lock); intel_gvt_debugfs_remove_vgpu(vgpu); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_display(vgpu); intel_vgpu_clean_opregion(vgpu); intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); intel_vgpu_detach_regions(vgpu); intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); } #define IDLE_VGPU_IDR 0 /** * intel_gvt_create_idle_vgpu - create an idle virtual GPU * @gvt: GVT device * * This function is called when user wants to create an idle virtual GPU. * * Returns: * pointer to intel_vgpu, error pointer if failed. */ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) { struct intel_vgpu *vgpu; enum intel_engine_id i; int ret; vgpu = vzalloc(sizeof(*vgpu)); if (!vgpu) return ERR_PTR(-ENOMEM); vgpu->id = IDLE_VGPU_IDR; vgpu->gvt = gvt; mutex_init(&vgpu->vgpu_lock); for (i = 0; i < I915_NUM_ENGINES; i++) INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]); ret = intel_vgpu_init_sched_policy(vgpu); if (ret) goto out_free_vgpu; clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status); return vgpu; out_free_vgpu: vfree(vgpu); return ERR_PTR(ret); } /** * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU * @vgpu: virtual GPU * * This function is called when user wants to destroy an idle virtual GPU. 
* */ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) { mutex_lock(&vgpu->vgpu_lock); intel_vgpu_clean_sched_policy(vgpu); mutex_unlock(&vgpu->vgpu_lock); vfree(vgpu); } int intel_gvt_create_vgpu(struct intel_vgpu *vgpu, const struct intel_vgpu_config *conf) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; gvt_dbg_core("low %u MB high %u MB fence %u\n", BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm), conf->fence); mutex_lock(&gvt->lock); ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, GFP_KERNEL); if (ret < 0) goto out_unlock; vgpu->id = ret; vgpu->sched_ctl.weight = conf->weight; mutex_init(&vgpu->vgpu_lock); mutex_init(&vgpu->dmabuf_lock); INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); idr_init_base(&vgpu->object_idr, 1); intel_vgpu_init_cfg_space(vgpu, 1); vgpu->d3_entered = false; ret = intel_vgpu_init_mmio(vgpu); if (ret) goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, conf); if (ret) goto out_clean_vgpu_mmio; populate_pvinfo_page(vgpu); ret = intel_vgpu_init_gtt(vgpu); if (ret) goto out_clean_vgpu_resource; ret = intel_vgpu_init_opregion(vgpu); if (ret) goto out_clean_gtt; ret = intel_vgpu_init_display(vgpu, conf->edid); if (ret) goto out_clean_opregion; ret = intel_vgpu_setup_submission(vgpu); if (ret) goto out_clean_display; ret = intel_vgpu_init_sched_policy(vgpu); if (ret) goto out_clean_submission; intel_gvt_debugfs_add_vgpu(vgpu); ret = intel_gvt_set_opregion(vgpu); if (ret) goto out_clean_sched_policy; if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv)) ret = intel_gvt_set_edid(vgpu, PORT_B); else ret = intel_gvt_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; intel_gvt_update_reg_whitelist(vgpu); mutex_unlock(&gvt->lock); return 0; out_clean_sched_policy: intel_vgpu_clean_sched_policy(vgpu); out_clean_submission: intel_vgpu_clean_submission(vgpu); out_clean_display: intel_vgpu_clean_display(vgpu); out_clean_opregion: intel_vgpu_clean_opregion(vgpu); out_clean_gtt: intel_vgpu_clean_gtt(vgpu); out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: intel_vgpu_clean_mmio(vgpu); out_clean_idr: idr_remove(&gvt->vgpu_idr, vgpu->id); out_unlock: mutex_unlock(&gvt->lock); return ret; } /** * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset * @vgpu: virtual GPU * @dmlr: vGPU Device Model Level Reset or GT Reset * @engine_mask: engines to reset for GT reset * * This function is called when user wants to reset a virtual GPU through * device model reset or GT reset. The caller should hold the vgpu lock. * * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset * the whole vGPU to default state as when it is created. This vGPU function * is required both for functionary and security concerns.The ultimate goal * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we * assign a vGPU to a virtual machine we must isse such reset first. * * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec. * Unlike the FLR, GT reset only reset particular resource of a vGPU per * the reset request. Guest driver can issue a GT reset by programming the * virtual GDRST register to reset specific virtual GPU engine or all * engines. * * The parameter dev_level is to identify if we will do DMLR or GT reset. 
* The parameter engine_mask is to specific the engines that need to be * resetted. If value ALL_ENGINES is given for engine_mask, it means * the caller requests a full GT reset that we will reset all virtual * GPU engines. For FLR, engine_mask is ignored. */ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_engine_mask_t engine_mask) { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask; gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", vgpu->id, dmlr, engine_mask); vgpu->resetting_eng = resetting_eng; intel_vgpu_stop_schedule(vgpu); /* * The current_vgpu will set to NULL after stopping the * scheduler when the reset is triggered by current vgpu. */ if (scheduler->current_vgpu == NULL) { mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_reset_submission(vgpu, resetting_eng); /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); if (engine_mask == ALL_ENGINES) intel_vgpu_invalidate_ppgtt(vgpu); /*fence will not be reset during virtual reset */ if (dmlr) { if(!vgpu->d3_entered) { intel_vgpu_invalidate_ppgtt(vgpu); intel_vgpu_destroy_all_ppgtt_mm(vgpu); } intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_reset_resource(vgpu); } intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); if (dmlr) { intel_vgpu_reset_display(vgpu); intel_vgpu_reset_cfg_space(vgpu); /* only reset the failsafe mode when dmlr reset */ vgpu->failsafe = false; /* * PCI_D0 is set before dmlr, so reset d3_entered here * after done using. */ if(vgpu->d3_entered) vgpu->d3_entered = false; else vgpu->pv_notified = false; } } vgpu->resetting_eng = 0; gvt_dbg_core("reset vgpu%d done\n", vgpu->id); gvt_dbg_core("------------------------------------------\n"); } /** * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) * @vgpu: virtual GPU * * This function is called when user wants to reset a virtual GPU. * */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { mutex_lock(&vgpu->vgpu_lock); intel_gvt_reset_vgpu_locked(vgpu, true, 0); mutex_unlock(&vgpu->vgpu_lock); }
linux-master
drivers/gpu/drm/i915/gvt/vgpu.c
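A note on the error handling in intel_gvt_create_vgpu() in the vgpu.c entry above: the vGPU is assembled in a fixed order (IDR slot, MMIO, resources, GTT, OpRegion, display, submission, scheduling policy) and any failure unwinds only the steps already completed, in reverse order, through the chain of goto labels. The standalone program below is a minimal sketch of that idiom; the step_*() helpers and create_object() are invented for illustration and are not part of the GVT code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical three-step setup, used only to illustrate the idiom. */
static int step_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
static int step_b(void **p) { *p = malloc(32); return *p ? 0 : -1; }
static int step_c(void **p) { *p = malloc(64); return *p ? 0 : -1; }

static int create_object(void)
{
        void *a, *b, *c;
        int ret;

        ret = step_a(&a);
        if (ret)
                goto out;

        ret = step_b(&b);
        if (ret)
                goto out_undo_a;

        ret = step_c(&c);
        if (ret)
                goto out_undo_b;

        /* Success path: all resources stay live for the caller. */
        free(c);        /* freed here only so the demo does not leak */
        free(b);
        free(a);
        return 0;

        /* Failure path: unwind strictly in reverse order of acquisition. */
out_undo_b:
        free(b);
out_undo_a:
        free(a);
out:
        return ret;
}

int main(void)
{
        printf("create_object() -> %d\n", create_object());
        return 0;
}

The pattern keeps a single success path and releases each acquired resource exactly once on failure, which is why the label order in intel_gvt_create_vgpu() mirrors its initialization order.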
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Zhi Wang <[email protected]> * * Contributors: * Changbin Du <[email protected]> * */ #include <linux/firmware.h> #include <linux/crc32.h> #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" #define FIRMWARE_VERSION (0x0) struct gvt_firmware_header { u64 magic; u32 crc32; /* protect the data after this field */ u32 version; u64 cfg_space_size; u64 cfg_space_offset; /* offset in the file */ u64 mmio_size; u64 mmio_offset; /* offset in the file */ unsigned char data[]; }; #define dev_to_drm_minor(d) dev_get_drvdata((d)) static ssize_t gvt_firmware_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t count) { memcpy(buf, attr->private + offset, count); return count; } static struct bin_attribute firmware_attr = { .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)}, .read = gvt_firmware_read, .write = NULL, .mmap = NULL, }; static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; struct drm_i915_private *i915 = gvt->gt->i915; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; int ret; size = offsetof(struct gvt_firmware_header, data) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); if (!firmware) return -ENOMEM; h = firmware; h->magic = VGT_MAGIC; h->version = FIRMWARE_VERSION; h->cfg_space_size = info->cfg_space_size; h->cfg_space_offset = offsetof(struct gvt_firmware_header, data); h->mmio_size = info->mmio_size; h->mmio_offset = h->cfg_space_offset + h->cfg_space_size; p = firmware + h->cfg_space_offset; memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space, info->cfg_space_size); memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size); p = firmware + h->mmio_offset; memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio, info->mmio_size); memcpy(p, gvt->firmware.mmio, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, version); h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); firmware_attr.size = size; firmware_attr.private = firmware; ret = device_create_bin_file(&pdev->dev, &firmware_attr); if (ret) { vfree(firmware); return ret; } return 0; } static void clean_firmware_sysfs(struct intel_gvt *gvt) { struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); 
device_remove_bin_file(&pdev->dev, &firmware_attr); vfree(firmware_attr.private); } /** * intel_gvt_free_firmware - free GVT firmware * @gvt: intel gvt device * */ void intel_gvt_free_firmware(struct intel_gvt *gvt) { if (!gvt->firmware.firmware_loaded) clean_firmware_sysfs(gvt); kfree(gvt->firmware.cfg_space); vfree(gvt->firmware.mmio); } static int verify_firmware(struct intel_gvt *gvt, const struct firmware *fw) { struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); struct gvt_firmware_header *h; unsigned long id, crc32_start; const void *mem; const char *item; u64 file, request; h = (struct gvt_firmware_header *)fw->data; crc32_start = offsetofend(struct gvt_firmware_header, crc32); mem = fw->data + crc32_start; #define VERIFY(s, a, b) do { \ item = (s); file = (u64)(a); request = (u64)(b); \ if ((a) != (b)) \ goto invalid_firmware; \ } while (0) VERIFY("magic number", h->magic, VGT_MAGIC); VERIFY("version", h->version, FIRMWARE_VERSION); VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start)); VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size); VERIFY("mmio size", h->mmio_size, info->mmio_size); mem = (fw->data + h->cfg_space_offset); id = *(u16 *)(mem + PCI_VENDOR_ID); VERIFY("vendor id", id, pdev->vendor); id = *(u16 *)(mem + PCI_DEVICE_ID); VERIFY("device id", id, pdev->device); id = *(u8 *)(mem + PCI_REVISION_ID); VERIFY("revision id", id, pdev->revision); #undef VERIFY return 0; invalid_firmware: gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n", item, file, request); return -EINVAL; } #define GVT_FIRMWARE_PATH "i915/gvt" /** * intel_gvt_load_firmware - load GVT firmware * @gvt: intel gvt device * */ int intel_gvt_load_firmware(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); struct intel_gvt_firmware *firmware = &gvt->firmware; struct gvt_firmware_header *h; const struct firmware *fw; char *path; void *mem; int ret; path = kmalloc(PATH_MAX, GFP_KERNEL); if (!path) return -ENOMEM; mem = kmalloc(info->cfg_space_size, GFP_KERNEL); if (!mem) { kfree(path); return -ENOMEM; } firmware->cfg_space = mem; mem = vmalloc(info->mmio_size); if (!mem) { kfree(path); kfree(firmware->cfg_space); return -ENOMEM; } firmware->mmio = mem; sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); gvt_dbg_core("request hw state firmware %s...\n", path); ret = request_firmware(&fw, path, gvt->gt->i915->drm.dev); kfree(path); if (ret) goto expose_firmware; gvt_dbg_core("success.\n"); ret = verify_firmware(gvt, fw); if (ret) goto out_free_fw; gvt_dbg_core("verified.\n"); h = (struct gvt_firmware_header *)fw->data; memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset, h->cfg_space_size); memcpy(firmware->mmio, fw->data + h->mmio_offset, h->mmio_size); release_firmware(fw); firmware->firmware_loaded = true; return 0; out_free_fw: release_firmware(fw); expose_firmware: expose_firmware_sysfs(gvt); return 0; }
linux-master
drivers/gpu/drm/i915/gvt/firmware.c
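The golden hardware state handled by firmware.c above is a single blob: a gvt_firmware_header followed by the saved config space and MMIO snapshot, with h->crc32 covering everything from the version field to the end of the blob. Below is a small userspace-style sketch of that layout; the struct only mirrors the shape of gvt_firmware_header, the sizes and magic value are made up, and xor_checksum() merely stands in for the kernel's crc32_le().

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Shaped like struct gvt_firmware_header; field widths are illustrative. */
struct fw_header {
        uint64_t magic;
        uint32_t crc32;         /* protects everything after this field */
        uint32_t version;
        uint64_t cfg_space_size;
        uint64_t cfg_space_offset;
        uint64_t mmio_size;
        uint64_t mmio_offset;
        unsigned char data[];
};

/* Stand-in for crc32_le(); a real tool would use a proper CRC. */
static uint32_t xor_checksum(const void *buf, size_t len)
{
        const unsigned char *p = buf;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                sum = (sum << 1 | sum >> 31) ^ p[i];
        return sum;
}

int main(void)
{
        size_t cfg_size = 256, mmio_size = 1024;        /* invented sizes */
        size_t total = sizeof(struct fw_header) + cfg_size + mmio_size;
        size_t crc_start = offsetof(struct fw_header, version);
        unsigned char *blob = calloc(1, total);
        struct fw_header *h = (struct fw_header *)blob;

        if (!blob)
                return 1;

        h->magic = 0x4776655f4d616769ULL;       /* arbitrary demo magic */
        h->version = 0;
        h->cfg_space_size = cfg_size;
        h->cfg_space_offset = offsetof(struct fw_header, data);
        h->mmio_size = mmio_size;
        h->mmio_offset = h->cfg_space_offset + cfg_size;

        /* Checksum everything after the crc32 field, as firmware.c does. */
        h->crc32 = xor_checksum(blob + crc_start, total - crc_start);

        printf("blob %zu bytes, protected region starts at %zu, crc %08x\n",
               total, crc_start, h->crc32);
        free(blob);
        return 0;
}

On the load side, verify_firmware() in the file above recomputes the checksum over the region starting at offsetofend(struct gvt_firmware_header, crc32) and compares each header field and the PCI IDs with the VERIFY() macro before trusting the file.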
/* * Copyright(c) 2011-2017 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/debugfs.h> #include <linux/list_sort.h> #include "i915_drv.h" #include "gvt.h" struct mmio_diff_param { struct intel_vgpu *vgpu; int total; int diff; struct list_head diff_mmio_list; }; struct diff_mmio { struct list_head node; u32 offset; u32 preg; u32 vreg; }; /* Compare two diff_mmio items. */ static int mmio_offset_compare(void *priv, const struct list_head *a, const struct list_head *b) { struct diff_mmio *ma; struct diff_mmio *mb; ma = container_of(a, struct diff_mmio, node); mb = container_of(b, struct diff_mmio, node); if (ma->offset < mb->offset) return -1; else if (ma->offset > mb->offset) return 1; return 0; } static inline int mmio_diff_handler(struct intel_gvt *gvt, u32 offset, void *data) { struct mmio_diff_param *param = data; struct diff_mmio *node; u32 preg, vreg; preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset)); vreg = vgpu_vreg(param->vgpu, offset); if (preg != vreg) { node = kmalloc(sizeof(*node), GFP_ATOMIC); if (!node) return -ENOMEM; node->offset = offset; node->preg = preg; node->vreg = vreg; list_add(&node->node, &param->diff_mmio_list); param->diff++; } param->total++; return 0; } /* Show the all the different values of tracked mmio. */ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) { struct intel_vgpu *vgpu = s->private; struct intel_gvt *gvt = vgpu->gvt; struct mmio_diff_param param = { .vgpu = vgpu, .total = 0, .diff = 0, }; struct diff_mmio *node, *next; INIT_LIST_HEAD(&param.diff_mmio_list); mutex_lock(&gvt->lock); spin_lock_bh(&gvt->scheduler.mmio_context_lock); mmio_hw_access_pre(gvt->gt); /* Recognize all the diff mmios to list. */ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param); mmio_hw_access_post(gvt->gt); spin_unlock_bh(&gvt->scheduler.mmio_context_lock); mutex_unlock(&gvt->lock); /* In an ascending order by mmio offset. 
*/ list_sort(NULL, &param.diff_mmio_list, mmio_offset_compare); seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff"); list_for_each_entry_safe(node, next, &param.diff_mmio_list, node) { u32 diff = node->preg ^ node->vreg; seq_printf(s, "%08x %08x %08x %*pbl\n", node->offset, node->preg, node->vreg, 32, &diff); list_del(&node->node); kfree(node); } seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff); return 0; } DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff); static int vgpu_scan_nonprivbb_get(void *data, u64 *val) { struct intel_vgpu *vgpu = (struct intel_vgpu *)data; *val = vgpu->scan_nonprivbb; return 0; } /* * set/unset bit engine_id of vgpu->scan_nonprivbb to turn on/off scanning * of non-privileged batch buffer. e.g. * if vgpu->scan_nonprivbb=3, then it will scan non-privileged batch buffer * on engine 0 and 1. */ static int vgpu_scan_nonprivbb_set(void *data, u64 val) { struct intel_vgpu *vgpu = (struct intel_vgpu *)data; vgpu->scan_nonprivbb = val; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(vgpu_scan_nonprivbb_fops, vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set, "0x%llx\n"); static int vgpu_status_get(void *data, u64 *val) { struct intel_vgpu *vgpu = (struct intel_vgpu *)data; *val = 0; if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) *val |= (1 << INTEL_VGPU_STATUS_ATTACHED); if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status)) *val |= (1 << INTEL_VGPU_STATUS_ACTIVE); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n"); /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU * @vgpu: a vGPU */ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) { char name[16] = ""; snprintf(name, 16, "vgpu%d", vgpu->id); vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu, &vgpu_mmio_diff_fops); debugfs_create_file_unsafe("scan_nonprivbb", 0644, vgpu->debugfs, vgpu, &vgpu_scan_nonprivbb_fops); debugfs_create_file_unsafe("status", 0644, vgpu->debugfs, vgpu, &vgpu_status_fops); } /** * intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU * @vgpu: a vGPU */ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_minor *minor = gvt->gt->i915->drm.primary; if (minor->debugfs_root && gvt->debugfs_root) { debugfs_remove_recursive(vgpu->debugfs); vgpu->debugfs = NULL; } } /** * intel_gvt_debugfs_init - register gvt debugfs root entry * @gvt: GVT device */ void intel_gvt_debugfs_init(struct intel_gvt *gvt) { struct drm_minor *minor = gvt->gt->i915->drm.primary; gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, &gvt->mmio.num_tracked_mmio); } /** * intel_gvt_debugfs_clean - remove debugfs entries * @gvt: GVT device */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { struct drm_minor *minor = gvt->gt->i915->drm.primary; if (minor->debugfs_root) { debugfs_remove_recursive(gvt->debugfs_root); gvt->debugfs_root = NULL; } }
linux-master
drivers/gpu/drm/i915/gvt/debugfs.c
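The mmio_diff debugfs file in debugfs.c above walks every tracked MMIO register, records entries whose hardware value (preg) and vGPU value (vreg) disagree, sorts them by offset with list_sort() and prints the per-bit difference. The sketch below reproduces just the sort-and-report step with a plain array and qsort(); the sample offsets and values are invented, and the diff is printed as a hex XOR rather than the bit list the kernel formats with %*pbl.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct diff_mmio {
        uint32_t offset;
        uint32_t preg;  /* value read from hardware */
        uint32_t vreg;  /* value held in the vGPU's virtual register file */
};

/* Same ordering rule as mmio_offset_compare(): ascending by offset. */
static int offset_cmp(const void *a, const void *b)
{
        const struct diff_mmio *ma = a, *mb = b;

        if (ma->offset < mb->offset)
                return -1;
        if (ma->offset > mb->offset)
                return 1;
        return 0;
}

int main(void)
{
        /* Invented sample data standing in for the tracked-MMIO walk. */
        struct diff_mmio diffs[] = {
                { 0xc800, 0x0000001f, 0x0000000f },
                { 0x2358, 0xdeadbeef, 0xdeadbee0 },
                { 0xb020, 0x00000300, 0x00000100 },
        };
        size_t i, n = sizeof(diffs) / sizeof(diffs[0]);

        qsort(diffs, n, sizeof(diffs[0]), offset_cmp);

        printf("%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
        for (i = 0; i < n; i++)
                printf("%08x %08x %08x %08x\n", diffs[i].offset,
                       diffs[i].preg, diffs[i].vreg,
                       diffs[i].preg ^ diffs[i].vreg);
        return 0;
}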
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Zhiyuan Lv <[email protected]> * Zhi Wang <[email protected]> * * Contributors: * Min He <[email protected]> * Bing Niu <[email protected]> * Ping Gao <[email protected]> * Tina Zhang <[email protected]> * */ #include "i915_drv.h" #include "gvt.h" #define _EL_OFFSET_STATUS 0x234 #define _EL_OFFSET_STATUS_BUF 0x370 #define _EL_OFFSET_STATUS_PTR 0x3A0 #define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset)) #define valid_context(ctx) ((ctx)->valid) #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) static int context_switch_events[] = { [RCS0] = RCS_AS_CONTEXT_SWITCH, [BCS0] = BCS_AS_CONTEXT_SWITCH, [VCS0] = VCS_AS_CONTEXT_SWITCH, [VCS1] = VCS2_AS_CONTEXT_SWITCH, [VECS0] = VECS_AS_CONTEXT_SWITCH, }; static int to_context_switch_event(const struct intel_engine_cs *engine) { if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[engine->id]; } static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist) { gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n", execlist->running_slot ? execlist->running_slot->index : -1, execlist->running_context ? execlist->running_context->context_id : 0, execlist->pending_slot ? execlist->pending_slot->index : -1); execlist->running_slot = execlist->pending_slot; execlist->pending_slot = NULL; execlist->running_context = execlist->running_context ? &execlist->running_slot->ctx[0] : NULL; gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n", execlist->running_slot ? execlist->running_slot->index : -1, execlist->running_context ? execlist->running_context->context_id : 0, execlist->pending_slot ? 
execlist->pending_slot->index : -1); } static void emulate_execlist_status(struct intel_vgpu_execlist *execlist) { struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; struct execlist_ctx_descriptor_format *desc = execlist->running_context; struct intel_vgpu *vgpu = execlist->vgpu; struct execlist_status_format status; u32 status_reg = execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS); status.ldw = vgpu_vreg(vgpu, status_reg); status.udw = vgpu_vreg(vgpu, status_reg + 4); if (running) { status.current_execlist_pointer = !!running->index; status.execlist_write_pointer = !!!running->index; status.execlist_0_active = status.execlist_0_valid = !!!(running->index); status.execlist_1_active = status.execlist_1_valid = !!(running->index); } else { status.context_id = 0; status.execlist_0_active = status.execlist_0_valid = 0; status.execlist_1_active = status.execlist_1_valid = 0; } status.context_id = desc ? desc->context_id : 0; status.execlist_queue_full = !!(pending); vgpu_vreg(vgpu, status_reg) = status.ldw; vgpu_vreg(vgpu, status_reg + 4) = status.udw; gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n", vgpu->id, status_reg, status.ldw, status.udw); } static void emulate_csb_update(struct intel_vgpu_execlist *execlist, struct execlist_context_status_format *status, bool trigger_interrupt_later) { struct intel_vgpu *vgpu = execlist->vgpu; struct execlist_context_status_pointer_format ctx_status_ptr; u32 write_pointer; u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset; unsigned long hwsp_gpa; ctx_status_ptr_reg = execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR); ctx_status_buf_reg = execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); write_pointer = ctx_status_ptr.write_ptr; if (write_pointer == 0x7) write_pointer = 0; else { ++write_pointer; write_pointer %= 0x6; } offset = ctx_status_buf_reg + write_pointer * 8; vgpu_vreg(vgpu, offset) = status->ldw; vgpu_vreg(vgpu, offset + 4) = status->udw; ctx_status_ptr.write_ptr = write_pointer; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; /* Update the CSB and CSB write pointer in HWSP */ hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, vgpu->hws_pga[execlist->engine->id]); if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { intel_gvt_write_gpa(vgpu, hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, status, 8); intel_gvt_write_gpa(vgpu, hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, &write_pointer, 4); } gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", vgpu->id, write_pointer, offset, status->ldw, status->udw); if (trigger_interrupt_later) return; intel_vgpu_trigger_virtual_event(vgpu, to_context_switch_event(execlist->engine)); } static int emulate_execlist_ctx_schedule_out( struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format *ctx) { struct intel_vgpu *vgpu = execlist->vgpu; struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1]; struct execlist_context_status_format status; memset(&status, 0, sizeof(status)); gvt_dbg_el("schedule out context id %x\n", ctx->context_id); if (WARN_ON(!same_context(ctx, execlist->running_context))) { gvt_vgpu_err("schedule out context is not running context," "ctx id %x running 
ctx id %x\n", ctx->context_id, execlist->running_context->context_id); return -EINVAL; } /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */ if (valid_context(ctx1) && same_context(ctx0, ctx)) { gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n"); execlist->running_context = ctx1; emulate_execlist_status(execlist); status.context_complete = status.element_switch = 1; status.context_id = ctx->context_id; emulate_csb_update(execlist, &status, false); /* * ctx1 is not valid, ctx == ctx0 * ctx1 is valid, ctx1 == ctx * --> last element is finished * emulate: * active-to-idle if there is *no* pending execlist * context-complete if there *is* pending execlist */ } else if ((!valid_context(ctx1) && same_context(ctx0, ctx)) || (valid_context(ctx1) && same_context(ctx1, ctx))) { gvt_dbg_el("need to switch virtual execlist slot\n"); switch_virtual_execlist_slot(execlist); emulate_execlist_status(execlist); status.context_complete = status.active_to_idle = 1; status.context_id = ctx->context_id; if (!pending) { emulate_csb_update(execlist, &status, false); } else { emulate_csb_update(execlist, &status, true); memset(&status, 0, sizeof(status)); status.idle_to_active = 1; status.context_id = 0; emulate_csb_update(execlist, &status, false); } } else { WARN_ON(1); return -EINVAL; } return 0; } static struct intel_vgpu_execlist_slot *get_next_execlist_slot( struct intel_vgpu_execlist *execlist) { struct intel_vgpu *vgpu = execlist->vgpu; u32 status_reg = execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS); struct execlist_status_format status; status.ldw = vgpu_vreg(vgpu, status_reg); status.udw = vgpu_vreg(vgpu, status_reg + 4); if (status.execlist_queue_full) { gvt_vgpu_err("virtual execlist slots are full\n"); return NULL; } return &execlist->slot[status.execlist_write_pointer]; } static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, struct execlist_ctx_descriptor_format ctx[2]) { struct intel_vgpu_execlist_slot *running = execlist->running_slot; struct intel_vgpu_execlist_slot *slot = get_next_execlist_slot(execlist); struct execlist_ctx_descriptor_format *ctx0, *ctx1; struct execlist_context_status_format status; struct intel_vgpu *vgpu = execlist->vgpu; gvt_dbg_el("emulate schedule-in\n"); if (!slot) { gvt_vgpu_err("no available execlist slot\n"); return -EINVAL; } memset(&status, 0, sizeof(status)); memset(slot->ctx, 0, sizeof(slot->ctx)); slot->ctx[0] = ctx[0]; slot->ctx[1] = ctx[1]; gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n", slot->index, ctx[0].context_id, ctx[1].context_id); /* * no running execlist, make this write bundle as running execlist * -> idle-to-active */ if (!running) { gvt_dbg_el("no current running execlist\n"); execlist->running_slot = slot; execlist->pending_slot = NULL; execlist->running_context = &slot->ctx[0]; gvt_dbg_el("running slot index %d running context %x\n", execlist->running_slot->index, execlist->running_context->context_id); emulate_execlist_status(execlist); status.idle_to_active = 1; status.context_id = 0; emulate_csb_update(execlist, &status, false); return 0; } ctx0 = &running->ctx[0]; ctx1 = &running->ctx[1]; gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n", running->index, ctx0->context_id, ctx1->context_id); /* * already has an running execlist * a. running ctx1 is valid, * ctx0 is finished, and running ctx1 == new execlist ctx[0] * b. 
running ctx1 is not valid, * ctx0 == new execlist ctx[0] * ----> lite-restore + preempted */ if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) && /* condition a */ (!same_context(ctx0, execlist->running_context))) || (!valid_context(ctx1) && same_context(ctx0, &slot->ctx[0]))) { /* condition b */ gvt_dbg_el("need to switch virtual execlist slot\n"); execlist->pending_slot = slot; switch_virtual_execlist_slot(execlist); emulate_execlist_status(execlist); status.lite_restore = status.preempted = 1; status.context_id = ctx[0].context_id; emulate_csb_update(execlist, &status, false); } else { gvt_dbg_el("emulate as pending slot\n"); /* * otherwise * --> emulate pending execlist exist + but no preemption case */ execlist->pending_slot = slot; emulate_execlist_status(execlist); } return 0; } #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) static int prepare_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; struct execlist_ctx_descriptor_format ctx[2]; int ret; if (!workload->emulate_schedule_in) return 0; ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id], ctx); if (ret) { gvt_vgpu_err("fail to emulate execlist schedule in\n"); return ret; } return 0; } static int complete_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_execlist *execlist = &s->execlist[workload->engine->id]; struct intel_vgpu_workload *next_workload; struct list_head *next = workload_q_head(vgpu, workload->engine)->next; bool lite_restore = false; int ret = 0; gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); if (workload->status || vgpu->resetting_eng & workload->engine->mask) goto out; if (!list_empty(workload_q_head(vgpu, workload->engine))) { struct execlist_ctx_descriptor_format *this_desc, *next_desc; next_workload = container_of(next, struct intel_vgpu_workload, list); this_desc = &workload->ctx_desc; next_desc = &next_workload->ctx_desc; lite_restore = same_context(this_desc, next_desc); } if (lite_restore) { gvt_dbg_el("next context == current - no schedule-out\n"); goto out; } ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc); out: return ret; } static int submit_context(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine, struct execlist_ctx_descriptor_format *desc, bool emulate_schedule_in) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_workload *workload = NULL; workload = intel_vgpu_create_workload(vgpu, engine, desc); if (IS_ERR(workload)) return PTR_ERR(workload); workload->prepare = prepare_execlist_workload; workload->complete = complete_execlist_workload; workload->emulate_schedule_in = emulate_schedule_in; if (emulate_schedule_in) workload->elsp_dwords = s->execlist[engine->id].elsp_dwords; gvt_dbg_el("workload %p emulate schedule_in %d\n", workload, emulate_schedule_in); intel_vgpu_queue_workload(workload); return 0; } int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_execlist *execlist = &s->execlist[engine->id]; struct execlist_ctx_descriptor_format *desc[2]; int i, ret; 
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); if (!desc[0]->valid) { gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n"); goto inv_desc; } for (i = 0; i < ARRAY_SIZE(desc); i++) { if (!desc[i]->valid) continue; if (!desc[i]->privilege_access) { gvt_vgpu_err("unexpected GGTT elsp submission\n"); goto inv_desc; } } /* submit workload */ for (i = 0; i < ARRAY_SIZE(desc); i++) { if (!desc[i]->valid) continue; ret = submit_context(vgpu, engine, desc[i], i == 0); if (ret) { gvt_vgpu_err("failed to submit desc %d\n", i); return ret; } } return 0; inv_desc: gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n", desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw); return -EINVAL; } static void init_vgpu_execlist(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_execlist *execlist = &s->execlist[engine->id]; struct execlist_context_status_pointer_format ctx_status_ptr; u32 ctx_status_ptr_reg; memset(execlist, 0, sizeof(*execlist)); execlist->vgpu = vgpu; execlist->engine = engine; execlist->slot[0].index = 0; execlist->slot[1].index = 1; ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); ctx_status_ptr.read_ptr = 0; ctx_status_ptr.write_ptr = 0x7; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } static void clean_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; intel_engine_mask_t tmp; for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); s->ring_scan_buffer[engine->id] = NULL; s->ring_scan_buffer_size[engine->id] = 0; } } static void reset_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_engine_cs *engine; intel_engine_mask_t tmp; for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) init_vgpu_execlist(vgpu, engine); } static int init_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { reset_execlist(vgpu, engine_mask); return 0; } const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = { .name = "execlist", .init = init_execlist, .reset = reset_execlist, .clean = clean_execlist, };
linux-master
drivers/gpu/drm/i915/gvt/execlist.c
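One detail of execlist.c above worth isolating is how emulate_csb_update() advances the context status buffer write pointer: 0x7 is the value init_vgpu_execlist() programs at reset and maps to slot 0, and afterwards the pointer increments and wraps across the six CSB slots before the event is written and mirrored into the HWSP. The fragment below is a standalone sketch of that pointer rule only, not a kernel interface.

#include <stdint.h>
#include <stdio.h>

/*
 * Advance the context-status-buffer write pointer the way
 * emulate_csb_update() does: 0x7 means "never written yet" and maps to
 * slot 0, otherwise the pointer increments and wraps across six slots.
 */
static uint32_t csb_advance(uint32_t write_pointer)
{
        if (write_pointer == 0x7) {
                write_pointer = 0;
        } else {
                ++write_pointer;
                write_pointer %= 0x6;
        }
        return write_pointer;
}

int main(void)
{
        uint32_t wp = 0x7;      /* reset value, as in init_vgpu_execlist() */
        int i;

        for (i = 0; i < 10; i++) {
                wp = csb_advance(wp);
                printf("event %d lands in CSB slot %u\n", i, wp);
        }
        return 0;
}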
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Ke Yu * Kevin Tian <[email protected]> * Zhiyuan Lv <[email protected]> * * Contributors: * Min He <[email protected]> * Ping Gao <[email protected]> * Tina Zhang <[email protected]> * Yulei Zhang <[email protected]> * Zhi Wang <[email protected]> * */ #include <linux/slab.h> #include "i915_drv.h" #include "i915_reg.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "gt/intel_gt_requests.h" #include "gt/shmem_utils.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" #include "display/intel_display.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #define INVALID_OP (~0U) #define OP_LEN_MI 9 #define OP_LEN_2D 10 #define OP_LEN_3D_MEDIA 16 #define OP_LEN_MFX_VC 16 #define OP_LEN_VEBOX 16 #define CMD_TYPE(cmd) (((cmd) >> 29) & 7) struct sub_op_bits { int hi; int low; }; struct decode_info { const char *name; int op_len; int nr_sub_op; const struct sub_op_bits *sub_op; }; #define MAX_CMD_BUDGET 0x7fffffff #define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15) #define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9) #define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1) #define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20) #define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10) #define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2) /* Render Command Map */ /* MI_* command Opcode (28:23) */ #define OP_MI_NOOP 0x0 #define OP_MI_SET_PREDICATE 0x1 /* HSW+ */ #define OP_MI_USER_INTERRUPT 0x2 #define OP_MI_WAIT_FOR_EVENT 0x3 #define OP_MI_FLUSH 0x4 #define OP_MI_ARB_CHECK 0x5 #define OP_MI_RS_CONTROL 0x6 /* HSW+ */ #define OP_MI_REPORT_HEAD 0x7 #define OP_MI_ARB_ON_OFF 0x8 #define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */ #define OP_MI_BATCH_BUFFER_END 0xA #define OP_MI_SUSPEND_FLUSH 0xB #define OP_MI_PREDICATE 0xC /* IVB+ */ #define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */ #define OP_MI_SET_APPID 0xE /* IVB+ */ #define OP_MI_RS_CONTEXT 0xF /* HSW+ */ #define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */ #define OP_MI_DISPLAY_FLIP 0x14 #define OP_MI_SEMAPHORE_MBOX 0x16 #define OP_MI_SET_CONTEXT 0x18 #define OP_MI_MATH 0x1A #define OP_MI_URB_CLEAR 0x19 #define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */ #define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */ #define OP_MI_STORE_DATA_IMM 0x20 #define OP_MI_STORE_DATA_INDEX 0x21 #define 
OP_MI_LOAD_REGISTER_IMM 0x22 #define OP_MI_UPDATE_GTT 0x23 #define OP_MI_STORE_REGISTER_MEM 0x24 #define OP_MI_FLUSH_DW 0x26 #define OP_MI_CLFLUSH 0x27 #define OP_MI_REPORT_PERF_COUNT 0x28 #define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */ #define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */ #define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */ #define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */ #define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */ #define OP_MI_2E 0x2E /* BDW+ */ #define OP_MI_2F 0x2F /* BDW+ */ #define OP_MI_BATCH_BUFFER_START 0x31 /* Bit definition for dword 0 */ #define _CMDBIT_BB_START_IN_PPGTT (1UL << 8) #define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) #define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U) #define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U) /* 2D command: Opcode (28:22) */ #define OP_2D(x) ((2<<7) | x) #define OP_XY_SETUP_BLT OP_2D(0x1) #define OP_XY_SETUP_CLIP_BLT OP_2D(0x3) #define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11) #define OP_XY_PIXEL_BLT OP_2D(0x24) #define OP_XY_SCANLINES_BLT OP_2D(0x25) #define OP_XY_TEXT_BLT OP_2D(0x26) #define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31) #define OP_XY_COLOR_BLT OP_2D(0x50) #define OP_XY_PAT_BLT OP_2D(0x51) #define OP_XY_MONO_PAT_BLT OP_2D(0x52) #define OP_XY_SRC_COPY_BLT OP_2D(0x53) #define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54) #define OP_XY_FULL_BLT OP_2D(0x55) #define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56) #define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57) #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58) #define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59) #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71) #define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72) #define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73) #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74) #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75) #define OP_XY_PAT_CHROMA_BLT OP_2D(0x76) #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77) /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */ #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \ ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode)) #define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03) #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01) #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02) #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04) #define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03) #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B) #define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04) #define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0) #define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1) #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2) #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3) #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4) #define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5) #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0) #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2) #define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3) #define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5) #define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */ #define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */ #define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */ #define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */ #define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08) #define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 
0x0, 0x09) #define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A) #define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B) #define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */ #define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E) #define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F) #define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10) #define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11) #define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12) #define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13) #define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14) #define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15) #define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16) #define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17) #define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18) #define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */ #define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */ #define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */ #define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */ #define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */ #define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */ #define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */ #define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */ #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */ #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */ #define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */ #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */ #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */ #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */ #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */ #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */ #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */ #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */ #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */ #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */ #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */ #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */ #define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */ #define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */ #define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */ #define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */ #define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */ #define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */ #define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */ #define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */ #define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */ #define 
OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */ #define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */ #define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */ #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */ #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */ #define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */ #define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */ #define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */ #define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */ #define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */ #define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */ #define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */ #define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */ #define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */ #define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */ #define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */ #define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */ #define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */ #define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */ #define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */ #define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */ #define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00) #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02) #define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04) #define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05) #define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06) #define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07) #define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08) #define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A) #define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B) #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C) #define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D) #define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E) #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F) #define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10) #define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11) #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */ #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */ #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */ #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */ #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */ #define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17) #define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18) #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */ #define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */ #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */ #define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C) #define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00) #define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00) /* VCCP Command Parser */ /* * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD 
License) * git://anongit.freedesktop.org/vaapi/intel-driver * src/i965_defines.h * */ #define OP_MFX(pipeline, op, sub_opa, sub_opb) \ (3 << 13 | \ (pipeline) << 11 | \ (op) << 8 | \ (sub_opa) << 5 | \ (sub_opb)) #define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */ #define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */ #define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */ #define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */ #define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */ #define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */ #define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */ #define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */ #define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */ #define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */ #define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */ #define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */ #define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */ #define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */ #define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */ #define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */ #define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */ #define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */ #define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */ #define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */ #define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */ #define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */ #define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */ #define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */ #define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */ #define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */ #define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */ #define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */ #define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */ #define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */ #define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */ #define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */ #define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */ #define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */ #define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */ #define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */ #define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */ #define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0) #define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2) #define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8) #define OP_VEB(pipeline, op, sub_opa, sub_opb) \ (3 << 13 | \ (pipeline) << 11 | \ (op) << 8 | \ (sub_opa) << 5 | \ (sub_opb)) #define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0) #define OP_VEB_STATE OP_VEB(2, 4, 0, 2) #define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3) struct parser_exec_state; typedef int (*parser_cmd_handler)(struct parser_exec_state *s); #define GVT_CMD_HASH_BITS 7 /* which DWords need address fix */ #define ADDR_FIX_1(x1) (1 << (x1)) #define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2)) #define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3)) #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4)) #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5)) #define DWORD_FIELD(dword, end, start) \ FIELD_GET(GENMASK(end, start), cmd_val(s, dword)) #define OP_LENGTH_BIAS 2 #define CMD_LEN(value) (value + OP_LENGTH_BIAS) static int gvt_check_valid_cmd_length(int len, int valid_len) { 
if (valid_len != len) { gvt_err("len is not valid: len=%u valid_len=%u\n", len, valid_len); return -EFAULT; } return 0; } struct cmd_info { const char *name; u32 opcode; #define F_LEN_MASK 3U #define F_LEN_CONST 1U #define F_LEN_VAR 0U /* value is const although LEN maybe variable */ #define F_LEN_VAR_FIXED (1<<1) /* * command has its own ip advance logic * e.g. MI_BATCH_START, MI_BATCH_END */ #define F_IP_ADVANCE_CUSTOM (1<<2) u32 flag; #define R_RCS BIT(RCS0) #define R_VCS1 BIT(VCS0) #define R_VCS2 BIT(VCS1) #define R_VCS (R_VCS1 | R_VCS2) #define R_BCS BIT(BCS0) #define R_VECS BIT(VECS0) #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) /* rings that support this cmd: BLT/RCS/VCS/VECS */ intel_engine_mask_t rings; /* devices that support this cmd: SNB/IVB/HSW/... */ u16 devices; /* which DWords are address that need fix up. * bit 0 means a 32-bit non address operand in command * bit 1 means address operand, which could be 32-bit * or 64-bit depending on different architectures.( * defined by "gmadr_bytes_in_cmd" in intel_gvt. * No matter the address length, each address only takes * one bit in the bitmap. */ u16 addr_bitmap; /* flag == F_LEN_CONST : command length * flag == F_LEN_VAR : length bias bits * Note: length is in DWord */ u32 len; parser_cmd_handler handler; /* valid length in DWord */ u32 valid_len; }; struct cmd_entry { struct hlist_node hlist; const struct cmd_info *info; }; enum { RING_BUFFER_INSTRUCTION, BATCH_BUFFER_INSTRUCTION, BATCH_BUFFER_2ND_LEVEL, RING_BUFFER_CTX, }; enum { GTT_BUFFER, PPGTT_BUFFER }; struct parser_exec_state { struct intel_vgpu *vgpu; const struct intel_engine_cs *engine; int buf_type; /* batch buffer address type */ int buf_addr_type; /* graphics memory address of ring buffer start */ unsigned long ring_start; unsigned long ring_size; unsigned long ring_head; unsigned long ring_tail; /* instruction graphics memory address */ unsigned long ip_gma; /* mapped va of the instr_gma */ void *ip_va; void *rb_va; void *ret_bb_va; /* next instruction when return from batch buffer to ring buffer */ unsigned long ret_ip_gma_ring; /* next instruction when return from 2nd batch buffer to batch buffer */ unsigned long ret_ip_gma_bb; /* batch buffer address type (GTT or PPGTT) * used when ret from 2nd level batch buffer */ int saved_buf_addr_type; bool is_ctx_wa; bool is_init_ctx; const struct cmd_info *info; struct intel_vgpu_workload *workload; }; #define gmadr_dw_number(s) \ (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) static unsigned long bypass_scan_mask = 0; /* ring ALL, type = 0 */ static const struct sub_op_bits sub_op_mi[] = { {31, 29}, {28, 23}, }; static const struct decode_info decode_info_mi = { "MI", OP_LEN_MI, ARRAY_SIZE(sub_op_mi), sub_op_mi, }; /* ring RCS, command type 2 */ static const struct sub_op_bits sub_op_2d[] = { {31, 29}, {28, 22}, }; static const struct decode_info decode_info_2d = { "2D", OP_LEN_2D, ARRAY_SIZE(sub_op_2d), sub_op_2d, }; /* ring RCS, command type 3 */ static const struct sub_op_bits sub_op_3d_media[] = { {31, 29}, {28, 27}, {26, 24}, {23, 16}, }; static const struct decode_info decode_info_3d_media = { "3D_Media", OP_LEN_3D_MEDIA, ARRAY_SIZE(sub_op_3d_media), sub_op_3d_media, }; /* ring VCS, command type 3 */ static const struct sub_op_bits sub_op_mfx_vc[] = { {31, 29}, {28, 27}, {26, 24}, {23, 21}, {20, 16}, }; static const struct decode_info decode_info_mfx_vc = { "MFX_VC", OP_LEN_MFX_VC, ARRAY_SIZE(sub_op_mfx_vc), sub_op_mfx_vc, }; /* ring VECS, command type 3 */ static const struct sub_op_bits sub_op_vebox[] = { 
{31, 29}, {28, 27}, {26, 24}, {23, 21}, {20, 16}, }; static const struct decode_info decode_info_vebox = { "VEBOX", OP_LEN_VEBOX, ARRAY_SIZE(sub_op_vebox), sub_op_vebox, }; static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { [RCS0] = { &decode_info_mi, NULL, NULL, &decode_info_3d_media, NULL, NULL, NULL, NULL, }, [VCS0] = { &decode_info_mi, NULL, NULL, &decode_info_mfx_vc, NULL, NULL, NULL, NULL, }, [BCS0] = { &decode_info_mi, NULL, &decode_info_2d, NULL, NULL, NULL, NULL, NULL, }, [VECS0] = { &decode_info_mi, NULL, NULL, &decode_info_vebox, NULL, NULL, NULL, NULL, }, [VCS1] = { &decode_info_mi, NULL, NULL, &decode_info_mfx_vc, NULL, NULL, NULL, NULL, }, }; static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine) { const struct decode_info *d_info; d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; if (d_info == NULL) return INVALID_OP; return cmd >> (32 - d_info->op_len); } static inline const struct cmd_info * find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, const struct intel_engine_cs *engine) { struct cmd_entry *e; hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { if (opcode == e->info->opcode && e->info->rings & engine->mask) return e->info; } return NULL; } static inline const struct cmd_info * get_cmd_info(struct intel_gvt *gvt, u32 cmd, const struct intel_engine_cs *engine) { u32 opcode; opcode = get_opcode(cmd, engine); if (opcode == INVALID_OP) return NULL; return find_cmd_entry(gvt, opcode, engine); } static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low) { return (cmd >> low) & ((1U << (hi - low + 1)) - 1); } static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine) { const struct decode_info *d_info; int i; d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; if (d_info == NULL) return; gvt_dbg_cmd("opcode=0x%x %s sub_ops:", cmd >> (32 - d_info->op_len), d_info->name); for (i = 0; i < d_info->nr_sub_op; i++) pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi, d_info->sub_op[i].low)); pr_err("\n"); } static inline u32 *cmd_ptr(struct parser_exec_state *s, int index) { return s->ip_va + (index << 2); } static inline u32 cmd_val(struct parser_exec_state *s, int index) { return *cmd_ptr(s, index); } static inline bool is_init_ctx(struct parser_exec_state *s) { return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx); } static void parser_exec_state_dump(struct parser_exec_state *s) { int cnt = 0; int i; gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)" " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, s->engine->name, s->ring_start, s->ring_start + s->ring_size, s->ring_head, s->ring_tail); gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", s->buf_type == RING_BUFFER_INSTRUCTION ? "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ? "CTX_BUFFER" : "BATCH_BUFFER"), s->buf_addr_type == GTT_BUFFER ? 
"GTT" : "PPGTT", s->ip_gma); if (s->ip_va == NULL) { gvt_dbg_cmd(" ip_va(NULL)"); return; } gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n", s->ip_va, cmd_val(s, 0), cmd_val(s, 1), cmd_val(s, 2), cmd_val(s, 3)); print_opcode(cmd_val(s, 0), s->engine); s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); while (cnt < 1024) { gvt_dbg_cmd("ip_va=%p: ", s->ip_va); for (i = 0; i < 8; i++) gvt_dbg_cmd("%08x ", cmd_val(s, i)); gvt_dbg_cmd("\n"); s->ip_va += 8 * sizeof(u32); cnt += 8; } } static inline void update_ip_va(struct parser_exec_state *s) { unsigned long len = 0; if (WARN_ON(s->ring_head == s->ring_tail)) return; if (s->buf_type == RING_BUFFER_INSTRUCTION || s->buf_type == RING_BUFFER_CTX) { unsigned long ring_top = s->ring_start + s->ring_size; if (s->ring_head > s->ring_tail) { if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top) len = (s->ip_gma - s->ring_head); else if (s->ip_gma >= s->ring_start && s->ip_gma <= s->ring_tail) len = (ring_top - s->ring_head) + (s->ip_gma - s->ring_start); } else len = (s->ip_gma - s->ring_head); s->ip_va = s->rb_va + len; } else {/* shadow batch buffer */ s->ip_va = s->ret_bb_va; } } static inline int ip_gma_set(struct parser_exec_state *s, unsigned long ip_gma) { WARN_ON(!IS_ALIGNED(ip_gma, 4)); s->ip_gma = ip_gma; update_ip_va(s); return 0; } static inline int ip_gma_advance(struct parser_exec_state *s, unsigned int dw_len) { s->ip_gma += (dw_len << 2); if (s->buf_type == RING_BUFFER_INSTRUCTION) { if (s->ip_gma >= s->ring_start + s->ring_size) s->ip_gma -= s->ring_size; update_ip_va(s); } else { s->ip_va += (dw_len << 2); } return 0; } static inline int get_cmd_length(const struct cmd_info *info, u32 cmd) { if ((info->flag & F_LEN_MASK) == F_LEN_CONST) return info->len; else return (cmd & ((1U << info->len) - 1)) + 2; return 0; } static inline int cmd_length(struct parser_exec_state *s) { return get_cmd_length(s->info, cmd_val(s, 0)); } /* do not remove this, some platform may need clflush here */ #define patch_value(s, addr, val) do { \ *addr = val; \ } while (0) static inline bool is_mocs_mmio(unsigned int offset) { return ((offset >= 0xc800) && (offset <= 0xcff8)) || ((offset >= 0xb020) && (offset <= 0xb0a0)); } static int is_cmd_update_pdps(unsigned int offset, struct parser_exec_state *s) { u32 base = s->workload->engine->mmio_base; return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0)); } static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, unsigned int offset, unsigned int index) { struct intel_vgpu *vgpu = s->vgpu; struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; struct intel_vgpu_mm *mm; u64 pdps[GEN8_3LVL_PDPES]; if (shadow_mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { pdps[0] = (u64)cmd_val(s, 2) << 32; pdps[0] |= cmd_val(s, 4); mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); if (!mm) { gvt_vgpu_err("failed to get the 4-level shadow vm\n"); return -EINVAL; } intel_vgpu_mm_get(mm); list_add_tail(&mm->ppgtt_mm.link, &s->workload->lri_shadow_mm); *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]); *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]); } else { /* Currently all guests use PML4 table and now can't * have a guest with 3-level table but uses LRI for * PPGTT update. So this is simply un-testable. 
*/ GEM_BUG_ON(1); gvt_vgpu_err("invalid shared shadow vm type\n"); return -EINVAL; } return 0; } static int cmd_reg_handler(struct parser_exec_state *s, unsigned int offset, unsigned int index, char *cmd) { struct intel_vgpu *vgpu = s->vgpu; struct intel_gvt *gvt = vgpu->gvt; u32 ctx_sr_ctl; u32 *vreg, vreg_old; if (offset + 4 > gvt->device_info.mmio_size) { gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", cmd, offset); return -EFAULT; } if (is_init_ctx(s)) { struct intel_gvt_mmio_info *mmio_info; intel_gvt_mmio_set_cmd_accessible(gvt, offset); mmio_info = intel_gvt_find_mmio_info(gvt, offset); if (mmio_info && mmio_info->write) intel_gvt_mmio_set_cmd_write_patch(gvt, offset); return 0; } if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { gvt_vgpu_err("%s access to non-render register (%x)\n", cmd, offset); return -EBADRQC; } if (!strncmp(cmd, "srm", 3) || !strncmp(cmd, "lrm", 3)) { if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) || offset == 0x21f0 || (IS_BROADWELL(gvt->gt->i915) && offset == i915_mmio_reg_offset(INSTPM))) return 0; else { gvt_vgpu_err("%s access to register (%x)\n", cmd, offset); return -EPERM; } } if (!strncmp(cmd, "lrr-src", 7) || !strncmp(cmd, "lrr-dst", 7)) { if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c) return 0; else { gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset); return -EPERM; } } if (!strncmp(cmd, "pipe_ctrl", 9)) { /* TODO: add LRI POST logic here */ return 0; } if (strncmp(cmd, "lri", 3)) return -EPERM; /* below are all lri handlers */ vreg = &vgpu_vreg(s->vgpu, offset); if (is_cmd_update_pdps(offset, s) && cmd_pdp_mmio_update_handler(s, offset, index)) return -EINVAL; if (offset == i915_mmio_reg_offset(DERRMR) || offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); } if (is_mocs_mmio(offset)) *vreg = cmd_val(s, index + 1); vreg_old = *vreg; if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) { u32 cmdval_new, cmdval; struct intel_gvt_mmio_info *mmio_info; cmdval = cmd_val(s, index + 1); mmio_info = intel_gvt_find_mmio_info(gvt, offset); if (!mmio_info) { cmdval_new = cmdval; } else { u64 ro_mask = mmio_info->ro_mask; int ret; if (likely(!ro_mask)) ret = mmio_info->write(s->vgpu, offset, &cmdval, 4); else { gvt_vgpu_err("try to write RO reg %x\n", offset); ret = -EBADRQC; } if (ret) return ret; cmdval_new = *vreg; } if (cmdval_new != cmdval) patch_value(s, cmd_ptr(s, index+1), cmdval_new); } /* only patch cmd. restore vreg value if changed in mmio write handler*/ *vreg = vreg_old; /* TODO * In order to let workload with inhibit context to generate * correct image data into memory, vregs values will be loaded to * hw via LRIs in the workload with inhibit context. But as * indirect context is loaded prior to LRIs in workload, we don't * want reg values specified in indirect context overwritten by * LRIs in workloads. 
So, when scanning an indirect context, we * update reg values in it into vregs, so LRIs in workload with * inhibit context will restore with correct values */ if (GRAPHICS_VER(s->engine->i915) == 9 && intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { intel_gvt_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); /* check inhibit context */ if (ctx_sr_ctl & 1) { u32 data = cmd_val(s, index + 1); if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) intel_vgpu_mask_mmio_write(vgpu, offset, &data, 4); else vgpu_vreg(vgpu, offset) = data; } } return 0; } #define cmd_reg(s, i) \ (cmd_val(s, i) & GENMASK(22, 2)) #define cmd_reg_inhibit(s, i) \ (cmd_val(s, i) & GENMASK(22, 18)) #define cmd_gma(s, i) \ (cmd_val(s, i) & GENMASK(31, 2)) #define cmd_gma_hi(s, i) \ (cmd_val(s, i) & GENMASK(15, 0)) static int cmd_handler_lri(struct parser_exec_state *s) { int i, ret = 0; int cmd_len = cmd_length(s); for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { if (s->engine->id == BCS0 && cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) ret |= 0; else ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; } if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); if (ret) break; } return ret; } static int cmd_handler_lrr(struct parser_exec_state *s) { int i, ret = 0; int cmd_len = cmd_length(s); for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(s->engine->i915)) ret |= ((cmd_reg_inhibit(s, i) || (cmd_reg_inhibit(s, i + 1)))) ? -EBADRQC : 0; if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src"); if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst"); if (ret) break; } return ret; } static inline int cmd_address_audit(struct parser_exec_state *s, unsigned long guest_gma, int op_size, bool index_mode); static int cmd_handler_lrm(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; unsigned long gma; int i, ret = 0; int cmd_len = cmd_length(s); for (i = 1; i < cmd_len;) { if (IS_BROADWELL(s->engine->i915)) ret |= (cmd_reg_inhibit(s, i)) ? 
-EBADRQC : 0; if (ret) break; ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm"); if (ret) break; if (cmd_val(s, 0) & (1 << 22)) { gma = cmd_gma(s, i + 1); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, i + 2)) << 32; ret |= cmd_address_audit(s, gma, sizeof(u32), false); if (ret) break; } i += gmadr_dw_number(s) + 1; } return ret; } static int cmd_handler_srm(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; unsigned long gma; int i, ret = 0; int cmd_len = cmd_length(s); for (i = 1; i < cmd_len;) { ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm"); if (ret) break; if (cmd_val(s, 0) & (1 << 22)) { gma = cmd_gma(s, i + 1); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, i + 2)) << 32; ret |= cmd_address_audit(s, gma, sizeof(u32), false); if (ret) break; } i += gmadr_dw_number(s) + 1; } return ret; } struct cmd_interrupt_event { int pipe_control_notify; int mi_flush_dw; int mi_user_interrupt; }; static const struct cmd_interrupt_event cmd_interrupt_events[] = { [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, .mi_user_interrupt = RCS_MI_USER_INTERRUPT, }, [BCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = BCS_MI_FLUSH_DW, .mi_user_interrupt = BCS_MI_USER_INTERRUPT, }, [VCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS_MI_FLUSH_DW, .mi_user_interrupt = VCS_MI_USER_INTERRUPT, }, [VCS1] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS2_MI_FLUSH_DW, .mi_user_interrupt = VCS2_MI_USER_INTERRUPT, }, [VECS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VECS_MI_FLUSH_DW, .mi_user_interrupt = VECS_MI_USER_INTERRUPT, }, }; static int cmd_handler_pipe_control(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; unsigned long gma; bool index_mode = false; unsigned int post_sync; int ret = 0; u32 hws_pga, val; post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; /* LRI post sync */ if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE) ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl"); /* post sync */ else if (post_sync) { if (post_sync == 2) ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl"); else if (post_sync == 3) ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); else if (post_sync == 1) { /* check ggtt*/ if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { gma = cmd_val(s, 2) & GENMASK(31, 3); if (gmadr_bytes == 8) gma |= (cmd_gma_hi(s, 3)) << 32; /* Store Data Index */ if (cmd_val(s, 1) & (1 << 21)) index_mode = true; ret |= cmd_address_audit(s, gma, sizeof(u64), index_mode); if (ret) return ret; if (index_mode) { hws_pga = s->vgpu->hws_pga[s->engine->id]; gma = hws_pga + gma; patch_value(s, cmd_ptr(s, 2), gma); val = cmd_val(s, 1) & (~(1 << 21)); patch_value(s, cmd_ptr(s, 1), val); } } } } if (ret) return ret; if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, s->workload->pending_events); return 0; } static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, s->workload->pending_events); patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } static int cmd_advance_default(struct parser_exec_state *s) { return ip_gma_advance(s, cmd_length(s)); } static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s) { int ret; if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { s->buf_type = BATCH_BUFFER_INSTRUCTION; ret = ip_gma_set(s, 
s->ret_ip_gma_bb); s->buf_addr_type = s->saved_buf_addr_type; } else if (s->buf_type == RING_BUFFER_CTX) { ret = ip_gma_set(s, s->ring_tail); } else { s->buf_type = RING_BUFFER_INSTRUCTION; s->buf_addr_type = GTT_BUFFER; if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size) s->ret_ip_gma_ring -= s->ring_size; ret = ip_gma_set(s, s->ret_ip_gma_ring); } return ret; } struct mi_display_flip_command_info { int pipe; int plane; int event; i915_reg_t stride_reg; i915_reg_t ctrl_reg; i915_reg_t surf_reg; u64 stride_val; u64 tile_val; u64 surf_val; bool async_flip; }; struct plane_code_mapping { int pipe; int plane; int event; }; static int gen8_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; struct plane_code_mapping gen8_plane_code[] = { [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE}, [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE}, [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE}, [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE}, [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE}, [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE}, }; u32 dword0, dword1, dword2; u32 v; dword0 = cmd_val(s, 0); dword1 = cmd_val(s, 1); dword2 = cmd_val(s, 2); v = (dword0 & GENMASK(21, 19)) >> 19; if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code))) return -EBADRQC; info->pipe = gen8_plane_code[v].pipe; info->plane = gen8_plane_code[v].plane; info->event = gen8_plane_code[v].event; info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; info->tile_val = (dword1 & 0x1); info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1); if (info->plane == PLANE_A) { info->ctrl_reg = DSPCNTR(info->pipe); info->stride_reg = DSPSTRIDE(info->pipe); info->surf_reg = DSPSURF(info->pipe); } else if (info->plane == PLANE_B) { info->ctrl_reg = SPRCTL(info->pipe); info->stride_reg = SPRSTRIDE(info->pipe); info->surf_reg = SPRSURF(info->pipe); } else { drm_WARN_ON(&dev_priv->drm, 1); return -EBADRQC; } return 0; } static int skl_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; struct intel_vgpu *vgpu = s->vgpu; u32 dword0 = cmd_val(s, 0); u32 dword1 = cmd_val(s, 1); u32 dword2 = cmd_val(s, 2); u32 plane = (dword0 & GENMASK(12, 8)) >> 8; info->plane = PRIMARY_PLANE; switch (plane) { case MI_DISPLAY_FLIP_SKL_PLANE_1_A: info->pipe = PIPE_A; info->event = PRIMARY_A_FLIP_DONE; break; case MI_DISPLAY_FLIP_SKL_PLANE_1_B: info->pipe = PIPE_B; info->event = PRIMARY_B_FLIP_DONE; break; case MI_DISPLAY_FLIP_SKL_PLANE_1_C: info->pipe = PIPE_C; info->event = PRIMARY_C_FLIP_DONE; break; case MI_DISPLAY_FLIP_SKL_PLANE_2_A: info->pipe = PIPE_A; info->event = SPRITE_A_FLIP_DONE; info->plane = SPRITE_PLANE; break; case MI_DISPLAY_FLIP_SKL_PLANE_2_B: info->pipe = PIPE_B; info->event = SPRITE_B_FLIP_DONE; info->plane = SPRITE_PLANE; break; case MI_DISPLAY_FLIP_SKL_PLANE_2_C: info->pipe = PIPE_C; info->event = SPRITE_C_FLIP_DONE; info->plane = SPRITE_PLANE; break; default: gvt_vgpu_err("unknown plane code %d\n", plane); return -EBADRQC; } info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; info->tile_val = (dword1 & GENMASK(2, 0)); info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1); info->ctrl_reg = DSPCNTR(info->pipe); info->stride_reg = DSPSTRIDE(info->pipe); info->surf_reg = DSPSURF(info->pipe); return 0; } static int gen8_check_mi_display_flip(struct parser_exec_state *s, 
struct mi_display_flip_command_info *info) { u32 stride, tile; if (!info->async_flip) return 0; if (GRAPHICS_VER(s->engine->i915) >= 9) { stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; } else { stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(15, 6)) >> 6; tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; } if (stride != info->stride_val) gvt_dbg_cmd("cannot change stride during async flip\n"); if (tile != info->tile_val) gvt_dbg_cmd("cannot change tile during async flip\n"); return 0; } static int gen8_update_plane_mmio_from_mi_display_flip( struct parser_exec_state *s, struct mi_display_flip_command_info *info) { struct drm_i915_private *dev_priv = s->engine->i915; struct intel_vgpu *vgpu = s->vgpu; set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); if (GRAPHICS_VER(dev_priv) >= 9) { set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), info->tile_val << 10); } else { set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6), info->stride_val << 6); set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10), info->tile_val << 10); } if (info->plane == PLANE_PRIMARY) vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++; if (info->async_flip) intel_vgpu_trigger_virtual_event(vgpu, info->event); else set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]); return 0; } static int decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { if (IS_BROADWELL(s->engine->i915)) return gen8_decode_mi_display_flip(s, info); if (GRAPHICS_VER(s->engine->i915) >= 9) return skl_decode_mi_display_flip(s, info); return -ENODEV; } static int check_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { return gen8_check_mi_display_flip(s, info); } static int update_plane_mmio_from_mi_display_flip( struct parser_exec_state *s, struct mi_display_flip_command_info *info) { return gen8_update_plane_mmio_from_mi_display_flip(s, info); } static int cmd_handler_mi_display_flip(struct parser_exec_state *s) { struct mi_display_flip_command_info info; struct intel_vgpu *vgpu = s->vgpu; int ret; int i; int len = cmd_length(s); u32 valid_len = CMD_LEN(1); /* Flip Type == Stereo 3D Flip */ if (DWORD_FIELD(2, 1, 0) == 2) valid_len++; ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) return ret; ret = decode_mi_display_flip(s, &info); if (ret) { gvt_vgpu_err("fail to decode MI display flip command\n"); return ret; } ret = check_mi_display_flip(s, &info); if (ret) { gvt_vgpu_err("invalid MI display flip command\n"); return ret; } ret = update_plane_mmio_from_mi_display_flip(s, &info); if (ret) { gvt_vgpu_err("fail to update plane mmio\n"); return ret; } for (i = 0; i < len; i++) patch_value(s, cmd_ptr(s, i), MI_NOOP); return 0; } static bool is_wait_for_flip_pending(u32 cmd) { return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING | MI_WAIT_FOR_PLANE_B_FLIP_PENDING | MI_WAIT_FOR_PLANE_C_FLIP_PENDING | MI_WAIT_FOR_SPRITE_A_FLIP_PENDING | MI_WAIT_FOR_SPRITE_B_FLIP_PENDING | MI_WAIT_FOR_SPRITE_C_FLIP_PENDING); } static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s) { u32 cmd = cmd_val(s, 0); if (!is_wait_for_flip_pending(cmd)) return 0; patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int 
index) { unsigned long addr; unsigned long gma_high, gma_low; struct intel_vgpu *vgpu = s->vgpu; int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd; if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) { gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes); return INTEL_GVT_INVALID_ADDR; } gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK; if (gmadr_bytes == 4) { addr = gma_low; } else { gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK; addr = (((unsigned long)gma_high) << 32) | gma_low; } return addr; } static inline int cmd_address_audit(struct parser_exec_state *s, unsigned long guest_gma, int op_size, bool index_mode) { struct intel_vgpu *vgpu = s->vgpu; u32 max_surface_size = vgpu->gvt->device_info.max_surface_size; int i; int ret; if (op_size > max_surface_size) { gvt_vgpu_err("command address audit fail name %s\n", s->info->name); return -EFAULT; } if (index_mode) { if (guest_gma >= I915_GTT_PAGE_SIZE) { ret = -EFAULT; goto err; } } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) { ret = -EFAULT; goto err; } return 0; err: gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", s->info->name, guest_gma, op_size); pr_err("cmd dump: "); for (i = 0; i < cmd_length(s); i++) { if (!(i % 4)) pr_err("\n%08x ", cmd_val(s, i)); else pr_err("%08x ", cmd_val(s, i)); } pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n", vgpu->id, vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_gmadr_end(vgpu), vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_gmadr_end(vgpu)); return ret; } static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; int op_size = (cmd_length(s) - 3) * sizeof(u32); int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0; unsigned long gma, gma_low, gma_high; u32 valid_len = CMD_LEN(2); int ret = 0; /* check ppggt */ if (!(cmd_val(s, 0) & (1 << 22))) return 0; /* check if QWORD */ if (DWORD_FIELD(0, 21, 21)) valid_len++; ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) return ret; gma = cmd_val(s, 2) & GENMASK(31, 2); if (gmadr_bytes == 8) { gma_low = cmd_val(s, 1) & GENMASK(31, 2); gma_high = cmd_val(s, 2) & GENMASK(15, 0); gma = (gma_high << 32) | gma_low; core_id = (cmd_val(s, 1) & (1 << 0)) ? 
1 : 0; } ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false); return ret; } static inline int unexpected_cmd(struct parser_exec_state *s) { struct intel_vgpu *vgpu = s->vgpu; gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); return -EBADRQC; } static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_op_2e(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_op_2f(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * sizeof(u32); unsigned long gma, gma_high; u32 valid_len = CMD_LEN(1); int ret = 0; if (!(cmd_val(s, 0) & (1 << 22))) return ret; /* check inline data */ if (cmd_val(s, 0) & BIT(18)) valid_len = CMD_LEN(9); ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) return ret; gma = cmd_val(s, 1) & GENMASK(31, 2); if (gmadr_bytes == 8) { gma_high = cmd_val(s, 2) & GENMASK(15, 0); gma = (gma_high << 32) | gma; } ret = cmd_address_audit(s, gma, op_size, false); return ret; } static int cmd_handler_mi_store_data_index(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_clflush(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_conditional_batch_buffer_end( struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_update_gtt(struct parser_exec_state *s) { return unexpected_cmd(s); } static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) { int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; unsigned long gma; bool index_mode = false; int ret = 0; u32 hws_pga, val; u32 valid_len = CMD_LEN(2); ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) { /* Check again for Qword */ ret = gvt_check_valid_cmd_length(cmd_length(s), ++valid_len); return ret; } /* Check post-sync and ppgtt bit */ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { gma = cmd_val(s, 1) & GENMASK(31, 3); if (gmadr_bytes == 8) gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32; /* Store Data Index */ if (cmd_val(s, 0) & (1 << 21)) index_mode = true; ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); if (ret) return ret; if (index_mode) { hws_pga = s->vgpu->hws_pga[s->engine->id]; gma = hws_pga + gma; patch_value(s, cmd_ptr(s, 1), gma); val = cmd_val(s, 0) & (~(1 << 21)); patch_value(s, cmd_ptr(s, 0), val); } } /* Check notify bit */ if ((cmd_val(s, 0) & (1 << 8))) set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, s->workload->pending_events); return ret; } static void addr_type_update_snb(struct parser_exec_state *s) { if ((s->buf_type == RING_BUFFER_INSTRUCTION) && (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) { s->buf_addr_type = PPGTT_BUFFER; } } static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, unsigned long gma, unsigned long end_gma, void *va) { unsigned long copy_len, offset; unsigned long len = 0; unsigned long gpa; while (gma != end_gma) { gpa = intel_vgpu_gma_to_gpa(mm, gma); if (gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("invalid gma address: %lx\n", gma); return -EFAULT; } offset = gma & (I915_GTT_PAGE_SIZE - 1); copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ? 
I915_GTT_PAGE_SIZE - offset : end_gma - gma; intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len); len += copy_len; gma += copy_len; } return len; } /* * Check whether a batch buffer needs to be scanned. Currently * the only criteria is based on privilege. */ static int batch_buffer_needs_scan(struct parser_exec_state *s) { /* Decide privilege based on address space */ if (cmd_val(s, 0) & BIT(8) && !(s->vgpu->scan_nonprivbb & s->engine->mask)) return 0; return 1; } static const char *repr_addr_type(unsigned int type) { return type == PPGTT_BUFFER ? "ppgtt" : "ggtt"; } static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size, unsigned long *bb_end_cmd_offset) { unsigned long gma = 0; const struct cmd_info *info; u32 cmd_len = 0; bool bb_end = false; struct intel_vgpu *vgpu = s->vgpu; u32 cmd; struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; *bb_size = 0; *bb_end_cmd_offset = 0; /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; cmd = cmd_val(s, 0); info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", cmd, get_opcode(cmd, s->engine), repr_addr_type(s->buf_addr_type), s->engine->name, s->workload); return -EBADRQC; } do { if (copy_gma_to_hva(s->vgpu, mm, gma, gma + 4, &cmd) < 0) return -EFAULT; info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", cmd, get_opcode(cmd, s->engine), repr_addr_type(s->buf_addr_type), s->engine->name, s->workload); return -EBADRQC; } if (info->opcode == OP_MI_BATCH_BUFFER_END) { bb_end = true; } else if (info->opcode == OP_MI_BATCH_BUFFER_START) { if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) /* chained batch buffer */ bb_end = true; } if (bb_end) *bb_end_cmd_offset = *bb_size; cmd_len = get_cmd_length(info, cmd) << 2; *bb_size += cmd_len; gma += cmd_len; } while (!bb_end); return 0; } static int audit_bb_end(struct parser_exec_state *s, void *va) { struct intel_vgpu *vgpu = s->vgpu; u32 cmd = *(u32 *)va; const struct cmd_info *info; info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", cmd, get_opcode(cmd, s->engine), repr_addr_type(s->buf_addr_type), s->engine->name, s->workload); return -EBADRQC; } if ((info->opcode == OP_MI_BATCH_BUFFER_END) || ((info->opcode == OP_MI_BATCH_BUFFER_START) && (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0))) return 0; return -EBADRQC; } static int perform_bb_shadow(struct parser_exec_state *s) { struct intel_vgpu *vgpu = s->vgpu; struct intel_vgpu_shadow_bb *bb; unsigned long gma = 0; unsigned long bb_size; unsigned long bb_end_cmd_offset; int ret = 0; struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; unsigned long start_offset = 0; /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); if (gma == INTEL_GVT_INVALID_ADDR) return -EFAULT; ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); if (ret) return ret; bb = kzalloc(sizeof(*bb), GFP_KERNEL); if (!bb) return -ENOMEM; bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; /* the start_offset stores the batch buffer's start gma's * offset relative to page boundary. 
so for non-privileged batch * buffer, the shadowed gem object holds exactly the same page * layout as original gem object. This is for the convience of * replacing the whole non-privilged batch buffer page to this * shadowed one in PPGTT at the same gma address. (this replacing * action is not implemented yet now, but may be necessary in * future). * for prileged batch buffer, we just change start gma address to * that of shadowed page. */ if (bb->ppgtt) start_offset = gma & ~I915_GTT_PAGE_MASK; bb->obj = i915_gem_object_create_shmem(s->engine->i915, round_up(bb_size + start_offset, PAGE_SIZE)); if (IS_ERR(bb->obj)) { ret = PTR_ERR(bb->obj); goto err_free_bb; } bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB); if (IS_ERR(bb->va)) { ret = PTR_ERR(bb->va); goto err_free_obj; } ret = copy_gma_to_hva(s->vgpu, mm, gma, gma + bb_size, bb->va + start_offset); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); ret = -EFAULT; goto err_unmap; } ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); if (ret) goto err_unmap; i915_gem_object_unlock(bb->obj); INIT_LIST_HEAD(&bb->list); list_add(&bb->list, &s->workload->shadow_bb); bb->bb_start_cmd_va = s->ip_va; if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) bb->bb_offset = s->ip_va - s->rb_va; else bb->bb_offset = 0; /* * ip_va saves the virtual address of the shadow batch buffer, while * ip_gma saves the graphics address of the original batch buffer. * As the shadow batch buffer is just a copy from the originial one, * it should be right to use shadow batch buffer'va and original batch * buffer's gma in pair. After all, we don't want to pin the shadow * buffer here (too early). */ s->ip_va = bb->va + start_offset; s->ip_gma = gma; return 0; err_unmap: i915_gem_object_unpin_map(bb->obj); err_free_obj: i915_gem_object_put(bb->obj); err_free_bb: kfree(bb); return ret; } static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) { bool second_level; int ret = 0; struct intel_vgpu *vgpu = s->vgpu; if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); return -EFAULT; } second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); return -EFAULT; } s->saved_buf_addr_type = s->buf_addr_type; addr_type_update_snb(s); if (s->buf_type == RING_BUFFER_INSTRUCTION) { s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32); s->buf_type = BATCH_BUFFER_INSTRUCTION; } else if (second_level) { s->buf_type = BATCH_BUFFER_2ND_LEVEL; s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32); s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32); } if (batch_buffer_needs_scan(s)) { ret = perform_bb_shadow(s); if (ret < 0) gvt_vgpu_err("invalid shadow batch buffer\n"); } else { /* emulate a batch buffer end to do return right */ ret = cmd_handler_mi_batch_buffer_end(s); if (ret < 0) return ret; } return ret; } static int mi_noop_index; static const struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL, 0, 1, cmd_handler_mi_user_interrupt}, {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS, D_ALL, 0, 1, cmd_handler_mi_wait_for_event}, {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, 
{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END, F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1, cmd_handler_mi_batch_buffer_end}, {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR, R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip}, {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)}, {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL}, {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0, 8, NULL, CMD_LEN(0)}, {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8, NULL, CMD_LEN(0)}, {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)}, {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm}, {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL, 0, 8, cmd_handler_mi_store_data_index}, {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL, D_ALL, 0, 8, cmd_handler_lri}, {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10, cmd_handler_mi_update_gtt}, {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm, CMD_LEN(2)}, {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6, cmd_handler_mi_flush_dw}, {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1), 10, cmd_handler_mi_clflush}, {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count, CMD_LEN(2)}, {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm, CMD_LEN(2)}, {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, cmd_handler_lrr, CMD_LEN(1)}, {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0, 8, NULL, CMD_LEN(2)}, {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)}, {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)}, {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 8, cmd_handler_mi_op_2f}, {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START, F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8, cmd_handler_mi_batch_buffer_start}, {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END, F_LEN_VAR | 
F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)}, {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST, R_RCS | R_BCS, D_ALL, 0, 2, NULL}, {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(3), 8, NULL}, {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL}, {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT", OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE, F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_VIEWPORT_STATE_POINTERS_CC", OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BLEND_STATE_POINTERS", OP_3DSTATE_BLEND_STATE_POINTERS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS", OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_POINTERS_VS", OP_3DSTATE_BINDING_TABLE_POINTERS_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_POINTERS_HS", OP_3DSTATE_BINDING_TABLE_POINTERS_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_POINTERS_DS", OP_3DSTATE_BINDING_TABLE_POINTERS_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_POINTERS_GS", OP_3DSTATE_BINDING_TABLE_POINTERS_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_POINTERS_PS", 
OP_3DSTATE_BINDING_TABLE_POINTERS_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_STATE_POINTERS_VS", OP_3DSTATE_SAMPLER_STATE_POINTERS_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_STATE_POINTERS_HS", OP_3DSTATE_SAMPLER_STATE_POINTERS_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_STATE_POINTERS_DS", OP_3DSTATE_SAMPLER_STATE_POINTERS_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_STATE_POINTERS_GS", OP_3DSTATE_SAMPLER_STATE_POINTERS_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_STATE_POINTERS_PS", OP_3DSTATE_SAMPLER_STATE_POINTERS_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL}, {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL}, {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_PS_EXTRA", 
OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL}, {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 
{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_2(2, 4), 8, NULL}, {"3DSTATE_BINDING_TABLE_POOL_ALLOC", OP_3DSTATE_BINDING_TABLE_POOL_ALLOC, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC", OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, cmd_handler_pipe_control}, {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0, 1, NULL}, {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(1), 8, NULL}, {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL}, {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(1), 8, NULL}, {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, NULL}, {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS, D_SKL_PLUS, 0, 8, NULL}, {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL}, {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, 
NULL}, {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE, F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL}, {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL}, {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR, R_VCS, D_ALL, 0, 6, NULL}, {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR, R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL}, {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL}, {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL}, {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL}, {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR, R_VCS, D_ALL, 0, 12, NULL}, {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL}, {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL}, {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS, 0, 12, NULL}, }; static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) { hash_add(gvt->cmd_table, &e->hlist, e->info->opcode); } /* call the cmd handler, and advance ip */ static int 
cmd_parser_exec(struct parser_exec_state *s) { struct intel_vgpu *vgpu = s->vgpu; const struct cmd_info *info; u32 cmd; int ret = 0; cmd = cmd_val(s, 0); /* fastpath for MI_NOOP */ if (cmd == MI_NOOP) info = &cmd_info[mi_noop_index]; else info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", cmd, get_opcode(cmd, s->engine), repr_addr_type(s->buf_addr_type), s->engine->name, s->workload); return -EBADRQC; } s->info = info; trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, cmd_length(s), s->buf_type, s->buf_addr_type, s->workload, info->name); if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { ret = gvt_check_valid_cmd_length(cmd_length(s), info->valid_len); if (ret) return ret; } if (info->handler) { ret = info->handler(s); if (ret < 0) { gvt_vgpu_err("%s handler error\n", info->name); return ret; } } if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { ret = cmd_advance_default(s); if (ret) { gvt_vgpu_err("%s IP advance error\n", info->name); return ret; } } return 0; } static inline bool gma_out_of_range(unsigned long gma, unsigned long gma_head, unsigned int gma_tail) { if (gma_tail >= gma_head) return (gma < gma_head) || (gma > gma_tail); else return (gma > gma_tail) && (gma < gma_head); } /* Keep the consistent return type, e.g EBADRQC for unknown * cmd, EFAULT for invalid address, EPERM for nonpriv. later * works as the input of VM healthy status. */ static int command_scan(struct parser_exec_state *s, unsigned long rb_head, unsigned long rb_tail, unsigned long rb_start, unsigned long rb_len) { unsigned long gma_head, gma_tail, gma_bottom; int ret = 0; struct intel_vgpu *vgpu = s->vgpu; gma_head = rb_start + rb_head; gma_tail = rb_start + rb_tail; gma_bottom = rb_start + rb_len; while (s->ip_gma != gma_tail) { if (s->buf_type == RING_BUFFER_INSTRUCTION || s->buf_type == RING_BUFFER_CTX) { if (!(s->ip_gma >= rb_start) || !(s->ip_gma < gma_bottom)) { gvt_vgpu_err("ip_gma %lx out of ring scope." "(base:0x%lx, bottom: 0x%lx)\n", s->ip_gma, rb_start, gma_bottom); parser_exec_state_dump(s); return -EFAULT; } if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { gvt_vgpu_err("ip_gma %lx out of range." 
"base 0x%lx head 0x%lx tail 0x%lx\n", s->ip_gma, rb_start, rb_head, rb_tail); parser_exec_state_dump(s); break; } } ret = cmd_parser_exec(s); if (ret) { gvt_vgpu_err("cmd parser error\n"); parser_exec_state_dump(s); break; } } return ret; } static int scan_workload(struct intel_vgpu_workload *workload) { unsigned long gma_head, gma_tail; struct parser_exec_state s; int ret = 0; /* ring base is page aligned */ if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE))) return -EINVAL; gma_head = workload->rb_start + workload->rb_head; gma_tail = workload->rb_start + workload->rb_tail; s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; s.vgpu = workload->vgpu; s.engine = workload->engine; s.ring_start = workload->rb_start; s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); s.ring_head = gma_head; s.ring_tail = gma_tail; s.rb_va = workload->shadow_ring_buffer_va; s.workload = workload; s.is_ctx_wa = false; if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail) return 0; ret = ip_gma_set(&s, gma_head); if (ret) goto out; ret = command_scan(&s, workload->rb_head, workload->rb_tail, workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl)); out: return ret; } static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { unsigned long gma_head, gma_tail, ring_size, ring_tail; struct parser_exec_state s; int ret = 0; struct intel_vgpu_workload *workload = container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); /* ring base is page aligned */ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, I915_GTT_PAGE_SIZE))) return -EINVAL; ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32); ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES, PAGE_SIZE); gma_head = wa_ctx->indirect_ctx.guest_gma; gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail; s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; s.vgpu = workload->vgpu; s.engine = workload->engine; s.ring_start = wa_ctx->indirect_ctx.guest_gma; s.ring_size = ring_size; s.ring_head = gma_head; s.ring_tail = gma_tail; s.rb_va = wa_ctx->indirect_ctx.shadow_va; s.workload = workload; s.is_ctx_wa = true; ret = ip_gma_set(&s, gma_head); if (ret) goto out; ret = command_scan(&s, 0, ring_tail, wa_ctx->indirect_ctx.guest_gma, ring_size); out: return ret; } static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; unsigned long gma_head, gma_tail, gma_top, guest_rb_size; void *shadow_ring_buffer_va; int ret; guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); /* calculate workload ring buffer size */ workload->rb_len = (workload->rb_tail + guest_rb_size - workload->rb_head) % guest_rb_size; gma_head = workload->rb_start + workload->rb_head; gma_tail = workload->rb_start + workload->rb_tail; gma_top = workload->rb_start + guest_rb_size; if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { void *p; /* realloc the new ring buffer if needed */ p = krealloc(s->ring_scan_buffer[workload->engine->id], workload->rb_len, GFP_KERNEL); if (!p) { gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); return -ENOMEM; } s->ring_scan_buffer[workload->engine->id] = p; s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; } shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; /* get shadow ring buffer va */ workload->shadow_ring_buffer_va = shadow_ring_buffer_va; /* head > tail --> copy head <-> top */ if (gma_head > gma_tail) { ret = 
copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_top, shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } shadow_ring_buffer_va += ret; gma_head = workload->rb_start; } /* copy head or start <-> tail */ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } return 0; } int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload) { int ret; struct intel_vgpu *vgpu = workload->vgpu; ret = shadow_workload_ring_buffer(workload); if (ret) { gvt_vgpu_err("fail to shadow workload ring_buffer\n"); return ret; } ret = scan_workload(workload); if (ret) { gvt_vgpu_err("scan workload error\n"); return ret; } return 0; } static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; struct intel_vgpu_workload *workload = container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_gem_object *obj; int ret = 0; void *map; obj = i915_gem_object_create_shmem(workload->engine->i915, roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE)); if (IS_ERR(obj)) return PTR_ERR(obj); /* get the va of the shadow batch buffer */ map = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(map)) { gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); ret = PTR_ERR(map); goto put_obj; } i915_gem_object_lock(obj, NULL); ret = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (ret) { gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); goto unmap_src; } ret = copy_gma_to_hva(workload->vgpu, workload->vgpu->gtt.ggtt_mm, guest_gma, guest_gma + ctx_size, map); if (ret < 0) { gvt_vgpu_err("fail to copy guest indirect ctx\n"); goto unmap_src; } wa_ctx->indirect_ctx.obj = obj; wa_ctx->indirect_ctx.shadow_va = map; return 0; unmap_src: i915_gem_object_unpin_map(obj); put_obj: i915_gem_object_put(obj); return ret; } static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { u32 per_ctx_start[CACHELINE_DWORDS] = {0}; unsigned char *bb_start_sva; if (!wa_ctx->per_ctx.valid) return 0; per_ctx_start[0] = 0x18800001; per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va + wa_ctx->indirect_ctx.size; memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES); return 0; } int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ret; struct intel_vgpu_workload *workload = container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); struct intel_vgpu *vgpu = workload->vgpu; if (wa_ctx->indirect_ctx.size == 0) return 0; ret = shadow_indirect_ctx(wa_ctx); if (ret) { gvt_vgpu_err("fail to shadow indirect ctx\n"); return ret; } combine_wa_ctx(wa_ctx); ret = scan_wa_ctx(wa_ctx); if (ret) { gvt_vgpu_err("scan wa ctx error\n"); return ret; } return 0; } /* generate dummy contexts by sending empty requests to HW, and let * the HW to fill Engine Contexts. 
This dummy contexts are used for * initialization purpose (update reg whitelist), so referred to as * init context here */ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) { const unsigned long start = LRC_STATE_PN * PAGE_SIZE; struct intel_gvt *gvt = vgpu->gvt; struct intel_engine_cs *engine; enum intel_engine_id id; if (gvt->is_reg_whitelist_updated) return; /* scan init ctx to update cmd accessible list */ for_each_engine(engine, gvt->gt, id) { struct parser_exec_state s; void *vaddr; int ret; if (!engine->default_state) continue; vaddr = shmem_pin_map(engine->default_state); if (!vaddr) { gvt_err("failed to map %s->default state\n", engine->name); return; } s.buf_type = RING_BUFFER_CTX; s.buf_addr_type = GTT_BUFFER; s.vgpu = vgpu; s.engine = engine; s.ring_start = 0; s.ring_size = engine->context_size - start; s.ring_head = 0; s.ring_tail = s.ring_size; s.rb_va = vaddr + start; s.workload = NULL; s.is_ctx_wa = false; s.is_init_ctx = true; /* skipping the first RING_CTX_SIZE(0x50) dwords */ ret = ip_gma_set(&s, RING_CTX_SIZE); if (ret == 0) { ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); if (ret) gvt_err("Scan init ctx error\n"); } shmem_unpin_map(engine->default_state, vaddr); if (ret) return; } gvt->is_reg_whitelist_updated = true; } int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; unsigned long gma_head, gma_tail, gma_start, ctx_size; struct parser_exec_state s; int ring_id = workload->engine->id; struct intel_context *ce = vgpu->submission.shadow[ring_id]; int ret; GEM_BUG_ON(atomic_read(&ce->pin_count) < 0); ctx_size = workload->engine->context_size - PAGE_SIZE; /* Only ring contxt is loaded to HW for inhibit context, no need to * scan engine context */ if (is_inhibit_context(ce)) return 0; gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE; gma_head = 0; gma_tail = ctx_size; s.buf_type = RING_BUFFER_CTX; s.buf_addr_type = GTT_BUFFER; s.vgpu = workload->vgpu; s.engine = workload->engine; s.ring_start = gma_start; s.ring_size = ctx_size; s.ring_head = gma_start + gma_head; s.ring_tail = gma_start + gma_tail; s.rb_va = ce->lrc_reg_state; s.workload = workload; s.is_ctx_wa = false; s.is_init_ctx = false; /* don't scan the first RING_CTX_SIZE(0x50) dwords, as it's ring * context */ ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); if (ret) goto out; ret = command_scan(&s, gma_head, gma_tail, gma_start, ctx_size); out: if (ret) gvt_vgpu_err("scan shadow ctx error\n"); return ret; } static int init_cmd_table(struct intel_gvt *gvt) { unsigned int gen_type = intel_gvt_get_device_type(gvt); int i; for (i = 0; i < ARRAY_SIZE(cmd_info); i++) { struct cmd_entry *e; if (!(cmd_info[i].devices & gen_type)) continue; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) return -ENOMEM; e->info = &cmd_info[i]; if (cmd_info[i].opcode == OP_MI_NOOP) mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n", e->info->name, e->info->opcode, e->info->flag, e->info->devices, e->info->rings); } return 0; } static void clean_cmd_table(struct intel_gvt *gvt) { struct hlist_node *tmp; struct cmd_entry *e; int i; hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist) kfree(e); hash_init(gvt->cmd_table); } void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt) { clean_cmd_table(gvt); } int intel_gvt_init_cmd_parser(struct intel_gvt *gvt) { int ret; ret = init_cmd_table(gvt); if (ret) { intel_gvt_clean_cmd_parser(gvt); return ret; } 
return 0; }
linux-master
drivers/gpu/drm/i915/gvt/cmd_parser.c
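/*
 * Editor's sketch (not part of the driver): shadow_workload_ring_buffer()
 * above copies the guest ring buffer in two pieces when the tail has
 * wrapped below the head -- first head..top, then start..tail.  The same
 * split-copy arithmetic as a self-contained helper; ring_copy() is a
 * made-up name and plain memcpy() stands in for copy_gma_to_hva().
 */
#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Copy the bytes between head and tail out of a circular buffer. */
static size_t ring_copy(void *dst, const void *ring, size_t ring_size,
			size_t head, size_t tail)
{
	uint8_t *out = dst;
	size_t copied = 0;

	if (head > tail) {	/* wrapped: copy head..end, restart at 0 */
		memcpy(out, (const uint8_t *)ring + head, ring_size - head);
		copied = ring_size - head;
		head = 0;
	}
	memcpy(out + copied, (const uint8_t *)ring + head, tail - head);
	return copied + (tail - head);
}

int main(void)
{
	uint8_t ring[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }, out[8];

	/* head = 6, tail = 2: expect bytes 6, 7, 0, 1 in order */
	assert(ring_copy(out, ring, sizeof(ring), 6, 2) == 4);
	assert(out[0] == 6 && out[3] == 1);
	return 0;
}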
/* * Copyright 2017 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Zhiyuan Lv <[email protected]> * * Contributors: * Xiaoguang Chen * Tina Zhang <[email protected]> */ #include <linux/dma-buf.h> #include <linux/mdev.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> #include "gem/i915_gem_dmabuf.h" #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct intel_vgpu *vgpu; struct sg_table *st; struct scatterlist *sg; int i, j, ret; gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; unsigned int page_num; /* limited by sg_alloc_table */ if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num)) return -E2BIG; page_num = obj->base.size >> PAGE_SHIFT; fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info; if (drm_WARN_ON(&dev_priv->drm, !fb_info)) return -ENODEV; vgpu = fb_info->obj->vgpu; if (drm_WARN_ON(&dev_priv->drm, !vgpu)) return -ENODEV; st = kmalloc(sizeof(*st), GFP_KERNEL); if (unlikely(!st)) return -ENOMEM; ret = sg_alloc_table(st, page_num, GFP_KERNEL); if (ret) { kfree(st); return ret; } gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm + (fb_info->start >> PAGE_SHIFT); for_each_sg(st->sgl, sg, page_num, i) { dma_addr_t dma_addr = GEN8_DECODE_PTE(readq(&gtt_entries[i])); if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) { ret = -EINVAL; goto out; } sg->offset = 0; sg->length = PAGE_SIZE; sg_dma_len(sg) = PAGE_SIZE; sg_dma_address(sg) = dma_addr; } __i915_gem_object_set_pages(obj, st); out: if (ret) { dma_addr_t dma_addr; for_each_sg(st->sgl, sg, i, j) { dma_addr = sg_dma_address(sg); if (dma_addr) intel_gvt_dma_unmap_guest_page(vgpu, dma_addr); } sg_free_table(st); kfree(st); } return ret; } static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { struct scatterlist *sg; if (obj->base.dma_buf) { struct intel_vgpu_fb_info *fb_info = obj->gvt_info; struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; struct intel_vgpu *vgpu = obj->vgpu; int i; for_each_sg(pages->sgl, sg, fb_info->size, i) intel_gvt_dma_unmap_guest_page(vgpu, sg_dma_address(sg)); } sg_free_table(pages); kfree(pages); } static void dmabuf_gem_object_free(struct kref *kref) { struct intel_vgpu_dmabuf_obj *obj = container_of(kref, struct intel_vgpu_dmabuf_obj, kref); struct intel_vgpu 
*vgpu = obj->vgpu; struct list_head *pos; struct intel_vgpu_dmabuf_obj *dmabuf_obj; if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) && !list_empty(&vgpu->dmabuf_obj_list_head)) { list_for_each(pos, &vgpu->dmabuf_obj_list_head) { dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); kfree(dmabuf_obj); break; } } } else { /* Free the orphan dmabuf_objs here */ kfree(obj->info); kfree(obj); } } static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj) { kref_get(&obj->kref); } static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj) { kref_put(&obj->kref, dmabuf_gem_object_free); } static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj) { struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info; struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; struct intel_vgpu *vgpu = obj->vgpu; if (vgpu) { mutex_lock(&vgpu->dmabuf_lock); gem_obj->base.dma_buf = NULL; dmabuf_obj_put(obj); mutex_unlock(&vgpu->dmabuf_lock); } else { /* vgpu is NULL, as it has been removed already */ gem_obj->base.dma_buf = NULL; dmabuf_obj_put(obj); } } static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = { .name = "i915_gem_object_vgpu", .flags = I915_GEM_OBJECT_IS_PROXY, .get_pages = vgpu_gem_get_pages, .put_pages = vgpu_gem_put_pages, .release = vgpu_gem_release, }; static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct intel_vgpu_fb_info *info) { static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0); i915_gem_object_set_readonly(obj); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; if (GRAPHICS_VER(dev_priv) >= 9) { unsigned int tiling_mode = 0; unsigned int stride = 0; switch (info->drm_format_mod) { case DRM_FORMAT_MOD_LINEAR: tiling_mode = I915_TILING_NONE; break; case I915_FORMAT_MOD_X_TILED: tiling_mode = I915_TILING_X; stride = info->stride; break; case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: tiling_mode = I915_TILING_Y; stride = info->stride; break; default: gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", info->drm_format_mod); } obj->tiling_and_stride = tiling_mode | stride; } else { obj->tiling_and_stride = info->drm_format_mod ? 
I915_TILING_X : 0; } return obj; } static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c) { if (c && c->x_hot <= c->width && c->y_hot <= c->height) return true; else return false; } static int vgpu_get_plane_info(struct drm_device *dev, struct intel_vgpu *vgpu, struct intel_vgpu_fb_info *info, int plane_id) { struct intel_vgpu_primary_plane_format p; struct intel_vgpu_cursor_plane_format c; int ret, tile_height = 1; memset(info, 0, sizeof(*info)); if (plane_id == DRM_PLANE_TYPE_PRIMARY) { ret = intel_vgpu_decode_primary_plane(vgpu, &p); if (ret) return ret; info->start = p.base; info->start_gpa = p.base_gpa; info->width = p.width; info->height = p.height; info->stride = p.stride; info->drm_format = p.drm_format; switch (p.tiled) { case PLANE_CTL_TILED_LINEAR: info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; break; case PLANE_CTL_TILED_X: info->drm_format_mod = I915_FORMAT_MOD_X_TILED; tile_height = 8; break; case PLANE_CTL_TILED_Y: info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; tile_height = 32; break; case PLANE_CTL_TILED_YF: info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; tile_height = 32; break; default: gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); } } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { ret = intel_vgpu_decode_cursor_plane(vgpu, &c); if (ret) return ret; info->start = c.base; info->start_gpa = c.base_gpa; info->width = c.width; info->height = c.height; info->stride = c.width * (c.bpp / 8); info->drm_format = c.drm_format; info->drm_format_mod = 0; info->x_pos = c.x_pos; info->y_pos = c.y_pos; if (validate_hotspot(&c)) { info->x_hot = c.x_hot; info->y_hot = c.y_hot; } else { info->x_hot = UINT_MAX; info->y_hot = UINT_MAX; } } else { gvt_vgpu_err("invalid plane id:%d\n", plane_id); return -EINVAL; } info->size = info->stride * roundup(info->height, tile_height); if (info->size == 0) { gvt_vgpu_err("fb size is zero\n"); return -EINVAL; } if (info->start & (PAGE_SIZE - 1)) { gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start); return -EFAULT; } if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) { gvt_vgpu_err("invalid gma addr\n"); return -EFAULT; } return 0; } static struct intel_vgpu_dmabuf_obj * pick_dmabuf_by_info(struct intel_vgpu *vgpu, struct intel_vgpu_fb_info *latest_info) { struct list_head *pos; struct intel_vgpu_fb_info *fb_info; struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL; struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (!dmabuf_obj->info) continue; fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info; if ((fb_info->start == latest_info->start) && (fb_info->start_gpa == latest_info->start_gpa) && (fb_info->size == latest_info->size) && (fb_info->drm_format_mod == latest_info->drm_format_mod) && (fb_info->drm_format == latest_info->drm_format) && (fb_info->width == latest_info->width) && (fb_info->height == latest_info->height)) { ret = dmabuf_obj; break; } } return ret; } static struct intel_vgpu_dmabuf_obj * pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id) { struct list_head *pos; struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL; struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj->dmabuf_id == id) { ret = dmabuf_obj; break; } } return ret; } static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, struct intel_vgpu_fb_info *fb_info) { gvt_dmabuf->drm_format = 
fb_info->drm_format; gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; gvt_dmabuf->width = fb_info->width; gvt_dmabuf->height = fb_info->height; gvt_dmabuf->stride = fb_info->stride; gvt_dmabuf->size = fb_info->size; gvt_dmabuf->x_pos = fb_info->x_pos; gvt_dmabuf->y_pos = fb_info->y_pos; gvt_dmabuf->x_hot = fb_info->x_hot; gvt_dmabuf->y_hot = fb_info->y_hot; } int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) { struct drm_device *dev = &vgpu->gvt->gt->i915->drm; struct vfio_device_gfx_plane_info *gfx_plane_info = args; struct intel_vgpu_dmabuf_obj *dmabuf_obj; struct intel_vgpu_fb_info fb_info; int ret = 0; if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF | VFIO_GFX_PLANE_TYPE_PROBE)) return ret; else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) || (!gfx_plane_info->flags)) return -EINVAL; ret = vgpu_get_plane_info(dev, vgpu, &fb_info, gfx_plane_info->drm_plane_type); if (ret != 0) goto out; mutex_lock(&vgpu->dmabuf_lock); /* If exists, pick up the exposed dmabuf_obj */ dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info); if (dmabuf_obj) { update_fb_info(gfx_plane_info, &fb_info); gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id; /* This buffer may be released between query_plane ioctl and * get_dmabuf ioctl. Add the refcount to make sure it won't * be released between the two ioctls. */ if (!dmabuf_obj->initref) { dmabuf_obj->initref = true; dmabuf_obj_get(dmabuf_obj); } ret = 0; gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n", vgpu->id, kref_read(&dmabuf_obj->kref), gfx_plane_info->dmabuf_id); mutex_unlock(&vgpu->dmabuf_lock); goto out; } mutex_unlock(&vgpu->dmabuf_lock); /* Need to allocate a new one*/ dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL); if (unlikely(!dmabuf_obj)) { gvt_vgpu_err("alloc dmabuf_obj failed\n"); ret = -ENOMEM; goto out; } dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info), GFP_KERNEL); if (unlikely(!dmabuf_obj->info)) { gvt_vgpu_err("allocate intel vgpu fb info failed\n"); ret = -ENOMEM; goto out_free_dmabuf; } memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info)); ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj; dmabuf_obj->vgpu = vgpu; ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT); if (ret < 0) goto out_free_info; gfx_plane_info->dmabuf_id = ret; dmabuf_obj->dmabuf_id = ret; dmabuf_obj->initref = true; kref_init(&dmabuf_obj->kref); update_fb_info(gfx_plane_info, &fb_info); INIT_LIST_HEAD(&dmabuf_obj->list); mutex_lock(&vgpu->dmabuf_lock); list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head); mutex_unlock(&vgpu->dmabuf_lock); gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id, __func__, kref_read(&dmabuf_obj->kref), ret); return 0; out_free_info: kfree(dmabuf_obj->info); out_free_dmabuf: kfree(dmabuf_obj); out: /* ENODEV means plane isn't ready, which might be a normal case. */ return (ret == -ENODEV) ? 
0 : ret; } /* To associate an exposed dmabuf with the dmabuf_obj */ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) { struct drm_device *dev = &vgpu->gvt->gt->i915->drm; struct intel_vgpu_dmabuf_obj *dmabuf_obj; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; int dmabuf_fd; int ret = 0; mutex_lock(&vgpu->dmabuf_lock); dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id); if (dmabuf_obj == NULL) { gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id); ret = -EINVAL; goto out; } obj = vgpu_create_gem(dev, dmabuf_obj->info); if (obj == NULL) { gvt_vgpu_err("create gvt gem obj failed\n"); ret = -ENOMEM; goto out; } obj->gvt_info = dmabuf_obj->info; dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR); if (IS_ERR(dmabuf)) { gvt_vgpu_err("export dma-buf failed\n"); ret = PTR_ERR(dmabuf); goto out_free_gem; } ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR); if (ret < 0) { gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret); goto out_free_dmabuf; } dmabuf_fd = ret; dmabuf_obj_get(dmabuf_obj); if (dmabuf_obj->initref) { dmabuf_obj->initref = false; dmabuf_obj_put(dmabuf_obj); } mutex_unlock(&vgpu->dmabuf_lock); gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n" " file count: %ld, GEM ref: %d\n", vgpu->id, dmabuf_obj->dmabuf_id, kref_read(&dmabuf_obj->kref), dmabuf_fd, file_count(dmabuf->file), kref_read(&obj->base.refcount)); i915_gem_object_put(obj); return dmabuf_fd; out_free_dmabuf: dma_buf_put(dmabuf); out_free_gem: i915_gem_object_put(obj); out: mutex_unlock(&vgpu->dmabuf_lock); return ret; } void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_dmabuf_obj *dmabuf_obj; mutex_lock(&vgpu->dmabuf_lock); list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) { dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ if (dmabuf_obj->initref) { dmabuf_obj->initref = false; dmabuf_obj_put(dmabuf_obj); } } mutex_unlock(&vgpu->dmabuf_lock); }
linux-master
drivers/gpu/drm/i915/gvt/dmabuf.c
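/*
 * Editor's sketch (illustration only): vgpu_gem_get_pages() above turns
 * each GGTT entry into a DMA address with GEN8_DECODE_PTE(), i.e. it keeps
 * bits 63:12 and drops the low flag bits.  The same masking, stand-alone;
 * the PTE value and macro names below are invented for the example.
 */
#include <stdint.h>
#include <assert.h>

#define PTE_PAGE_SHIFT	12
#define PTE_ADDR_MASK	(~0ULL << PTE_PAGE_SHIFT)	/* bits 63:12 */

static uint64_t decode_pte(uint64_t pte)
{
	return pte & PTE_ADDR_MASK;	/* address of the backing page */
}

int main(void)
{
	uint64_t pte = 0x0000000123456003ULL;	/* page address | flag bits */

	assert(decode_pte(pte) == 0x0000000123456000ULL);
	return 0;
}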
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Jike Song <[email protected]> * * Contributors: * Zhi Wang <[email protected]> * */ #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "trace.h" #endif
linux-master
drivers/gpu/drm/i915/gvt/trace_points.c
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eddie Dong <[email protected]> * Jike Song <[email protected]> * * Contributors: * Zhi Wang <[email protected]> * Min He <[email protected]> * Bing Niu <[email protected]> * */ #include "i915_drv.h" #include "gvt.h" #include "intel_pci_config.h" enum { INTEL_GVT_PCI_BAR_GTTMMIO = 0, INTEL_GVT_PCI_BAR_APERTURE, INTEL_GVT_PCI_BAR_PIO, INTEL_GVT_PCI_BAR_MAX, }; /* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one * byte) byte by byte in standard pci configuration space. (not the full * 256 bytes.) */ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { [PCI_COMMAND] = 0xff, 0x07, [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */ [PCI_CACHE_LINE_SIZE] = 0xff, [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff, [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff, [PCI_INTERRUPT_LINE] = 0xff, }; /** * vgpu_pci_cfg_mem_write - write virtual cfg space memory * @vgpu: target vgpu * @off: offset * @src: src ptr to write * @bytes: number of bytes * * Use this function to write virtual cfg space memory. * For standard cfg space, only RW bits can be changed, * and we emulates the RW1C behavior of PCI_STATUS register. */ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, u8 *src, unsigned int bytes) { u8 *cfg_base = vgpu_cfg_space(vgpu); u8 mask, new, old; pci_power_t pwr; int i = 0; for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { mask = pci_cfg_space_rw_bmp[off + i]; old = cfg_base[off + i]; new = src[i] & mask; /** * The PCI_STATUS high byte has RW1C bits, here * emulates clear by writing 1 for these bits. * Writing a 0b to RW1C bits has no effect. */ if (off + i == PCI_STATUS + 1) new = (~new & old) & mask; cfg_base[off + i] = (old & ~mask) | new; } /* For other configuration space directly copy as it is. 
*/ if (i < bytes) memcpy(cfg_base + off + i, src + i, bytes - i); if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) { pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off]) & PCI_PM_CTRL_STATE_MASK); if (pwr == PCI_D3hot) vgpu->d3_entered = true; gvt_dbg_core("vgpu-%d power status changed to %d\n", vgpu->id, pwr); } } /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read * @vgpu: target vgpu * @offset: offset * @p_data: return data ptr * @bytes: number of bytes to read * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; if (drm_WARN_ON(&i915->drm, bytes > 4)) return -EINVAL; if (drm_WARN_ON(&i915->drm, offset + bytes > vgpu->gvt->device_info.cfg_space_size)) return -EINVAL; memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes); return 0; } static void map_aperture(struct intel_vgpu *vgpu, bool map) { if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; } static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap) { if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; } static int emulate_pci_command_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u8 old = vgpu_cfg_space(vgpu)[offset]; u8 new = *(u8 *)p_data; u8 changed = old ^ new; vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; if (old & PCI_COMMAND_MEMORY) { trap_gttmmio(vgpu, false); map_aperture(vgpu, false); } else { trap_gttmmio(vgpu, true); map_aperture(vgpu, true); } return 0; } static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset); u32 new = *(u32 *)(p_data); if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK) /* We don't have rom, return size of 0. */ *pval = 0; else vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); return 0; } static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; /* * Power-up software can determine how much address * space the device requires by writing a value of * all 1's to the register and then reading the value * back. The device will return 0's in all don't-care * address bits. */ if (new == 0xffffffff) { switch (offset) { case PCI_BASE_ADDRESS_0: case PCI_BASE_ADDRESS_1: size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1); intel_vgpu_write_pci_bar(vgpu, offset, size >> (lo ? 0 : 32), lo); /* * Untrap the BAR, since guest hasn't configured a * valid GPA */ trap_gttmmio(vgpu, false); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); intel_vgpu_write_pci_bar(vgpu, offset, size >> (lo ? 
0 : 32), lo); map_aperture(vgpu, false); break; default: /* Unimplemented BARs */ intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false); } } else { switch (offset) { case PCI_BASE_ADDRESS_0: case PCI_BASE_ADDRESS_1: /* * Untrap the old BAR first, since guest has * re-configured the BAR */ trap_gttmmio(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); trap_gttmmio(vgpu, mmio_enabled); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: map_aperture(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); map_aperture(vgpu, mmio_enabled); break; default: intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } } /** * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write * @vgpu: target vgpu * @offset: offset * @p_data: write data ptr * @bytes: number of bytes to write * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int ret; if (drm_WARN_ON(&i915->drm, bytes > 4)) return -EINVAL; if (drm_WARN_ON(&i915->drm, offset + bytes > vgpu->gvt->device_info.cfg_space_size)) return -EINVAL; /* First check if it's PCI_COMMAND */ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) { if (drm_WARN_ON(&i915->drm, bytes > 2)) return -EINVAL; return emulate_pci_command_write(vgpu, offset, p_data, bytes); } switch (rounddown(offset, 4)) { case PCI_ROM_ADDRESS: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes); case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; emulate_pci_bar_write(vgpu, offset, p_data, bytes); break; case INTEL_GVT_PCI_SWSCI: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data); if (ret) return ret; break; case INTEL_GVT_PCI_OPREGION: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; ret = intel_vgpu_opregion_base_write_handler(vgpu, *(u32 *)p_data); if (ret) return ret; vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); break; default: vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); break; } return 0; } /** * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU * * @vgpu: a vGPU * @primary: is the vGPU presented as primary * */ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary) { struct intel_gvt *gvt = vgpu->gvt; struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; u8 next; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); if (!primary) { vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = INTEL_GVT_PCI_CLASS_VGA_OTHER; vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = INTEL_GVT_PCI_CLASS_VGA_OTHER; } /* Show guest that there isn't any stolen memory.*/ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, gvt_aperture_pa_base(gvt), true); vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); /* * Clear the bar upper 32bit and let guest to assign the new value */ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8); 
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = pci_resource_len(pdev, GEN4_GTTMMADR_BAR); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = pci_resource_len(pdev, GEN4_GMADR_BAR); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); /* PM Support */ vgpu->cfg_space.pmcsr_off = 0; if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) { next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST]; do { if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) { vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL; break; } next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT]; } while (next); } } /** * intel_vgpu_reset_cfg_space - reset vGPU configuration space * * @vgpu: a vGPU * */ void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) { u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != INTEL_GVT_PCI_CLASS_VGA_OTHER; if (cmd & PCI_COMMAND_MEMORY) { trap_gttmmio(vgpu, false); map_aperture(vgpu, false); } /** * Currently we only do such reset when vGPU is not * owned by any VM, so we simply restore entire cfg * space to default value. */ intel_vgpu_init_cfg_space(vgpu, primary); }
linux-master
drivers/gpu/drm/i915/gvt/cfg_space.c
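/*
 * Editor's sketch: two bits of arithmetic from cfg_space.c above, pulled
 * out into plain C.  rw1c_write() mirrors the PCI_STATUS emulation (write
 * 1 to clear a bit, write 0 to leave it alone); bar_size_mask() mirrors
 * the "write all 1s, read back ~(size - 1)" BAR sizing handshake.  The
 * values in main() are made up for the asserts.
 */
#include <stdint.h>
#include <assert.h>

static uint8_t rw1c_write(uint8_t old, uint8_t src, uint8_t mask)
{
	uint8_t new = src & mask;

	new = (uint8_t)(~new & old) & mask;	/* 1 clears, 0 preserves */
	return (uint8_t)((old & ~mask) | new);
}

static uint32_t bar_size_mask(uint32_t bar_size)
{
	return ~(bar_size - 1);	/* don't-care address bits read back as 0 */
}

int main(void)
{
	/* writing 0x08 to an RW1C byte clears only bit 3 */
	assert(rw1c_write(0xf9, 0x08, 0xf9) == 0xf1);
	assert(rw1c_write(0xf9, 0x00, 0xf9) == 0xf9);

	/* a 16 MiB BAR answers a sizing write with 0xff000000 */
	assert(bar_size_mask(16u << 20) == 0xff000000u);
	return 0;
}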
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eddie Dong <[email protected]> * Kevin Tian <[email protected]> * * Contributors: * Zhi Wang <[email protected]> * Changbin Du <[email protected]> * Zhenyu Wang <[email protected]> * Tina Zhang <[email protected]> * Bing Niu <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "gt/intel_context.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt_regs.h" #include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" #define GEN9_MOCS_SIZE 64 /* Raw offset is appened to each line for convenience. */ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 
0x22028 */ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ {RCS0, _MMIO(0xb118), 0, false}, /* GEN8_L3SQCREG4 */ {RCS0, _MMIO(0xb11c), 0, false}, /* GEN9_SCRATCH1 */ {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ {RCS0, _MMIO(0xe180), 0xffff, true}, /* HALF_SLICE_CHICKEN2 */ {RCS0, _MMIO(0xe184), 0xffff, true}, /* GEN8_HALF_SLICE_CHICKEN3 */ {RCS0, _MMIO(0xe188), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN5 */ {RCS0, _MMIO(0xe194), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN7 */ {RCS0, _MMIO(0xe4f0), 0xffff, true}, /* GEN8_ROW_CHICKEN */ {RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */ {RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */ {RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */ {RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */ {RCS0, TRVADR, 0, true}, /* 0x4df0 */ {RCS0, TRTTE, 0, true}, /* 0x4df4 */ {RCS0, _MMIO(0x4dfc), 0, true}, {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 
*/ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct { bool initialized; u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE]; u32 l3cc_table[GEN9_MOCS_SIZE / 2]; } gen9_render_mocs; static u32 gen9_mocs_mmio_offset_list[] = { [RCS0] = 0xc800, [VCS0] = 0xc900, [VCS1] = 0xca00, [BCS0] = 0xcc00, [VECS0] = 0xcb00, }; static void load_render_mocs(const struct intel_engine_cs *engine) { struct intel_gvt *gvt = engine->i915->gvt; struct intel_uncore *uncore = engine->uncore; u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; i915_reg_t offset; int ring_id, i; /* Platform doesn't have mocs mmios. */ if (!regs) return; for (ring_id = 0; ring_id < cnt; ring_id++) { if (!HAS_ENGINE(engine->gt, ring_id)) continue; offset.reg = regs[ring_id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { gen9_render_mocs.control_table[ring_id][i] = intel_uncore_read_fw(uncore, offset); offset.reg += 4; } } offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { gen9_render_mocs.l3cc_table[i] = intel_uncore_read_fw(uncore, offset); offset.reg += 4; } gen9_render_mocs.initialized = true; } static int restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu, struct i915_request *req) { u32 *cs; int ret; struct engine_mmio *mmio; struct intel_gvt *gvt = vgpu->gvt; int ring_id = req->engine->id; int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id]; if (count == 0) return 0; ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; cs = intel_ring_begin(req, count * 2 + 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(count); for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->id != ring_id || !mmio->in_context) continue; *cs++ = i915_mmio_reg_offset(mmio->reg); *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16); gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", *(cs-2), *(cs-1), vgpu->id, ring_id); } *cs++ = MI_NOOP; intel_ring_advance(req, cs); ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; return 0; } static int restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu, struct i915_request *req) { unsigned int index; u32 *cs; cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE); for (index = 0; index < GEN9_MOCS_SIZE; index++) { *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index)); *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index)); gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", *(cs-2), *(cs-1), vgpu->id, req->engine->id); } *cs++ = MI_NOOP; intel_ring_advance(req, cs); return 0; } static int restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu, struct i915_request *req) { unsigned int index; u32 *cs; cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2); for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) { *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index)); *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index)); gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", *(cs-2), *(cs-1), vgpu->id, req->engine->id); } *cs++ = MI_NOOP; intel_ring_advance(req, cs); return 0; } /* * Use lri command to initialize the mmio which is in context state image for * inhibit 
context, it contains tracked engine mmio, render_mocs and * render_mocs_l3cc. */ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req) { int ret; u32 *cs; cs = intel_ring_begin(req, 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; *cs++ = MI_NOOP; intel_ring_advance(req, cs); ret = restore_context_mmio_for_inhibit(vgpu, req); if (ret) goto out; /* no MOCS register in context except render engine */ if (req->engine->id != RCS0) goto out; ret = restore_render_mocs_control_for_inhibit(vgpu, req); if (ret) goto out; ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req); if (ret) goto out; out: cs = intel_ring_begin(req, 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; *cs++ = MI_NOOP; intel_ring_advance(req, cs); return ret; } static u32 gen8_tlb_mmio_offset_list[] = { [RCS0] = 0x4260, [VCS0] = 0x4264, [VCS1] = 0x4268, [BCS0] = 0x426c, [VECS0] = 0x4270, }; static void handle_tlb_pending_event(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; struct intel_vgpu_submission *s = &vgpu->submission; u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; enum forcewake_domains fw; i915_reg_t reg; if (!regs) return; if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt)) return; if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending)) return; reg = _MMIO(regs[engine->id]); /* WaForceWakeRenderDuringMmioTLBInvalidate:skl * we need to put a forcewake when invalidating RCS TLB caches, * otherwise device can go to RC6 state and interrupt invalidation * process */ fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ | FW_REG_WRITE); if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(uncore, fw); intel_uncore_write_fw(uncore, reg, 0x1); if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50)) gvt_vgpu_err("timeout in invalidate ring %s tlb\n", engine->name); else vgpu_vreg_t(vgpu, reg) = 0; intel_uncore_forcewake_put(uncore, fw); gvt_dbg_core("invalidate TLB for ring %s\n", engine->name); } static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, const struct intel_engine_cs *engine) { u32 regs[] = { [RCS0] = 0xc800, [VCS0] = 0xc900, [VCS1] = 0xca00, [BCS0] = 0xcc00, [VECS0] = 0xcb00, }; struct intel_uncore *uncore = engine->uncore; i915_reg_t offset, l3_offset; u32 old_v, new_v; int i; if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs))) return; if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9) return; if (!pre && !gen9_render_mocs.initialized) load_render_mocs(engine); offset.reg = regs[engine->id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { if (pre) old_v = vgpu_vreg_t(pre, offset); else old_v = gen9_render_mocs.control_table[engine->id][i]; if (next) new_v = vgpu_vreg_t(next, offset); else new_v = gen9_render_mocs.control_table[engine->id][i]; if (old_v != new_v) intel_uncore_write_fw(uncore, offset, new_v); offset.reg += 4; } if (engine->id == RCS0) { l3_offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { if (pre) old_v = vgpu_vreg_t(pre, l3_offset); else old_v = gen9_render_mocs.l3cc_table[i]; if (next) new_v = vgpu_vreg_t(next, l3_offset); else new_v = gen9_render_mocs.l3cc_table[i]; if (old_v != new_v) intel_uncore_write_fw(uncore, l3_offset, new_v); l3_offset.reg += 4; } } } #define CTX_CONTEXT_CONTROL_VAL 0x03 bool is_inhibit_context(struct 
intel_context *ce) { const u32 *reg_state = ce->lrc_reg_state; u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); return inhibit_mask == (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask); } /* Switch ring mmio values (context). */ static void switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, const struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; struct intel_vgpu_submission *s; struct engine_mmio *mmio; u32 old_v, new_v; if (GRAPHICS_VER(engine->i915) >= 9) switch_mocs(pre, next, engine); for (mmio = engine->i915->gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->id != engine->id) continue; /* * No need to do save or restore of the mmio which is in context * state image on gen9, it's initialized by lri command and * save or restore with context together. */ if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context) continue; // save if (pre) { vgpu_vreg_t(pre, mmio->reg) = intel_uncore_read_fw(uncore, mmio->reg); if (mmio->mask) vgpu_vreg_t(pre, mmio->reg) &= ~(mmio->mask << 16); old_v = vgpu_vreg_t(pre, mmio->reg); } else { old_v = mmio->value = intel_uncore_read_fw(uncore, mmio->reg); } // restore if (next) { s = &next->submission; /* * No need to restore the mmio which is in context state * image if it's not inhibit context, it will restore * itself. */ if (mmio->in_context && !is_inhibit_context(s->shadow[engine->id])) continue; if (mmio->mask) new_v = vgpu_vreg_t(next, mmio->reg) | (mmio->mask << 16); else new_v = vgpu_vreg_t(next, mmio->reg); } else { if (mmio->in_context) continue; if (mmio->mask) new_v = mmio->value | (mmio->mask << 16); else new_v = mmio->value; } intel_uncore_write_fw(uncore, mmio->reg, new_v); trace_render_mmio(pre ? pre->id : 0, next ? next->id : 0, "switch", i915_mmio_reg_offset(mmio->reg), old_v, new_v); } if (next) handle_tlb_pending_event(next, engine); } /** * intel_gvt_switch_mmio - switch mmio context of specific engine * @pre: the last vGPU that own the engine * @next: the vGPU to switch to * @engine: the engine * * If pre is null indicates that host own the engine. If next is null * indicates that we are switching to host workload. */ void intel_gvt_switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, const struct intel_engine_cs *engine) { if (WARN(!pre && !next, "switch ring %s from host to HOST\n", engine->name)) return; gvt_dbg_render("switch ring %s from %s to %s\n", engine->name, pre ? "vGPU" : "host", next ? "vGPU" : "HOST"); /** * We are using raw mmio access wrapper to improve the * performace for batch mmio read/write, so we need * handle forcewake mannually. 
*/ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); switch_mmio(pre, next, engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); } /** * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list * @gvt: GVT device * */ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; if (GRAPHICS_VER(gvt->gt->i915) >= 9) { gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list; gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list); } else { gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); } for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); } } }
linux-master
drivers/gpu/drm/i915/gvt/mmio_context.c
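/*
 * Editor's sketch: switch_mocs()/switch_mmio() above only issue an MMIO
 * write when the outgoing and incoming owners disagree on a value, falling
 * back to the saved host table when either side is NULL.  The same
 * "pick old, pick new, write only on change" loop over a plain array;
 * hw[] stands in for intel_uncore_write_fw() and the table is trimmed.
 */
#include <stdint.h>
#include <stddef.h>

#define TABLE_SIZE 8	/* GEN9_MOCS_SIZE shrunk for the example */

static unsigned int switch_table(const uint32_t *pre, const uint32_t *next,
				 const uint32_t *host, uint32_t *hw)
{
	unsigned int writes = 0;

	for (size_t i = 0; i < TABLE_SIZE; i++) {
		uint32_t old_v = pre  ? pre[i]  : host[i];
		uint32_t new_v = next ? next[i] : host[i];

		if (old_v != new_v) {	/* skip entries that already match */
			hw[i] = new_v;
			writes++;
		}
	}
	return writes;
}

int main(void)
{
	uint32_t host[TABLE_SIZE] = { 0 }, hw[TABLE_SIZE] = { 0 };
	uint32_t guest[TABLE_SIZE] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	/* host -> vGPU switch: only the three differing entries are written */
	return switch_table(NULL, guest, host, hw) == 3 ? 0 : 1;
}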
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Ke Yu * Kevin Tian <[email protected]> * Dexuan Cui * * Contributors: * Tina Zhang <[email protected]> * Min He <[email protected]> * Niu Bing <[email protected]> * Zhi Wang <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #include "display/intel_dpio_phy.h" #include "gt/intel_gt_regs.h" /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU * @gpa: guest physical address * * Returns: * Zero on success, negative error code if failed */ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) { u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0); return gpa - gttmmio_gpa; } #define reg_is_mmio(gvt, reg) \ (reg >= 0 && reg < gvt->device_info.mmio_size) #define reg_is_gtt(gvt, reg) \ (reg >= gvt->device_info.gtt_start_offset \ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes, bool read) { struct intel_gvt *gvt = NULL; void *pt = NULL; unsigned int offset = 0; if (!vgpu || !p_data) return; gvt = vgpu->gvt; mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (reg_is_mmio(gvt, offset)) { if (read) intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); else intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); } else if (reg_is_gtt(gvt, offset)) { offset -= gvt->device_info.gtt_start_offset; pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset; if (read) memcpy(p_data, pt, bytes); else memcpy(pt, p_data, bytes); } mutex_unlock(&vgpu->vgpu_lock); } /** * intel_vgpu_emulate_mmio_read - emulate MMIO read * @vgpu: a vGPU * @pa: guest physical address * @p_data: data return buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed */ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; unsigned int offset = 0; int ret = -EINVAL; if (vgpu->failsafe) { failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); return 0; } mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (drm_WARN_ON(&i915->drm, bytes > 8)) goto err; if (reg_is_gtt(gvt, offset)) { if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) && 
!IS_ALIGNED(offset, 8))) goto err; if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8)) goto err; if (drm_WARN_ON(&i915->drm, !reg_is_gtt(gvt, offset + bytes - 1))) goto err; ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset, p_data, bytes); if (ret) goto err; goto out; } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes); goto out; } if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1))) goto err; if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes))) goto err; } ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true); if (ret < 0) goto err; intel_gvt_mmio_set_accessed(gvt, offset); ret = 0; goto out; err: gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", offset, bytes); out: mutex_unlock(&vgpu->vgpu_lock); return ret; } /** * intel_vgpu_emulate_mmio_write - emulate MMIO write * @vgpu: a vGPU * @pa: guest physical address * @p_data: write data buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed */ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; unsigned int offset = 0; int ret = -EINVAL; if (vgpu->failsafe) { failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false); return 0; } mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (drm_WARN_ON(&i915->drm, bytes > 8)) goto err; if (reg_is_gtt(gvt, offset)) { if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8))) goto err; if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8)) goto err; if (drm_WARN_ON(&i915->drm, !reg_is_gtt(gvt, offset + bytes - 1))) goto err; ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset, p_data, bytes); if (ret) goto err; goto out; } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes); goto out; } ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false); if (ret < 0) goto err; intel_gvt_mmio_set_accessed(gvt, offset); ret = 0; goto out; err: gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, bytes); out: mutex_unlock(&vgpu->vgpu_lock); return ret; } /** * intel_vgpu_reset_mmio - reset virtual MMIO space * @vgpu: a vGPU * @dmlr: whether this is device model level reset */ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; void *mmio = gvt->firmware.mmio; if (dmlr) { memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; /* uc reset hw expect GS_MIA_IN_RESET */ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; if (IS_BROXTON(vgpu->gvt->gt->i915)) { vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_POWER_GOOD; vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= ~BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= ~BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, 
BXT_PHY_CTL(PORT_B)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= ~BXT_PHY_LANE_ENABLED; vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= SKL_FUSE_DOWNLOAD_STATUS | SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) /* only reset the engine related, so starting with 0x44200 * interrupt include DE,display mmio related will not be * touched */ memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } } /** * intel_vgpu_init_mmio - init MMIO space * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed */ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; vgpu->mmio.vreg = vzalloc(info->mmio_size); if (!vgpu->mmio.vreg) return -ENOMEM; intel_vgpu_reset_mmio(vgpu, true); return 0; } /** * intel_vgpu_clean_mmio - clean MMIO space * @vgpu: a vGPU * */ void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { vfree(vgpu->mmio.vreg); vgpu->mmio.vreg = NULL; }
linux-master
drivers/gpu/drm/i915/gvt/mmio.c
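/*
 * Editor's sketch: intel_vgpu_gpa_to_mmio_offset() plus the reg_is_mmio()/
 * reg_is_gtt() checks above carve a BAR0 access into either the register
 * range or the GTT range that follows it.  The same range arithmetic,
 * stand-alone; the layout numbers in main() are invented for illustration
 * and the helper assumes gpa lies at or above the BAR base.
 */
#include <stdint.h>
#include <assert.h>

enum bar0_region { REG_MMIO, REG_GTT, REG_UNKNOWN };

struct bar0_layout {
	uint64_t base;		/* guest physical base of BAR0 */
	uint64_t mmio_size;	/* registers occupy offsets [0, mmio_size) */
	uint64_t gtt_start;	/* GTT starts at this offset */
	uint64_t gtt_size;
};

static enum bar0_region classify(const struct bar0_layout *l,
				 uint64_t gpa, uint64_t *offset)
{
	*offset = gpa - l->base;

	if (*offset < l->mmio_size)
		return REG_MMIO;
	if (*offset >= l->gtt_start && *offset < l->gtt_start + l->gtt_size)
		return REG_GTT;
	return REG_UNKNOWN;
}

int main(void)
{
	struct bar0_layout l = {
		.base = 0xf0000000, .mmio_size = 2u << 20,
		.gtt_start = 8u << 20, .gtt_size = 8u << 20,
	};
	uint64_t off;

	assert(classify(&l, 0xf0002000, &off) == REG_MMIO);
	assert(classify(&l, 0xf0900000, &off) == REG_GTT && off == (9u << 20));
	return 0;
}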
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Zhi Wang <[email protected]> * * Contributors: * Ping Gao <[email protected]> * Tina Zhang <[email protected]> * Chanbin Du <[email protected]> * Min He <[email protected]> * Bing Niu <[email protected]> * Zhenyu Wang <[email protected]> * */ #include <linux/kthread.h> #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_execlists_submission.h" #include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_gem_gtt.h" #include "i915_perf_oa_regs.h" #include "gvt.h" #define RING_CTX_OFF(x) \ offsetof(struct execlist_ring_context, x) static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) { int i; for (i = 0; i < 8; i++) ring_context->pdps[i].val = pdp[7 - i]; } static void update_shadow_pdps(struct intel_vgpu_workload *workload) { struct execlist_ring_context *shadow_ring_context; struct intel_context *ctx = workload->req->context; if (WARN_ON(!workload->shadow_mm)) return; if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) return; shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state; set_context_pdp_root_pointer(shadow_ring_context, (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); } /* * when populating shadow ctx from guest, we should not overrride oa related * registers, so that they will not be overlapped by guest oa configs. Thus * made it possible to capture oa data from host for both host and guests. 
*/ static void sr_oa_regs(struct intel_vgpu_workload *workload, u32 *reg_state, bool save) { struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset; u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset; int i = 0; u32 flex_mmio[] = { i915_mmio_reg_offset(EU_PERF_CNTL0), i915_mmio_reg_offset(EU_PERF_CNTL1), i915_mmio_reg_offset(EU_PERF_CNTL2), i915_mmio_reg_offset(EU_PERF_CNTL3), i915_mmio_reg_offset(EU_PERF_CNTL4), i915_mmio_reg_offset(EU_PERF_CNTL5), i915_mmio_reg_offset(EU_PERF_CNTL6), }; if (workload->engine->id != RCS0) return; if (save) { workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { u32 state_offset = ctx_flexeu0 + i * 2; workload->flex_mmio[i] = reg_state[state_offset + 1]; } } else { reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { u32 state_offset = ctx_flexeu0 + i * 2; u32 mmio = flex_mmio[i]; reg_state[state_offset] = mmio; reg_state[state_offset + 1] = workload->flex_mmio[i]; } } } static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_context *ctx = workload->req->context; struct execlist_ring_context *shadow_ring_context; void *dst; void *context_base; unsigned long context_gpa, context_page_num; unsigned long gpa_base; /* first gpa of consecutive GPAs */ unsigned long gpa_size; /* size of consecutive GPAs */ struct intel_vgpu_submission *s = &vgpu->submission; int i; bool skip = false; int ring_id = workload->engine->id; int ret; GEM_BUG_ON(!intel_context_is_pinned(ctx)); context_base = (void *) ctx->lrc_reg_state - (LRC_STATE_PN << I915_GTT_PAGE_SHIFT); shadow_ring_context = (void *) ctx->lrc_reg_state; sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) #define COPY_REG_MASKED(name) {\ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val),\ &shadow_ring_context->name.val, 4);\ shadow_ring_context->name.val |= 0xffff << 16;\ } COPY_REG_MASKED(ctx_ctrl); COPY_REG(ctx_timestamp); if (workload->engine->id == RCS0) { COPY_REG(bb_per_ctx_ptr); COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); } else if (workload->engine->id == BCS0) intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + BCS_TILE_REGISTER_VAL_OFFSET, (void *)shadow_ring_context + BCS_TILE_REGISTER_VAL_OFFSET, 4); #undef COPY_REG #undef COPY_REG_MASKED /* don't copy Ring Context (the first 0x50 dwords), * only copy the Engine Context part from guest */ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + RING_CTX_SIZE, (void *)shadow_ring_context + RING_CTX_SIZE, I915_GTT_PAGE_SIZE - RING_CTX_SIZE); sr_oa_regs(workload, (u32 *)shadow_ring_context, false); gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx", workload->engine->name, workload->ctx_desc.lrca, workload->ctx_desc.context_id, workload->ring_context_gpa); /* only need to ensure this context is not pinned/unpinned during the * period from last submission to this this submission. * Upon reaching this function, the currently submitted context is not * supposed to get unpinned. If a misbehaving guest driver ever does * this, it would corrupt itself. 
*/ if (s->last_ctx[ring_id].valid && (s->last_ctx[ring_id].lrca == workload->ctx_desc.lrca) && (s->last_ctx[ring_id].ring_context_gpa == workload->ring_context_gpa)) skip = true; s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca; s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa; if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip) return 0; s->last_ctx[ring_id].valid = false; context_page_num = workload->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0) context_page_num = 19; /* find consecutive GPAs from gma until the first inconsecutive GPA. * read from the continuous GPAs into dst virtual address */ gpa_size = 0; for (i = 2; i < context_page_num; i++) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << I915_GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("Invalid guest context descriptor\n"); return -EFAULT; } if (gpa_size == 0) { gpa_base = context_gpa; dst = context_base + (i << I915_GTT_PAGE_SHIFT); } else if (context_gpa != gpa_base + gpa_size) goto read; gpa_size += I915_GTT_PAGE_SIZE; if (i == context_page_num - 1) goto read; continue; read: intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); } ret = intel_gvt_scan_engine_context(workload); if (ret) { gvt_vgpu_err("invalid cmd found in guest context pages\n"); return ret; } s->last_ctx[ring_id].valid = true; return 0; } static inline bool is_gvt_request(struct i915_request *rq) { return intel_context_force_single_submission(rq->context); } static void save_ring_hw_state(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; i915_reg_t reg; reg = RING_INSTDONE(engine->mmio_base); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = intel_uncore_read(uncore, reg); reg = RING_ACTHD(engine->mmio_base); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = intel_uncore_read(uncore, reg); reg = RING_ACTHD_UDW(engine->mmio_base); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = intel_uncore_read(uncore, reg); } static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) { struct i915_request *rq = data; struct intel_gvt *gvt = container_of(nb, struct intel_gvt, shadow_ctx_notifier_block[rq->engine->id]); struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id ring_id = rq->engine->id; struct intel_vgpu_workload *workload; unsigned long flags; if (!is_gvt_request(rq)) { spin_lock_irqsave(&scheduler->mmio_context_lock, flags); if (action == INTEL_CONTEXT_SCHEDULE_IN && scheduler->engine_owner[ring_id]) { /* Switch ring from vGPU to host. */ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], NULL, rq->engine); scheduler->engine_owner[ring_id] = NULL; } spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); return NOTIFY_OK; } workload = scheduler->current_workload[ring_id]; if (unlikely(!workload)) return NOTIFY_OK; switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: spin_lock_irqsave(&scheduler->mmio_context_lock, flags); if (workload->vgpu != scheduler->engine_owner[ring_id]) { /* Switch ring from host to vGPU or vGPU to vGPU. 
*/ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], workload->vgpu, rq->engine); scheduler->engine_owner[ring_id] = workload->vgpu; } else gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", ring_id, workload->vgpu->id); spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: save_ring_hw_state(workload->vgpu, rq->engine); atomic_set(&workload->shadow_ctx_active, 0); break; case INTEL_CONTEXT_SCHEDULE_PREEMPTED: save_ring_hw_state(workload->vgpu, rq->engine); break; default: WARN_ON(1); return NOTIFY_OK; } wake_up(&workload->shadow_ctx_status_wq); return NOTIFY_OK; } static void shadow_context_descriptor_update(struct intel_context *ce, struct intel_vgpu_workload *workload) { u64 desc = ce->lrc.desc; /* * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT); desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; ce->lrc.desc = desc; } static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct i915_request *req = workload->req; void *shadow_ring_buffer_va; u32 *cs; int err; if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context)) intel_vgpu_restore_inhibit_context(vgpu, req); /* * To track whether a request has started on HW, we can emit a * breadcrumb at the beginning of the request and check its * timeline's HWSP to see if the breadcrumb has advanced past the * start of this request. Actually, the request must have the * init_breadcrumb if its timeline set has_init_bread_crumb, or the * scheduler might get a wrong state of it during reset. Since the * requests from gvt always set the has_init_breadcrumb flag, here * need to do the emit_init_breadcrumb for all the requests. 
*/ if (req->engine->emit_init_breadcrumb) { err = req->engine->emit_init_breadcrumb(req); if (err) { gvt_vgpu_err("fail to emit init breadcrumb\n"); return err; } } /* allocate shadow ring buffer */ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); if (IS_ERR(cs)) { gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n", workload->rb_len); return PTR_ERR(cs); } shadow_ring_buffer_va = workload->shadow_ring_buffer_va; /* get shadow ring buffer va */ workload->shadow_ring_buffer_va = cs; memcpy(cs, shadow_ring_buffer_va, workload->rb_len); cs += workload->rb_len / sizeof(u32); intel_ring_advance(workload->req, cs); return 0; } static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { if (!wa_ctx->indirect_ctx.obj) return; i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL); i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); i915_gem_object_put(wa_ctx->indirect_ctx.obj); wa_ctx->indirect_ctx.obj = NULL; wa_ctx->indirect_ctx.shadow_va = NULL; } static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr) { struct scatterlist *sg = pd->pt.base->mm.pages->sgl; /* This is not a good idea */ sg->dma_address = addr; } static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, struct intel_context *ce) { struct intel_vgpu_mm *mm = workload->shadow_mm; struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); int i = 0; if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]); } else { for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); /* skip now as current i915 ppgtt alloc won't allocate top level pdp for non 4-level table, won't impact shadow ppgtt. */ if (!pd) break; set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]); } } } static int intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; struct i915_request *rq; if (workload->req) return 0; rq = i915_request_create(s->shadow[workload->engine->id]); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); return PTR_ERR(rq); } workload->req = i915_request_get(rq); return 0; } /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. * @workload: an abstract entity for each execlist submission. * * This function is called before the workload submitting to i915, to make * sure the content of the workload is valid. 
*/ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; int ret; lockdep_assert_held(&vgpu->vgpu_lock); if (workload->shadow) return 0; if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated)) shadow_context_descriptor_update(s->shadow[workload->engine->id], workload); ret = intel_gvt_scan_and_shadow_ringbuffer(workload); if (ret) return ret; if (workload->engine->id == RCS0 && workload->wa_ctx.indirect_ctx.size) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; } workload->shadow = true; return 0; err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); return ret; } static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload); static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { struct intel_gvt *gvt = workload->vgpu->gvt; const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; struct intel_vgpu_shadow_bb *bb; struct i915_gem_ww_ctx ww; int ret; list_for_each_entry(bb, &workload->shadow_bb, list) { /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va * is only updated into ring_scan_buffer, not real ring address * allocated in later copy_workload_to_ring_buffer. pls be noted * shadow_ring_buffer_va is now pointed to real ring buffer va * in copy_workload_to_ring_buffer. */ if (bb->bb_offset) bb->bb_start_cmd_va = workload->shadow_ring_buffer_va + bb->bb_offset; /* * For non-priv bb, scan&shadow is only for * debugging purpose, so the content of shadow bb * is the same as original bb. Therefore, * here, rather than switch to shadow bb's gma * address, we directly use original batch buffer's * gma address, and send original bb to hardware * directly */ if (!bb->ppgtt) { i915_gem_ww_ctx_init(&ww, false); retry: i915_gem_object_lock(bb->obj, &ww); bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww, NULL, 0, 0, 0); if (IS_ERR(bb->vma)) { ret = PTR_ERR(bb->vma); if (ret == -EDEADLK) { ret = i915_gem_ww_ctx_backoff(&ww); if (!ret) goto retry; } goto err; } /* relocate shadow batch buffer */ bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); if (gmadr_bytes == 8) bb->bb_start_cmd_va[2] = 0; ret = i915_vma_move_to_active(bb->vma, workload->req, __EXEC_OBJECT_NO_REQUEST_AWAIT); if (ret) goto err; /* No one is going to touch shadow bb from now on. 
*/ i915_gem_object_flush_map(bb->obj); i915_gem_ww_ctx_fini(&ww); } } return 0; err: i915_gem_ww_ctx_fini(&ww); release_shadow_batch_buffer(workload); return ret; } static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) { struct intel_vgpu_workload *workload = container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); struct i915_request *rq = workload->req; struct execlist_ring_context *shadow_ring_context = (struct execlist_ring_context *)rq->context->lrc_reg_state; shadow_ring_context->bb_per_ctx_ptr.val = (shadow_ring_context->bb_per_ctx_ptr.val & (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma; shadow_ring_context->rcs_indirect_ctx.val = (shadow_ring_context->rcs_indirect_ctx.val & (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; } static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { struct i915_vma *vma; unsigned char *per_ctx_va = (unsigned char *)wa_ctx->indirect_ctx.shadow_va + wa_ctx->indirect_ctx.size; struct i915_gem_ww_ctx ww; int ret; if (wa_ctx->indirect_ctx.size == 0) return 0; i915_gem_ww_ctx_init(&ww, false); retry: i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww); vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL, 0, CACHELINE_BYTES, 0); if (IS_ERR(vma)) { ret = PTR_ERR(vma); if (ret == -EDEADLK) { ret = i915_gem_ww_ctx_backoff(&ww); if (!ret) goto retry; } return ret; } i915_gem_ww_ctx_fini(&ww); /* FIXME: we are not tracking our pinned VMA leaving it * up to the core to fix up the stray pin_count upon * free. */ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma); wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1); memset(per_ctx_va, 0, CACHELINE_BYTES); update_wa_ctx_2_shadow_ctx(wa_ctx); return 0; } static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) { vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) = workload->rb_start; } static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu_shadow_bb *bb, *pos; if (list_empty(&workload->shadow_bb)) return; bb = list_first_entry(&workload->shadow_bb, struct intel_vgpu_shadow_bb, list); list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { i915_gem_object_lock(bb->obj, NULL); if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); if (bb->vma && !IS_ERR(bb->vma)) i915_vma_unpin(bb->vma); i915_gem_object_unlock(bb->obj); i915_gem_object_put(bb->obj); } list_del(&bb->list); kfree(bb); } } static int intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_mm *m; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); if (ret) { gvt_vgpu_err("fail to vgpu pin mm\n"); return ret; } if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || !workload->shadow_mm->ppgtt_mm.shadowed) { intel_vgpu_unpin_mm(workload->shadow_mm); gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); return -EINVAL; } if (!list_empty(&workload->lri_shadow_mm)) { list_for_each_entry(m, &workload->lri_shadow_mm, ppgtt_mm.link) { ret = intel_vgpu_pin_mm(m); if (ret) { list_for_each_entry_from_reverse(m, &workload->lri_shadow_mm, ppgtt_mm.link) intel_vgpu_unpin_mm(m); gvt_vgpu_err("LRI shadow ppgtt fail to pin\n"); break; } } } if (ret) intel_vgpu_unpin_mm(workload->shadow_mm); return ret; } static void intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload) { struct intel_vgpu_mm *m; if (!list_empty(&workload->lri_shadow_mm)) { list_for_each_entry(m, &workload->lri_shadow_mm, 
ppgtt_mm.link) intel_vgpu_unpin_mm(m); } intel_vgpu_unpin_mm(workload->shadow_mm); } static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; int ret = 0; ret = intel_vgpu_shadow_mm_pin(workload); if (ret) { gvt_vgpu_err("fail to pin shadow mm\n"); return ret; } update_shadow_pdps(workload); set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]); ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { gvt_vgpu_err("fail to vgpu sync oos pages\n"); goto err_unpin_mm; } ret = intel_vgpu_flush_post_shadow(workload->vgpu); if (ret) { gvt_vgpu_err("fail to flush post shadow\n"); goto err_unpin_mm; } ret = copy_workload_to_ring_buffer(workload); if (ret) { gvt_vgpu_err("fail to generate request\n"); goto err_unpin_mm; } ret = prepare_shadow_batch_buffer(workload); if (ret) { gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); goto err_unpin_mm; } ret = prepare_shadow_wa_ctx(&workload->wa_ctx); if (ret) { gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); goto err_shadow_batch; } if (workload->prepare) { ret = workload->prepare(workload); if (ret) goto err_shadow_wa_ctx; } return 0; err_shadow_wa_ctx: release_shadow_wa_ctx(&workload->wa_ctx); err_shadow_batch: release_shadow_batch_buffer(workload); err_unpin_mm: intel_vgpu_shadow_mm_unpin(workload); return ret; } static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct i915_request *rq; int ret; gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n", workload->engine->name, workload); mutex_lock(&vgpu->vgpu_lock); ret = intel_gvt_workload_req_alloc(workload); if (ret) goto err_req; ret = intel_gvt_scan_and_shadow_workload(workload); if (ret) goto out; ret = populate_shadow_context(workload); if (ret) { release_shadow_wa_ctx(&workload->wa_ctx); goto out; } ret = prepare_workload(workload); out: if (ret) { /* We might still need to add request with * clean ctx to retire it properly.. */ rq = fetch_and_zero(&workload->req); i915_request_put(rq); } if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %s submit workload to i915 %p\n", workload->engine->name, workload->req); i915_request_add(workload->req); workload->dispatched = true; } err_req: if (ret) workload->status = ret; mutex_unlock(&vgpu->vgpu_lock); return ret; } static struct intel_vgpu_workload * pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; mutex_lock(&gvt->sched_lock); /* * no current vgpu / will be scheduled out / no workload * bail out */ if (!scheduler->current_vgpu) { gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name); goto out; } if (scheduler->need_reschedule) { gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name); goto out; } if (!test_bit(INTEL_VGPU_STATUS_ACTIVE, scheduler->current_vgpu->status) || list_empty(workload_q_head(scheduler->current_vgpu, engine))) goto out; /* * still have current workload, maybe the workload disptacher * fail to submit it for some reason, resubmit it. 
*/ if (scheduler->current_workload[engine->id]) { workload = scheduler->current_workload[engine->id]; gvt_dbg_sched("ring %s still have current workload %p\n", engine->name, workload); goto out; } /* * pick a workload as current workload * once current workload is set, schedule policy routines * will wait the current workload is finished when trying to * schedule out a vgpu. */ scheduler->current_workload[engine->id] = list_first_entry(workload_q_head(scheduler->current_vgpu, engine), struct intel_vgpu_workload, list); workload = scheduler->current_workload[engine->id]; gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload); atomic_inc(&workload->vgpu->submission.running_workload_num); out: mutex_unlock(&gvt->sched_lock); return workload; } static void update_guest_pdps(struct intel_vgpu *vgpu, u64 ring_context_gpa, u32 pdp[8]) { u64 gpa; int i; gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } static __maybe_unused bool check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m) { if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32; if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) { gvt_dbg_mm("4-level context ppgtt not match LRI command\n"); return false; } return true; } else { /* see comment in LRI handler in cmd_parser.c */ gvt_dbg_mm("invalid shadow mm type\n"); return false; } } static void update_guest_context(struct intel_vgpu_workload *workload) { struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; struct execlist_ring_context *shadow_ring_context; struct intel_context *ctx = workload->req->context; void *context_base; void *src; unsigned long context_gpa, context_page_num; unsigned long gpa_base; /* first gpa of consecutive GPAs */ unsigned long gpa_size; /* size of consecutive GPAs*/ int i; u32 ring_base; u32 head, tail; u16 wrap_count; gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, workload->ctx_desc.lrca); GEM_BUG_ON(!intel_context_is_pinned(ctx)); head = workload->rb_head; tail = workload->rb_tail; wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF; if (tail < head) { if (wrap_count == RB_HEAD_WRAP_CNT_MAX) wrap_count = 0; else wrap_count += 1; } head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail; ring_base = rq->engine->mmio_base; vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0) context_page_num = 19; context_base = (void *) ctx->lrc_reg_state - (LRC_STATE_PN << I915_GTT_PAGE_SHIFT); /* find consecutive GPAs from gma until the first inconsecutive GPA. 
* write to the consecutive GPAs from src virtual address */ gpa_size = 0; for (i = 2; i < context_page_num; i++) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << I915_GTT_PAGE_SHIFT)); if (context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("invalid guest context descriptor\n"); return; } if (gpa_size == 0) { gpa_base = context_gpa; src = context_base + (i << I915_GTT_PAGE_SHIFT); } else if (context_gpa != gpa_base + gpa_size) goto write; gpa_size += I915_GTT_PAGE_SIZE; if (i == context_page_num - 1) goto write; continue; write: intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; src = context_base + (i << I915_GTT_PAGE_SHIFT); } intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); shadow_ring_context = (void *) ctx->lrc_reg_state; if (!list_empty(&workload->lri_shadow_mm)) { struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm, struct intel_vgpu_mm, ppgtt_mm.link); GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m)); update_guest_pdps(vgpu, workload->ring_context_gpa, (void *)m->ppgtt_mm.guest_pdps); } #define COPY_REG(name) \ intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) COPY_REG(ctx_ctrl); COPY_REG(ctx_timestamp); #undef COPY_REG intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + sizeof(*shadow_ring_context), (void *)shadow_ring_context + sizeof(*shadow_ring_context), I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); } void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. */ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); intel_vgpu_destroy_workload(pos); } clear_bit(engine->id, s->shadow_ctx_desc_updated); } } static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = scheduler->current_workload[ring_id]; struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; struct i915_request *rq = workload->req; int event; mutex_lock(&vgpu->vgpu_lock); mutex_lock(&gvt->sched_lock); /* For the workload w/ request, needs to wait for the context * switch to make sure request is completed. * For the workload w/o request, directly complete the workload. */ if (rq) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); /* If this request caused GPU hang, req->fence.error will * be set to -EIO. Use -EIO to set workload status so * that when this request caused GPU hang, didn't trigger * context switch interrupt to guest. 
*/ if (likely(workload->status == -EINPROGRESS)) { if (workload->req->fence.error == -EIO) workload->status = -EIO; else workload->status = 0; } if (!workload->status && !(vgpu->resetting_eng & BIT(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); } i915_request_put(fetch_and_zero(&workload->req)); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", ring_id, workload, workload->status); scheduler->current_workload[ring_id] = NULL; list_del_init(&workload->list); if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { /* If workload->status is not successful, the HW GPU has hit * a hang or something went wrong with i915/GVT, and GVT won't * inject a context switch interrupt to the guest. So this * error is effectively a vGPU hang for the guest. Accordingly, * we should emulate a vGPU hang: if there are pending workloads * already submitted from the guest, we should clean them up * like the HW GPU does. * * If we are in the middle of an engine reset, the pending * workloads won't be submitted to the HW GPU and will be * cleaned up later during the reset, so doing the workload * cleanup here has no impact. */ intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); } workload->complete(workload); intel_vgpu_shadow_mm_unpin(workload); intel_vgpu_destroy_workload(workload); atomic_dec(&s->running_workload_num); wake_up(&scheduler->workload_complete_wq); if (gvt->scheduler.need_reschedule) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->sched_lock); mutex_unlock(&vgpu->vgpu_lock); } static int workload_thread(void *arg) { struct intel_engine_cs *engine = arg; const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9; struct intel_gvt *gvt = engine->i915->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; DEFINE_WAIT_FUNC(wait, woken_wake_function); gvt_dbg_core("workload thread for ring %s started\n", engine->name); while (!kthread_should_stop()) { intel_wakeref_t wakeref; add_wait_queue(&scheduler->waitq[engine->id], &wait); do { workload = pick_next_workload(gvt, engine); if (workload) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } while (!kthread_should_stop()); remove_wait_queue(&scheduler->waitq[engine->id], &wait); if (!workload) break; gvt_dbg_sched("ring %s next workload %p vgpu %d\n", engine->name, workload, workload->vgpu->id); wakeref = intel_runtime_pm_get(engine->uncore->rpm); gvt_dbg_sched("ring %s will dispatch workload %p\n", engine->name, workload); if (need_force_wake) intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); /* * Update the vReg of the vGPU which submitted this * workload. The vGPU may use these registers for checking * the context state. The value comes from GPU commands * in this workload.
*/ update_vreg_in_ctx(workload); ret = dispatch_workload(workload); if (ret) { vgpu = workload->vgpu; gvt_vgpu_err("fail to dispatch workload, skip\n"); goto complete; } gvt_dbg_sched("ring %s wait workload %p\n", engine->name, workload); i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); complete: gvt_dbg_sched("will complete workload %p, status: %d\n", workload, workload->status); complete_current_workload(gvt, engine->id); if (need_force_wake) intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(engine->uncore->rpm, wakeref); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } return 0; } void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; if (atomic_read(&s->running_workload_num)) { gvt_dbg_sched("wait vgpu idle\n"); wait_event(scheduler->workload_complete_wq, !atomic_read(&s->running_workload_num)); } } void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_engine_cs *engine; enum intel_engine_id i; gvt_dbg_core("clean workload scheduler\n"); for_each_engine(engine, gvt->gt, i) { atomic_notifier_chain_unregister( &engine->context_status_notifier, &gvt->shadow_ctx_notifier_block[i]); kthread_stop(scheduler->thread[i]); } } int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_engine_cs *engine; enum intel_engine_id i; int ret; gvt_dbg_core("init workload scheduler\n"); init_waitqueue_head(&scheduler->workload_complete_wq); for_each_engine(engine, gvt->gt, i) { init_waitqueue_head(&scheduler->waitq[i]); scheduler->thread[i] = kthread_run(workload_thread, engine, "gvt:%s", engine->name); if (IS_ERR(scheduler->thread[i])) { gvt_err("fail to create workload thread\n"); ret = PTR_ERR(scheduler->thread[i]); goto err; } gvt->shadow_ctx_notifier_block[i].notifier_call = shadow_context_status_change; atomic_notifier_chain_register(&engine->context_status_notifier, &gvt->shadow_ctx_notifier_block[i]); } return 0; err: intel_gvt_clean_workload_scheduler(gvt); return ret; } static void i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s, struct i915_ppgtt *ppgtt) { int i; if (i915_vm_is_4lvl(&ppgtt->vm)) { set_dma_address(ppgtt->pd, s->i915_context_pml4); } else { for (i = 0; i < GEN8_3LVL_PDPES; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); set_dma_address(pd, s->i915_context_pdps[i]); } } } /** * intel_vgpu_clean_submission - free submission-related resource for vGPU * @vgpu: a vGPU * * This function is called when a vGPU is being destroyed. * */ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; enum intel_engine_id id; intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); for_each_engine(engine, vgpu->gvt->gt, id) intel_context_put(s->shadow[id]); kmem_cache_destroy(s->workloads); } /** * intel_vgpu_reset_submission - reset submission-related resource for vGPU * @vgpu: a vGPU * @engine_mask: engines expected to be reset * * This function is called when a vGPU is being destroyed. 
* */ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; if (!s->active) return; intel_vgpu_clean_workloads(vgpu, engine_mask); s->ops->reset(vgpu, engine_mask); } static void i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, struct i915_ppgtt *ppgtt) { int i; if (i915_vm_is_4lvl(&ppgtt->vm)) { s->i915_context_pml4 = px_dma(ppgtt->pd); } else { for (i = 0; i < GEN8_3LVL_PDPES; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); s->i915_context_pdps[i] = px_dma(pd); } } } /** * intel_vgpu_setup_submission - setup submission-related resource for vGPU * @vgpu: a vGPU * * This function is called when a vGPU is being created. * * Returns: * Zero on success, negative error code if failed. * */ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; struct i915_ppgtt *ppgtt; enum intel_engine_id i; int ret; ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); i915_context_ppgtt_root_save(s, ppgtt); for_each_engine(engine, vgpu->gvt->gt, i) { struct intel_context *ce; INIT_LIST_HEAD(&s->workload_q_head[i]); s->shadow[i] = ERR_PTR(-EINVAL); ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); goto out_shadow_ctx; } i915_vm_put(ce->vm); ce->vm = i915_vm_get(&ppgtt->vm); intel_context_set_single_submission(ce); /* Max ring buffer size */ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) ce->ring_size = SZ_2M; s->shadow[i] = ce; } bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, offsetof(struct intel_vgpu_workload, rb_tail), sizeof_field(struct intel_vgpu_workload, rb_tail), NULL); if (!s->workloads) { ret = -ENOMEM; goto out_shadow_ctx; } atomic_set(&s->running_workload_num, 0); bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); memset(s->last_ctx, 0, sizeof(s->last_ctx)); i915_vm_put(&ppgtt->vm); return 0; out_shadow_ctx: i915_context_ppgtt_root_restore(s, ppgtt); for_each_engine(engine, vgpu->gvt->gt, i) { if (IS_ERR(s->shadow[i])) break; intel_context_put(s->shadow[i]); } i915_vm_put(&ppgtt->vm); return ret; } /** * intel_vgpu_select_submission_ops - select virtual submission interface * @vgpu: a vGPU * @engine_mask: either ALL_ENGINES or target engine mask * @interface: expected vGPU virtual submission interface * * This function is called when guest configures submission interface. * * Returns: * Zero on success, negative error code if failed. 
* */ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask, unsigned int interface) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_submission *s = &vgpu->submission; const struct intel_vgpu_submission_ops *ops[] = { [INTEL_VGPU_EXECLIST_SUBMISSION] = &intel_vgpu_execlist_submission_ops, }; int ret; if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops))) return -EINVAL; if (drm_WARN_ON(&i915->drm, interface == 0 && engine_mask != ALL_ENGINES)) return -EINVAL; if (s->active) s->ops->clean(vgpu, engine_mask); if (interface == 0) { s->ops = NULL; s->virtual_submission_interface = 0; s->active = false; gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id); return 0; } ret = ops[interface]->init(vgpu, engine_mask); if (ret) return ret; s->ops = ops[interface]; s->virtual_submission_interface = interface; s->active = true; gvt_dbg_core("vgpu%d: activate ops [ %s ]\n", vgpu->id, s->ops->name); return 0; } /** * intel_vgpu_destroy_workload - destroy a vGPU workload * @workload: workload to destroy * * This function is called when destroy a vGPU workload. * */ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu_submission *s = &workload->vgpu->submission; intel_context_unpin(s->shadow[workload->engine->id]); release_shadow_batch_buffer(workload); release_shadow_wa_ctx(&workload->wa_ctx); if (!list_empty(&workload->lri_shadow_mm)) { struct intel_vgpu_mm *m, *mm; list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm, ppgtt_mm.link) { list_del(&m->ppgtt_mm.link); intel_vgpu_mm_put(m); } } GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm)); if (workload->shadow_mm) intel_vgpu_mm_put(workload->shadow_mm); kmem_cache_free(s->workloads, workload); } static struct intel_vgpu_workload * alloc_workload(struct intel_vgpu *vgpu) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_workload *workload; workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); if (!workload) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&workload->list); INIT_LIST_HEAD(&workload->shadow_bb); INIT_LIST_HEAD(&workload->lri_shadow_mm); init_waitqueue_head(&workload->shadow_ctx_status_wq); atomic_set(&workload->shadow_ctx_active, 0); workload->status = -EINPROGRESS; workload->vgpu = vgpu; return workload; } #define RING_CTX_OFF(x) \ offsetof(struct execlist_ring_context, x) static void read_guest_pdps(struct intel_vgpu *vgpu, u64 ring_context_gpa, u32 pdp[8]) { u64 gpa; int i; gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) intel_gvt_read_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } static int prepare_mm(struct intel_vgpu_workload *workload) { struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; struct intel_vgpu_mm *mm; struct intel_vgpu *vgpu = workload->vgpu; enum intel_gvt_gtt_type root_entry_type; u64 pdps[GVT_RING_CTX_NR_PDPS]; switch (desc->addressing_mode) { case 1: /* legacy 32-bit */ root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; break; case 3: /* legacy 64-bit */ root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; break; default: gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); return -EINVAL; } read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); if (IS_ERR(mm)) return PTR_ERR(mm); workload->shadow_mm = mm; return 0; } #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) /** * intel_vgpu_create_workload - create a 
vGPU workload * @vgpu: a vGPU * @engine: the engine * @desc: a guest context descriptor * * This function is called when creating a vGPU workload. * * Returns: * struct intel_vgpu_workload * on success, negative error code in * pointer if failed. * */ struct intel_vgpu_workload * intel_vgpu_create_workload(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine, struct execlist_ctx_descriptor_format *desc) { struct intel_vgpu_submission *s = &vgpu->submission; struct list_head *q = workload_q_head(vgpu, engine); struct intel_vgpu_workload *last_workload = NULL; struct intel_vgpu_workload *workload = NULL; u64 ring_context_gpa; u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; u32 guest_head; int ret; ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT)); if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); return ERR_PTR(-EINVAL); } intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_header.val), &head, 4); intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_tail.val), &tail, 4); guest_head = head; head &= RB_HEAD_OFF_MASK; tail &= RB_TAIL_OFF_MASK; list_for_each_entry_reverse(last_workload, q, list) { if (same_context(&last_workload->ctx_desc, desc)) { gvt_dbg_el("ring %s cur workload == last\n", engine->name); gvt_dbg_el("ctx head %x real head %lx\n", head, last_workload->rb_tail); /* * cannot use guest context head pointer here, * as it might not be updated at this time */ head = last_workload->rb_tail; break; } } gvt_dbg_el("ring %s begin a new workload\n", engine->name); /* record some ring buffer register values for scan and shadow */ intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_start.val), &start, 4); intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_ctrl.val), &ctl, 4); intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); if (!intel_gvt_ggtt_validate_range(vgpu, start, _RING_CTL_BUF_SIZE(ctl))) { gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); return ERR_PTR(-EINVAL); } workload = alloc_workload(vgpu); if (IS_ERR(workload)) return workload; workload->engine = engine; workload->ctx_desc = *desc; workload->ring_context_gpa = ring_context_gpa; workload->rb_head = head; workload->guest_rb_head = guest_head; workload->rb_tail = tail; workload->rb_start = start; workload->rb_ctl = ctl; if (engine->id == RCS0) { intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); workload->wa_ctx.indirect_ctx.guest_gma = indirect_ctx & INDIRECT_CTX_ADDR_MASK; workload->wa_ctx.indirect_ctx.size = (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES; if (workload->wa_ctx.indirect_ctx.size != 0) { if (!intel_gvt_ggtt_validate_range(vgpu, workload->wa_ctx.indirect_ctx.guest_gma, workload->wa_ctx.indirect_ctx.size)) { gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", workload->wa_ctx.indirect_ctx.guest_gma); kmem_cache_free(s->workloads, workload); return ERR_PTR(-EINVAL); } } workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; workload->wa_ctx.per_ctx.valid = per_ctx & 1; if (workload->wa_ctx.per_ctx.valid) { if (!intel_gvt_ggtt_validate_range(vgpu, workload->wa_ctx.per_ctx.guest_gma, CACHELINE_BYTES)) { gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", workload->wa_ctx.per_ctx.guest_gma); kmem_cache_free(s->workloads, workload); return 
ERR_PTR(-EINVAL); } } } gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n", workload, engine->name, head, tail, start, ctl); ret = prepare_mm(workload); if (ret) { kmem_cache_free(s->workloads, workload); return ERR_PTR(ret); } /* Only scan and shadow the first workload in the queue * as there is only one pre-allocated buf-obj for shadow. */ if (list_empty(q)) { intel_wakeref_t wakeref; with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref) ret = intel_gvt_scan_and_shadow_workload(workload); } if (ret) { if (vgpu_is_vm_unhealthy(ret)) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); intel_vgpu_destroy_workload(workload); return ERR_PTR(ret); } ret = intel_context_pin(s->shadow[engine->id]); if (ret) { intel_vgpu_destroy_workload(workload); return ERR_PTR(ret); } return workload; } /** * intel_vgpu_queue_workload - Queue a vGPU workload * @workload: the workload to queue in */ void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) { list_add_tail(&workload->list, workload_q_head(workload->vgpu, workload->engine)); intel_gvt_kick_schedule(workload->vgpu->gvt); wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]); }
linux-master
drivers/gpu/drm/i915/gvt/scheduler.c
/* * GTT virtualization * * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Zhi Wang <[email protected]> * Zhenyu Wang <[email protected]> * Xiao Zheng <[email protected]> * * Contributors: * Min He <[email protected]> * Bing Niu <[email protected]> * */ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" #include "gt/intel_gt_regs.h" #if defined(VERBOSE_DEBUG) #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) #else #define gvt_vdbg_mm(fmt, args...) #endif static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; /* * validate a gm address and related range size, * translate it to host gm address */ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) { if (size == 0) return vgpu_gmadr_is_valid(vgpu, addr); if (vgpu_gmadr_is_aperture(vgpu, addr) && vgpu_gmadr_is_aperture(vgpu, addr + size - 1)) return true; else if (vgpu_gmadr_is_hidden(vgpu, addr) && vgpu_gmadr_is_hidden(vgpu, addr + size - 1)) return true; gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n", addr, size); return false; } /* translate a guest gmadr to host gmadr */ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr), "invalid guest gmadr %llx\n", g_addr)) return -EACCES; if (vgpu_gmadr_is_aperture(vgpu, g_addr)) *h_addr = vgpu_aperture_gmadr_base(vgpu) + (g_addr - vgpu_aperture_offset(vgpu)); else *h_addr = vgpu_hidden_gmadr_base(vgpu) + (g_addr - vgpu_hidden_offset(vgpu)); return 0; } /* translate a host gmadr to guest gmadr */ int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), "invalid host gmadr %llx\n", h_addr)) return -EACCES; if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) *g_addr = vgpu_aperture_gmadr_base(vgpu) + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); else *g_addr = vgpu_hidden_gmadr_base(vgpu) + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); return 0; } int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, unsigned long *h_index) { u64 h_addr; int ret; ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT, &h_addr); if (ret) return ret; *h_index = h_addr >> I915_GTT_PAGE_SHIFT; return 0; } int 
intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index) { u64 g_addr; int ret; ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT, &g_addr); if (ret) return ret; *g_index = g_addr >> I915_GTT_PAGE_SHIFT; return 0; } #define gtt_type_is_entry(type) \ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \ && type != GTT_TYPE_PPGTT_PTE_ENTRY \ && type != GTT_TYPE_PPGTT_ROOT_ENTRY) #define gtt_type_is_pt(type) \ (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) #define gtt_type_is_pte_pt(type) \ (type == GTT_TYPE_PPGTT_PTE_PT) #define gtt_type_is_root_pointer(type) \ (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY) #define gtt_init_entry(e, t, p, v) do { \ (e)->type = t; \ (e)->pdev = p; \ memcpy(&(e)->val64, &v, sizeof(v)); \ } while (0) /* * Mappings between GTT_TYPE* enumerations. * Following information can be found according to the given type: * - type of next level page table * - type of entry inside this level page table * - type of entry with PSE set * * If the given type doesn't have such a kind of information, * e.g. give a l4 root entry type, then request to get its PSE type, * give a PTE page table type, then request to get its next level page * table type, as we know l4 root entry doesn't have a PSE bit, * and a PTE page table doesn't have a next level page table type, * GTT_TYPE_INVALID will be returned. This is useful when traversing a * page table. */ struct gtt_type_table_entry { int entry_type; int pt_type; int next_pt_type; int pse_entry_type; }; #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \ [type] = { \ .entry_type = e_type, \ .pt_type = cpt_type, \ .next_pt_type = npt_type, \ .pse_entry_type = pse_type, \ } static const struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PML4_ENTRY, GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY, GTT_TYPE_PPGTT_PML4_ENTRY, GTT_TYPE_PPGTT_PML4_PT, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDP_ENTRY, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY, GTT_TYPE_PPGTT_ROOT_L3_ENTRY, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY, GTT_TYPE_PPGTT_PDP_ENTRY, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), /* We take IPS bit as 'PSE' for PTE level. 
*/ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_2M_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY, GTT_TYPE_PPGTT_PDP_ENTRY, GTT_TYPE_PPGTT_PDP_PT, GTT_TYPE_INVALID, GTT_TYPE_PPGTT_PTE_1G_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE, GTT_TYPE_GGTT_PTE, GTT_TYPE_INVALID, GTT_TYPE_INVALID, GTT_TYPE_INVALID), }; static inline int get_next_pt_type(int type) { return gtt_type_table[type].next_pt_type; } static inline int get_entry_type(int type) { return gtt_type_table[type].entry_type; } static inline int get_pse_type(int type) { return gtt_type_table[type].pse_entry_type; } static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; return readq(addr); } static void ggtt_invalidate(struct intel_gt *gt) { mmio_hw_access_pre(gt); intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); mmio_hw_access_post(gt); } static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte) { void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; writeq(pte, addr); } static inline int gtt_get_entry64(void *pt, struct intel_gvt_gtt_entry *e, unsigned long index, bool hypervisor_access, unsigned long gpa, struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; int ret; if (WARN_ON(info->gtt_entry_size != 8)) return -EINVAL; if (hypervisor_access) { ret = intel_gvt_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) return ret; } else if (!pt) { e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index); } else { e->val64 = *((u64 *)pt + index); } return 0; } static inline int gtt_set_entry64(void *pt, struct intel_gvt_gtt_entry *e, unsigned long index, bool hypervisor_access, unsigned long gpa, struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; int ret; if (WARN_ON(info->gtt_entry_size != 8)) return -EINVAL; if (hypervisor_access) { ret = intel_gvt_write_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) return ret; } else if (!pt) { write_pte64(vgpu->gvt->gt->ggtt, index, e->val64); } else { *((u64 *)pt + index) = e->val64; } return 0; } #define GTT_HAW 46 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16) #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52) #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */ #define GTT_64K_PTE_STRIDE 16 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { unsigned long pfn; if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT; else pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; 
return pfn; } static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) { if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { e->val64 &= ~ADDR_1G_MASK; pfn &= (ADDR_1G_MASK >> PAGE_SHIFT); } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { e->val64 &= ~ADDR_2M_MASK; pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) { e->val64 &= ~ADDR_64K_MASK; pfn &= (ADDR_64K_MASK >> PAGE_SHIFT); } else { e->val64 &= ~ADDR_4K_MASK; pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); } e->val64 |= (pfn << PAGE_SHIFT); } static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) { return !!(e->val64 & _PAGE_PSE); } static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e) { if (gen8_gtt_test_pse(e)) { switch (e->type) { case GTT_TYPE_PPGTT_PTE_2M_ENTRY: e->val64 &= ~_PAGE_PSE; e->type = GTT_TYPE_PPGTT_PDE_ENTRY; break; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: e->type = GTT_TYPE_PPGTT_PDP_ENTRY; e->val64 &= ~_PAGE_PSE; break; default: WARN_ON(1); } } } static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e) { if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) return false; return !!(e->val64 & GEN8_PDE_IPS_64K); } static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e) { if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) return; e->val64 &= ~GEN8_PDE_IPS_64K; } static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) { /* * i915 writes PDP root pointer registers without present bit, * it also works, so we need to treat root pointer entry * specifically. */ if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) return (e->val64 != 0); else return (e->val64 & GEN8_PAGE_PRESENT); } static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) { e->val64 &= ~GEN8_PAGE_PRESENT; } static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) { e->val64 |= GEN8_PAGE_PRESENT; } static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) { return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED); } static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e) { e->val64 |= GTT_SPTE_FLAG_64K_SPLITED; } static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e) { e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED; } /* * Per-platform GMA routines. 
*/ static unsigned long gma_to_ggtt_pte_index(unsigned long gma) { unsigned long x = (gma >> I915_GTT_PAGE_SHIFT); trace_gma_index(__func__, gma, x); return x; } #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \ static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \ { \ unsigned long x = (exp); \ trace_gma_index(__func__, gma, x); \ return x; \ } DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .get_entry = gtt_get_entry64, .set_entry = gtt_set_entry64, .clear_present = gtt_entry_clear_present, .set_present = gtt_entry_set_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, .clear_pse = gen8_gtt_clear_pse, .clear_ips = gen8_gtt_clear_ips, .test_ips = gen8_gtt_test_ips, .clear_64k_splited = gen8_gtt_clear_64k_splited, .set_64k_splited = gen8_gtt_set_64k_splited, .test_64k_splited = gen8_gtt_test_64k_splited, .get_pfn = gen8_gtt_get_pfn, .set_pfn = gen8_gtt_set_pfn, }; static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index, .gma_to_pte_index = gen8_gma_to_pte_index, .gma_to_pde_index = gen8_gma_to_pde_index, .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index, .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index, .gma_to_pml4_index = gen8_gma_to_pml4_index, }; /* Update entry type per pse and ips bit. */ static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops, struct intel_gvt_gtt_entry *entry, bool ips) { switch (entry->type) { case GTT_TYPE_PPGTT_PDE_ENTRY: case GTT_TYPE_PPGTT_PDP_ENTRY: if (pte_ops->test_pse(entry)) entry->type = get_pse_type(entry->type); break; case GTT_TYPE_PPGTT_PTE_4K_ENTRY: if (ips) entry->type = get_pse_type(entry->type); break; default: GEM_BUG_ON(!gtt_type_is_entry(entry->type)); } GEM_BUG_ON(entry->type == GTT_TYPE_INVALID); } /* * MM helpers. */ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); entry->type = mm->ppgtt_mm.root_entry_type; pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, entry, index, false, 0, mm->vgpu); update_entry_type_for_real(pte_ops, entry, false); } static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { _ppgtt_get_root_entry(mm, entry, index, true); } static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { _ppgtt_get_root_entry(mm, entry, index, false); } static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; pte_ops->set_entry(guest ? 
mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, entry, index, false, 0, mm->vgpu); } static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { _ppgtt_set_root_entry(mm, entry, index, false); } static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); entry->type = GTT_TYPE_GGTT_PTE; pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index, false, 0, mm->vgpu); } static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index, false, 0, mm->vgpu); } static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); } static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT); mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64; } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT); mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64; } pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); } /* * PPGTT shadow page table helpers. */ static inline int ppgtt_spt_get_entry( struct intel_vgpu_ppgtt_spt *spt, void *page_table, int type, struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; int ret; e->type = get_entry_type(type); if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) return -EINVAL; ret = ops->get_entry(page_table, e, index, guest, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, spt->vgpu); if (ret) return ret; update_entry_type_for_real(ops, e, guest ? 
spt->guest_page.pde_ips : false); gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", type, e->type, index, e->val64); return 0; } static inline int ppgtt_spt_set_entry( struct intel_vgpu_ppgtt_spt *spt, void *page_table, int type, struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) return -EINVAL; gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", type, e->type, index, e->val64); return ops->set_entry(page_table, e, index, guest, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, spt->vgpu); } #define ppgtt_get_guest_entry(spt, e, index) \ ppgtt_spt_get_entry(spt, NULL, \ spt->guest_page.type, e, index, true) #define ppgtt_set_guest_entry(spt, e, index) \ ppgtt_spt_set_entry(spt, NULL, \ spt->guest_page.type, e, index, true) #define ppgtt_get_shadow_entry(spt, e, index) \ ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ spt->shadow_page.type, e, index, false) #define ppgtt_set_shadow_entry(spt, e, index) \ ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ spt->shadow_page.type, e, index, false) static void *alloc_spt(gfp_t gfp_mask) { struct intel_vgpu_ppgtt_spt *spt; spt = kzalloc(sizeof(*spt), gfp_mask); if (!spt) return NULL; spt->shadow_page.page = alloc_page(gfp_mask); if (!spt->shadow_page.page) { kfree(spt); return NULL; } return spt; } static void free_spt(struct intel_vgpu_ppgtt_spt *spt) { __free_page(spt->shadow_page.page); kfree(spt); } static int detach_oos_page(struct intel_vgpu *vgpu, struct intel_vgpu_oos_page *oos_page); static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) { struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev; trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, DMA_BIDIRECTIONAL); radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); if (spt->guest_page.gfn) { if (spt->guest_page.oos_page) detach_oos_page(spt->vgpu, spt->guest_page.oos_page); intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); } list_del_init(&spt->post_shadow_list); free_spt(spt); } static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) { struct intel_vgpu_ppgtt_spt *spt, *spn; struct radix_tree_iter iter; LIST_HEAD(all_spt); void __rcu **slot; rcu_read_lock(); radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { spt = radix_tree_deref_slot(slot); list_move(&spt->post_shadow_list, &all_spt); } rcu_read_unlock(); list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list) ppgtt_free_spt(spt); } static int ppgtt_handle_guest_write_page_table_bytes( struct intel_vgpu_ppgtt_spt *spt, u64 pa, void *p_data, int bytes); static int ppgtt_write_protection_handler( struct intel_vgpu_page_track *page_track, u64 gpa, void *data, int bytes) { struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data; int ret; if (bytes != 4 && bytes != 8) return -EINVAL; ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes); if (ret) return ret; return ret; } /* Find a spt by guest gfn. */ static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn( struct intel_vgpu *vgpu, unsigned long gfn) { struct intel_vgpu_page_track *track; track = intel_vgpu_find_page_track(vgpu, gfn); if (track && track->handler == ppgtt_write_protection_handler) return track->priv_data; return NULL; } /* Find the spt by shadow page mfn. 
*/ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( struct intel_vgpu *vgpu, unsigned long mfn) { return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn); } static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); /* Allocate shadow page table without guest page. */ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { struct device *kdev = vgpu->gvt->gt->i915->drm.dev; struct intel_vgpu_ppgtt_spt *spt = NULL; dma_addr_t daddr; int ret; retry: spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); if (!spt) { if (reclaim_one_ppgtt_mm(vgpu->gvt)) goto retry; gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); return ERR_PTR(-ENOMEM); } spt->vgpu = vgpu; atomic_set(&spt->refcount, 1); INIT_LIST_HEAD(&spt->post_shadow_list); /* * Init shadow_page. */ spt->shadow_page.type = type; daddr = dma_map_page(kdev, spt->shadow_page.page, 0, 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(kdev, daddr)) { gvt_vgpu_err("fail to map dma addr\n"); ret = -EINVAL; goto err_free_spt; } spt->shadow_page.vaddr = page_address(spt->shadow_page.page); spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); if (ret) goto err_unmap_dma; return spt; err_unmap_dma: dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL); err_free_spt: free_spt(spt); return ERR_PTR(ret); } /* Allocate shadow page table associated with specific gfn. */ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type, unsigned long gfn, bool guest_pde_ips) { struct intel_vgpu_ppgtt_spt *spt; int ret; spt = ppgtt_alloc_spt(vgpu, type); if (IS_ERR(spt)) return spt; /* * Init guest_page. */ ret = intel_vgpu_register_page_track(vgpu, gfn, ppgtt_write_protection_handler, spt); if (ret) { ppgtt_free_spt(spt); return ERR_PTR(ret); } spt->guest_page.type = type; spt->guest_page.gfn = gfn; spt->guest_page.pde_ips = guest_pde_ips; trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); return spt; } #define pt_entry_size_shift(spt) \ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) #define pt_entries(spt) \ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) #define for_each_present_guest_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); \ i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_guest_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) #define for_each_present_shadow_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); \ i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_shadow_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) #define for_each_shadow_entry(spt, e, i) \ for (i = 0; i < pt_entries(spt); \ i += (spt->shadow_page.pde_ips ? 
GTT_64K_PTE_STRIDE : 1)) \ if (!ppgtt_get_shadow_entry(spt, e, i)) static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) { int v = atomic_read(&spt->refcount); trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); atomic_inc(&spt->refcount); } static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt) { int v = atomic_read(&spt->refcount); trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); return atomic_dec_return(&spt->refcount); } static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *e) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; enum intel_gvt_gtt_type cur_pt_type; GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { cur_pt_type = get_next_pt_type(e->type); if (!gtt_type_is_pt(cur_pt_type) || !gtt_type_is_pt(cur_pt_type + 1)) { drm_WARN(&i915->drm, 1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type); return -EINVAL; } cur_pt_type += 1; if (ops->get_pfn(e) == vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) return 0; } s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); if (!s) { gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", ops->get_pfn(e)); return -ENXIO; } return ppgtt_invalidate_spt(s); } static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *entry) { struct intel_vgpu *vgpu = spt->vgpu; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; int type; pfn = ops->get_pfn(entry); type = spt->shadow_page.type; /* Uninitialized spte or unshadowed spte. */ if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_entry e; unsigned long index; int ret; trace_spt_change(spt->vgpu->id, "die", spt, spt->guest_page.gfn, spt->shadow_page.type); if (ppgtt_put_spt(spt) > 0) return 0; for_each_present_shadow_entry(spt, &e, index) { switch (e.type) { case GTT_TYPE_PPGTT_PTE_4K_ENTRY: gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(spt, &e); break; case GTT_TYPE_PPGTT_PTE_64K_ENTRY: /* We don't setup 64K shadow entry so far. 
*/ WARN(1, "suspicious 64K gtt entry\n"); continue; case GTT_TYPE_PPGTT_PTE_2M_ENTRY: gvt_vdbg_mm("invalidate 2M entry\n"); continue; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: WARN(1, "GVT doesn't support 1GB page\n"); continue; case GTT_TYPE_PPGTT_PML4_ENTRY: case GTT_TYPE_PPGTT_PDP_ENTRY: case GTT_TYPE_PPGTT_PDE_ENTRY: gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n"); ret = ppgtt_invalidate_spt_by_shadow_entry( spt->vgpu, &e); if (ret) goto fail; break; default: GEM_BUG_ON(1); } } trace_spt_change(spt->vgpu->id, "release", spt, spt->guest_page.gfn, spt->shadow_page.type); ppgtt_free_spt(spt); return 0; fail: gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", spt, e.val64, e.type); return ret; } static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (GRAPHICS_VER(dev_priv) == 9) { u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & GAMW_ECO_ENABLE_64K_IPS_FIELD; return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD; } else if (GRAPHICS_VER(dev_priv) >= 11) { /* 64K paging only controlled by IPS bit in PTE now. */ return true; } else return false; } static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = NULL; bool ips = false; int ret; GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); if (spt) { ppgtt_get_spt(spt); if (ips != spt->guest_page.pde_ips) { spt->guest_page.pde_ips = ips; gvt_dbg_mm("reshadow PDE since ips changed\n"); clear_page(spt->shadow_page.vaddr); ret = ppgtt_populate_spt(spt); if (ret) { ppgtt_put_spt(spt); goto err; } } } else { int type = get_next_pt_type(we->type); if (!gtt_type_is_pt(type)) { ret = -EINVAL; goto err; } spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); if (IS_ERR(spt)) { ret = PTR_ERR(spt); goto err; } ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); if (ret) goto err_free_spt; ret = ppgtt_populate_spt(spt); if (ret) goto err_free_spt; trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, spt->shadow_page.type); } return spt; err_free_spt: ppgtt_free_spt(spt); spt = NULL; err: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, we->val64, we->type); return ERR_PTR(ret); } static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge) { const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; se->type = ge->type; se->val64 = ge->val64; /* Because we always split 64KB pages, so clear IPS in shadow PDE. 
*/ if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY) ops->clear_ips(se); ops->set_pfn(se, s->shadow_page.mfn); } static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *sub_spt; struct intel_gvt_gtt_entry sub_se; unsigned long start_gfn; dma_addr_t dma_addr; unsigned long sub_index; int ret; gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index); start_gfn = ops->get_pfn(se); sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT); if (IS_ERR(sub_spt)) return PTR_ERR(sub_spt); for_each_shadow_entry(sub_spt, &sub_se, sub_index) { ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, PAGE_SIZE, &dma_addr); if (ret) goto err; sub_se.val64 = se->val64; /* Copy the PAT field from PDE. */ sub_se.val64 &= ~_PAGE_PAT; sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5; ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT); ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index); } /* Clear dirty field. */ se->val64 &= ~_PAGE_DIRTY; ops->clear_pse(se); ops->clear_ips(se); ops->set_pfn(se, sub_spt->shadow_page.mfn); ppgtt_set_shadow_entry(spt, se, index); return 0; err: /* Cancel the existing addess mappings of DMA addr. */ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(sub_spt, &sub_se); } /* Release the new allocated spt. */ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt, sub_spt->guest_page.gfn, sub_spt->shadow_page.type); ppgtt_free_spt(sub_spt); return ret; } static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = *se; unsigned long start_gfn; dma_addr_t dma_addr; int i, ret; gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index); GEM_BUG_ON(index % GTT_64K_PTE_STRIDE); start_gfn = ops->get_pfn(se); entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY; ops->set_64k_splited(&entry); for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i, PAGE_SIZE, &dma_addr); if (ret) return ret; ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT); ppgtt_set_shadow_entry(spt, &entry, index + i); } return 0; } static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *ge) { const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; unsigned long gfn; dma_addr_t dma_addr; int ret; if (!pte_ops->test_present(ge)) return 0; gfn = pte_ops->get_pfn(ge); switch (ge->type) { case GTT_TYPE_PPGTT_PTE_4K_ENTRY: gvt_vdbg_mm("shadow 4K gtt entry\n"); ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr); if (ret) return -ENXIO; break; case GTT_TYPE_PPGTT_PTE_64K_ENTRY: gvt_vdbg_mm("shadow 64K gtt entry\n"); /* * The layout of 64K page is special, the page size is * controlled by uper PDE. To be simple, we always split * 64K page to smaller 4K pages in shadow PT. 
*/ return split_64KB_gtt_entry(vgpu, spt, index, &se); case GTT_TYPE_PPGTT_PTE_2M_ENTRY: gvt_vdbg_mm("shadow 2M gtt entry\n"); if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) || intel_gvt_dma_map_guest_page(vgpu, gfn, I915_GTT_PAGE_SIZE_2M, &dma_addr)) return split_2MB_gtt_entry(vgpu, spt, index, &se); break; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: gvt_vgpu_err("GVT doesn't support 1GB entry\n"); return -EINVAL; default: GEM_BUG_ON(1); return -EINVAL; } /* Successfully shadowed a 4K or 2M page (without splitting). */ pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); ppgtt_set_shadow_entry(spt, &se, index); return 0; } static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; unsigned long i; int ret; trace_spt_change(spt->vgpu->id, "born", spt, spt->guest_page.gfn, spt->shadow_page.type); for_each_present_guest_entry(spt, &ge, i) { if (gtt_type_is_pt(get_next_pt_type(ge.type))) { s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); if (IS_ERR(s)) { ret = PTR_ERR(s); goto fail; } ppgtt_get_shadow_entry(spt, &se, i); ppgtt_generate_shadow_entry(&se, s, &ge); ppgtt_set_shadow_entry(spt, &se, i); } else { ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge); if (ret) goto fail; } } return 0; fail: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, ge.val64, ge.type); return ret; } static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *se, unsigned long index) { struct intel_vgpu *vgpu = spt->vgpu; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int ret; trace_spt_guest_change(spt->vgpu->id, "remove", spt, spt->shadow_page.type, se->val64, index); gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", se->type, index, se->val64); if (!ops->test_present(se)) return 0; if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) return 0; if (gtt_type_is_pt(get_next_pt_type(se->type))) { struct intel_vgpu_ppgtt_spt *s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); if (!s) { gvt_vgpu_err("fail to find guest page\n"); ret = -ENXIO; goto fail; } ret = ppgtt_invalidate_spt(s); if (ret) goto fail; } else { /* We don't setup 64K shadow entry so far. 
*/ WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY, "suspicious 64K entry\n"); ppgtt_invalidate_pte(spt, se); } return 0; fail: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, se->val64, se->type); return ret; } static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index) { struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt_gtt_entry m; struct intel_vgpu_ppgtt_spt *s; int ret; trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, we->val64, index); gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", we->type, index, we->val64); if (gtt_type_is_pt(get_next_pt_type(we->type))) { s = ppgtt_populate_spt_by_guest_entry(vgpu, we); if (IS_ERR(s)) { ret = PTR_ERR(s); goto fail; } ppgtt_get_shadow_entry(spt, &m, index); ppgtt_generate_shadow_entry(&m, s, we); ppgtt_set_shadow_entry(spt, &m, index); } else { ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we); if (ret) goto fail; } return 0; fail: gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", spt, we->val64, we->type); return ret; } static int sync_oos_page(struct intel_vgpu *vgpu, struct intel_vgpu_oos_page *oos_page) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; struct intel_gvt_gtt_entry old, new; int index; int ret; trace_oos_change(vgpu->id, "sync", oos_page->id, spt, spt->guest_page.type); old.type = new.type = get_entry_type(spt->guest_page.type); old.val64 = new.val64 = 0; for (index = 0; index < (I915_GTT_PAGE_SIZE >> info->gtt_entry_size_shift); index++) { ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); ops->get_entry(NULL, &new, index, true, spt->guest_page.gfn << PAGE_SHIFT, vgpu); if (old.val64 == new.val64 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) continue; trace_oos_sync(vgpu->id, oos_page->id, spt, spt->guest_page.type, new.val64, index); ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); if (ret) return ret; ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); } spt->guest_page.write_cnt = 0; list_del_init(&spt->post_shadow_list); return 0; } static int detach_oos_page(struct intel_vgpu *vgpu, struct intel_vgpu_oos_page *oos_page) { struct intel_gvt *gvt = vgpu->gvt; struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; trace_oos_change(vgpu->id, "detach", oos_page->id, spt, spt->guest_page.type); spt->guest_page.write_cnt = 0; spt->guest_page.oos_page = NULL; oos_page->spt = NULL; list_del_init(&oos_page->vm_list); list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); return 0; } static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, struct intel_vgpu_ppgtt_spt *spt) { struct intel_gvt *gvt = spt->vgpu->gvt; int ret; ret = intel_gvt_read_gpa(spt->vgpu, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, oos_page->mem, I915_GTT_PAGE_SIZE); if (ret) return ret; oos_page->spt = spt; spt->guest_page.oos_page = oos_page; list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); trace_oos_change(spt->vgpu->id, "attach", oos_page->id, spt, spt->guest_page.type); return 0; } static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; int ret; ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn); if (ret) return ret; trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, spt, 
spt->guest_page.type); list_del_init(&oos_page->vm_list); return sync_oos_page(spt->vgpu, oos_page); } static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt) { struct intel_gvt *gvt = spt->vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; int ret; WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); if (list_empty(&gtt->oos_page_free_list_head)) { oos_page = container_of(gtt->oos_page_use_list_head.next, struct intel_vgpu_oos_page, list); ret = ppgtt_set_guest_page_sync(oos_page->spt); if (ret) return ret; ret = detach_oos_page(spt->vgpu, oos_page); if (ret) return ret; } else oos_page = container_of(gtt->oos_page_free_list_head.next, struct intel_vgpu_oos_page, list); return attach_oos_page(oos_page, spt); } static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) return -EINVAL; trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, spt, spt->guest_page.type); list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn); } /** * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU * @vgpu: a vGPU * * This function is called before submitting a guest workload to host, * to sync all the out-of-synced shadow for vGPU * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_oos_page *oos_page; int ret; if (!enable_out_of_sync) return 0; list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { oos_page = container_of(pos, struct intel_vgpu_oos_page, vm_list); ret = ppgtt_set_guest_page_sync(oos_page->spt); if (ret) return ret; } return 0; } /* * The heart of PPGTT shadow page table. */ static int ppgtt_handle_guest_write_page_table( struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index) { struct intel_vgpu *vgpu = spt->vgpu; int type = spt->shadow_page.type; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry old_se; int new_present; int i, ret; new_present = ops->test_present(we); /* * Adding the new entry first and then removing the old one, that can * guarantee the ppgtt table is validated during the window between * adding and removal. */ ppgtt_get_shadow_entry(spt, &old_se, index); if (new_present) { ret = ppgtt_handle_guest_entry_add(spt, we, index); if (ret) goto fail; } ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index); if (ret) goto fail; if (!new_present) { /* For 64KB splited entries, we need clear them all. 
*/ if (ops->test_64k_splited(&old_se) && !(index % GTT_64K_PTE_STRIDE)) { gvt_vdbg_mm("remove splited 64K shadow entries\n"); for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { ops->clear_64k_splited(&old_se); ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); ppgtt_set_shadow_entry(spt, &old_se, index + i); } } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY || old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { ops->clear_pse(&old_se); ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); ppgtt_set_shadow_entry(spt, &old_se, index); } else { ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); ppgtt_set_shadow_entry(spt, &old_se, index); } } return 0; fail: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", spt, we->val64, we->type); return ret; } static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt) { return enable_out_of_sync && gtt_type_is_pte_pt(spt->guest_page.type) && spt->guest_page.write_cnt >= 2; } static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, unsigned long index) { set_bit(index, spt->post_shadow_bitmap); if (!list_empty(&spt->post_shadow_list)) return; list_add_tail(&spt->post_shadow_list, &spt->vgpu->gtt.post_shadow_list_head); } /** * intel_vgpu_flush_post_shadow - flush the post shadow transactions * @vgpu: a vGPU * * This function is called before submitting a guest workload to host, * to flush all the post shadows for a vGPU. * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_ppgtt_spt *spt; struct intel_gvt_gtt_entry ge; unsigned long index; int ret; list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { spt = container_of(pos, struct intel_vgpu_ppgtt_spt, post_shadow_list); for_each_set_bit(index, spt->post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE) { ppgtt_get_guest_entry(spt, &ge, index); ret = ppgtt_handle_guest_write_page_table(spt, &ge, index); if (ret) return ret; clear_bit(index, spt->post_shadow_bitmap); } list_del_init(&spt->post_shadow_list); } return 0; } static int ppgtt_handle_guest_write_page_table_bytes( struct intel_vgpu_ppgtt_spt *spt, u64 pa, void *p_data, int bytes) { struct intel_vgpu *vgpu = spt->vgpu; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; struct intel_gvt_gtt_entry we, se; unsigned long index; int ret; index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; ppgtt_get_guest_entry(spt, &we, index); /* * For page table which has 64K gtt entry, only PTE#0, PTE#16, * PTE#32, ... PTE#496 are used. Unused PTEs update should be * ignored. 
*/ if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY && (index % GTT_64K_PTE_STRIDE)) { gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n", index); return 0; } if (bytes == info->gtt_entry_size) { ret = ppgtt_handle_guest_write_page_table(spt, &we, index); if (ret) return ret; } else { if (!test_bit(index, spt->post_shadow_bitmap)) { int type = spt->shadow_page.type; ppgtt_get_shadow_entry(spt, &se, index); ret = ppgtt_handle_guest_entry_removal(spt, &se, index); if (ret) return ret; ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); ppgtt_set_shadow_entry(spt, &se, index); } ppgtt_set_post_shadow(spt, index); } if (!enable_out_of_sync) return 0; spt->guest_page.write_cnt++; if (spt->guest_page.oos_page) ops->set_entry(spt->guest_page.oos_page->mem, &we, index, false, 0, vgpu); if (can_do_out_of_sync(spt)) { if (!spt->guest_page.oos_page) ppgtt_allocate_oos_page(spt); ret = ppgtt_set_guest_page_oos(spt); if (ret < 0) return ret; } return 0; } static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_gvt_gtt_entry se; int index; if (!mm->ppgtt_mm.shadowed) return; for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { ppgtt_get_shadow_root_entry(mm, &se, index); if (!ops->test_present(&se)) continue; ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se); se.val64 = 0; ppgtt_set_shadow_root_entry(mm, &se, index); trace_spt_guest_change(vgpu->id, "destroy root pointer", NULL, se.type, se.val64, index); } mm->ppgtt_mm.shadowed = false; } static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_vgpu_ppgtt_spt *spt; struct intel_gvt_gtt_entry ge, se; int index, ret; if (mm->ppgtt_mm.shadowed) return 0; if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -EINVAL; mm->ppgtt_mm.shadowed = true; for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { ppgtt_get_guest_root_entry(mm, &ge, index); if (!ops->test_present(&ge)) continue; trace_spt_guest_change(vgpu->id, __func__, NULL, ge.type, ge.val64, index); spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); if (IS_ERR(spt)) { gvt_vgpu_err("fail to populate guest root pointer\n"); ret = PTR_ERR(spt); goto fail; } ppgtt_generate_shadow_entry(&se, spt, &ge); ppgtt_set_shadow_root_entry(mm, &se, index); trace_spt_guest_change(vgpu->id, "populate root pointer", NULL, se.type, se.val64, index); } return 0; fail: invalidate_ppgtt_mm(mm); return ret; } static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu) { struct intel_vgpu_mm *mm; mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) return NULL; mm->vgpu = vgpu; kref_init(&mm->ref); atomic_set(&mm->pincount, 0); return mm; } static void vgpu_free_mm(struct intel_vgpu_mm *mm) { kfree(mm); } /** * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU * @vgpu: a vGPU * @root_entry_type: ppgtt root entry type * @pdps: guest pdps. * * This function is used to create a ppgtt mm object for a vGPU. * * Returns: * Zero on success, negative error code in pointer if failed. 
*/ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, enum intel_gvt_gtt_type root_entry_type, u64 pdps[]) { struct intel_gvt *gvt = vgpu->gvt; struct intel_vgpu_mm *mm; int ret; mm = vgpu_alloc_mm(vgpu); if (!mm) return ERR_PTR(-ENOMEM); mm->type = INTEL_GVT_MM_PPGTT; GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY); mm->ppgtt_mm.root_entry_type = root_entry_type; INIT_LIST_HEAD(&mm->ppgtt_mm.list); INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list); INIT_LIST_HEAD(&mm->ppgtt_mm.link); if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) mm->ppgtt_mm.guest_pdps[0] = pdps[0]; else memcpy(mm->ppgtt_mm.guest_pdps, pdps, sizeof(mm->ppgtt_mm.guest_pdps)); ret = shadow_ppgtt_mm(mm); if (ret) { gvt_vgpu_err("failed to shadow ppgtt mm\n"); vgpu_free_mm(mm); return ERR_PTR(ret); } list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); mutex_lock(&gvt->gtt.ppgtt_mm_lock); list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); mutex_unlock(&gvt->gtt.ppgtt_mm_lock); return mm; } static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) { struct intel_vgpu_mm *mm; unsigned long nr_entries; mm = vgpu_alloc_mm(vgpu); if (!mm) return ERR_PTR(-ENOMEM); mm->type = INTEL_GVT_MM_GGTT; nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; mm->ggtt_mm.virtual_ggtt = vzalloc(array_size(nr_entries, vgpu->gvt->device_info.gtt_entry_size)); if (!mm->ggtt_mm.virtual_ggtt) { vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); if (!mm->ggtt_mm.host_ggtt_aperture) { vfree(mm->ggtt_mm.virtual_ggtt); vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); if (!mm->ggtt_mm.host_ggtt_hidden) { vfree(mm->ggtt_mm.host_ggtt_aperture); vfree(mm->ggtt_mm.virtual_ggtt); vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } return mm; } /** * _intel_vgpu_mm_release - destroy a mm object * @mm_ref: a kref object * * This function is used to destroy a mm object for vGPU * */ void _intel_vgpu_mm_release(struct kref *mm_ref) { struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref); if (GEM_WARN_ON(atomic_read(&mm->pincount))) gvt_err("vgpu mm pin count bug detected\n"); if (mm->type == INTEL_GVT_MM_PPGTT) { list_del(&mm->ppgtt_mm.list); mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); list_del(&mm->ppgtt_mm.lru_list); mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); vfree(mm->ggtt_mm.host_ggtt_aperture); vfree(mm->ggtt_mm.host_ggtt_hidden); } vgpu_free_mm(mm); } /** * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object * @mm: a vGPU mm object * * This function is called when user doesn't want to use a vGPU mm object */ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) { atomic_dec_if_positive(&mm->pincount); } /** * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object * @mm: target vgpu mm * * This function is called when user wants to use a vGPU mm object. If this * mm object hasn't been shadowed yet, the shadow will be populated at this * time. * * Returns: * Zero on success, negative error code if failed. 
*/ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) { int ret; atomic_inc(&mm->pincount); if (mm->type == INTEL_GVT_MM_PPGTT) { ret = shadow_ppgtt_mm(mm); if (ret) return ret; mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); list_move_tail(&mm->ppgtt_mm.lru_list, &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); } return 0; } static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) { struct intel_vgpu_mm *mm; struct list_head *pos, *n; mutex_lock(&gvt->gtt.ppgtt_mm_lock); list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); if (atomic_read(&mm->pincount)) continue; list_del_init(&mm->ppgtt_mm.lru_list); mutex_unlock(&gvt->gtt.ppgtt_mm_lock); invalidate_ppgtt_mm(mm); return 1; } mutex_unlock(&gvt->gtt.ppgtt_mm_lock); return 0; } /* * GMA translation APIs. */ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) { struct intel_vgpu *vgpu = mm->vgpu; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); if (!s) return -ENXIO; if (!guest) ppgtt_get_shadow_entry(s, e, index); else ppgtt_get_guest_entry(s, e, index); return 0; } /** * intel_vgpu_gma_to_gpa - translate a gma to GPA * @mm: mm object. could be a PPGTT or GGTT mm object * @gma: graphics memory address in this mm object * * This function is used to translate a graphics memory address in specific * graphics memory space to guest physical address. * * Returns: * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed. */ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) { struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; unsigned long gpa = INTEL_GVT_INVALID_ADDR; unsigned long gma_index[4]; struct intel_gvt_gtt_entry e; int i, levels = 0; int ret; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT); if (mm->type == INTEL_GVT_MM_GGTT) { if (!vgpu_gmadr_is_valid(vgpu, gma)) goto err; ggtt_get_guest_entry(mm, &e, gma_ops->gma_to_ggtt_pte_index(gma)); gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + (gma & ~I915_GTT_PAGE_MASK); trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); } else { switch (mm->ppgtt_mm.root_entry_type) { case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: ppgtt_get_shadow_root_entry(mm, &e, 0); gma_index[0] = gma_ops->gma_to_pml4_index(gma); gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); gma_index[2] = gma_ops->gma_to_pde_index(gma); gma_index[3] = gma_ops->gma_to_pte_index(gma); levels = 4; break; case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: ppgtt_get_shadow_root_entry(mm, &e, gma_ops->gma_to_l3_pdp_index(gma)); gma_index[0] = gma_ops->gma_to_pde_index(gma); gma_index[1] = gma_ops->gma_to_pte_index(gma); levels = 2; break; default: GEM_BUG_ON(1); } /* walk the shadow page table and get gpa from guest entry */ for (i = 0; i < levels; i++) { ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], (i == levels - 1)); if (ret) goto err; if (!pte_ops->test_present(&e)) { gvt_dbg_core("GMA 0x%lx is not present\n", gma); goto err; } } gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + (gma & ~I915_GTT_PAGE_MASK); trace_gma_translate(vgpu->id, "ppgtt", 0, mm->ppgtt_mm.root_entry_type, gma, gpa); } return gpa; err: gvt_vgpu_err("invalid 
mm type: %d gma %lx\n", mm->type, gma); return INTEL_GVT_INVALID_ADDR; } static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; unsigned long index = off >> info->gtt_entry_size_shift; unsigned long gma; struct intel_gvt_gtt_entry e; if (bytes != 4 && bytes != 8) return -EINVAL; gma = index << I915_GTT_PAGE_SHIFT; if (!intel_gvt_ggtt_validate_range(vgpu, gma, 1 << I915_GTT_PAGE_SHIFT)) { gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); memset(p_data, 0, bytes); return 0; } ggtt_get_guest_entry(ggtt_mm, &e, index); memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), bytes); return 0; } /** * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read * @vgpu: a vGPU * @off: register offset * @p_data: data will be returned to guest * @bytes: data length * * This function is used to emulate the GTT MMIO register read * * Returns: * Zero on success, error code if failed. */ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; int ret; if (bytes != 4 && bytes != 8) return -EINVAL; off -= info->gtt_start_offset; ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes); return ret; } static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; pfn = pte_ops->get_pfn(entry); if (pfn != vgpu->gvt->gtt.scratch_mfn) intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; unsigned long gma, gfn; struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; dma_addr_t dma_addr; int ret; struct intel_gvt_partial_pte *partial_pte, *pos, *n; bool partial_update = false; if (bytes != 4 && bytes != 8) return -EINVAL; gma = g_gtt_index << I915_GTT_PAGE_SHIFT; /* the VM may configure the whole GM space when ballooning is used */ if (!vgpu_gmadr_is_valid(vgpu, gma)) return 0; e.type = GTT_TYPE_GGTT_PTE; memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes * write, save the first 4 bytes in a list and update virtual * PTE. Only update shadow PTE when the second 4 bytes comes. 
*/ if (bytes < info->gtt_entry_size) { bool found = false; list_for_each_entry_safe(pos, n, &ggtt_mm->ggtt_mm.partial_pte_list, list) { if (g_gtt_index == pos->offset >> info->gtt_entry_size_shift) { if (off != pos->offset) { /* the second partial part*/ int last_off = pos->offset & (info->gtt_entry_size - 1); memcpy((void *)&e.val64 + last_off, (void *)&pos->data + last_off, bytes); list_del(&pos->list); kfree(pos); found = true; break; } /* update of the first partial part */ pos->data = e.val64; ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); return 0; } } if (!found) { /* the first partial part */ partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL); if (!partial_pte) return -ENOMEM; partial_pte->offset = off; partial_pte->data = e.val64; list_add_tail(&partial_pte->list, &ggtt_mm->ggtt_mm.partial_pte_list); partial_update = true; } } if (!partial_update && (ops->test_present(&e))) { gfn = ops->get_pfn(&e); m.val64 = e.val64; m.type = e.type; ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial * update the entry in this situation p2m will fail * setting the shadow entry to point to a scratch page */ ops->set_pfn(&m, gvt->gtt.scratch_mfn); } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); } else { ops->set_pfn(&m, gvt->gtt.scratch_mfn); ops->clear_present(&m); } ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index); ggtt_invalidate_pte(vgpu, &e); ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ggtt_invalidate(gvt->gt); return 0; } /* * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write * @vgpu: a vGPU * @off: register offset * @p_data: data from guest write * @bytes: data length * * This function is used to emulate the GTT MMIO register write * * Returns: * Zero on success, error code if failed. */ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; int ret; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; int i; if (bytes != 4 && bytes != 8) return -EINVAL; off -= info->gtt_start_offset; ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); /* if ggtt of last submitted context is written, * that context is probably got unpinned. * Set last shadowed ctx to invalid. 
*/ for_each_engine(engine, vgpu->gvt->gt, i) { if (!s->last_ctx[i].valid) continue; if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift)) s->last_ctx[i].valid = false; } return ret; } static int alloc_scratch_pages(struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_gtt *gtt = &vgpu->gtt; const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = I915_GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; int i; struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_addr_t daddr; if (drm_WARN_ON(&i915->drm, type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_vgpu_err("fail to allocate scratch page\n"); return -ENOMEM; } daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { gvt_vgpu_err("fail to dmamap scratch_pt\n"); __free_page(virt_to_page(scratch_pt)); return -ENOMEM; } gtt->scratch_pt[type].page_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, gtt->scratch_pt[type].page_mfn); /* Build the tree by full filled the scratch pt with the entries which * point to the next level scratch pt or scratch page. The * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ if (type > GTT_TYPE_PPGTT_PTE_PT) { struct intel_gvt_gtt_entry se; memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); se.type = get_entry_type(type - 1); ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); /* The entry parameters like present/writeable/cache type * set to the same as i915's scratch page tree. */ se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (type == GTT_TYPE_PPGTT_PDE_PT) se.val64 |= PPAT_CACHED; for (i = 0; i < page_entry_num; i++) ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); } return 0; } static int release_scratch_page_tree(struct intel_vgpu *vgpu) { int i; struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_addr_t daddr; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { if (vgpu->gtt.scratch_pt[i].page != NULL) { daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << I915_GTT_PAGE_SHIFT); dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(vgpu->gtt.scratch_pt[i].page); vgpu->gtt.scratch_pt[i].page = NULL; vgpu->gtt.scratch_pt[i].page_mfn = 0; } } return 0; } static int create_scratch_page_tree(struct intel_vgpu *vgpu) { int i, ret; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { ret = alloc_scratch_pages(vgpu, i); if (ret) goto err; } return 0; err: release_scratch_page_tree(vgpu); return ret; } /** * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virulization * @vgpu: a vGPU * * This function is used to initialize per-vGPU graphics memory virtualization * components. * * Returns: * Zero on success, error code if failed. 
*/ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) { struct intel_vgpu_gtt *gtt = &vgpu->gtt; INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL); INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head); INIT_LIST_HEAD(&gtt->oos_page_list_head); INIT_LIST_HEAD(&gtt->post_shadow_list_head); gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu); if (IS_ERR(gtt->ggtt_mm)) { gvt_vgpu_err("fail to create mm for ggtt.\n"); return PTR_ERR(gtt->ggtt_mm); } intel_vgpu_reset_ggtt(vgpu, false); INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list); return create_scratch_page_tree(vgpu); } void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_mm *mm; list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); intel_vgpu_destroy_mm(mm); } if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head))) gvt_err("vgpu ppgtt mm is not fully destroyed\n"); if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) { gvt_err("Why we still has spt not freed?\n"); ppgtt_free_all_spt(vgpu); } } static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) { struct intel_gvt_partial_pte *pos, *next; list_for_each_entry_safe(pos, next, &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) { gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", pos->offset, pos->data); kfree(pos); } intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); vgpu->gtt.ggtt_mm = NULL; } /** * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization * @vgpu: a vGPU * * This function is used to clean up per-vGPU graphics memory virtualization * components. * * Returns: * Zero on success, error code if failed. */ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_destroy_ggtt_mm(vgpu); release_scratch_page_tree(vgpu); } static void clean_spt_oos(struct intel_gvt *gvt) { struct intel_gvt_gtt *gtt = &gvt->gtt; struct list_head *pos, *n; struct intel_vgpu_oos_page *oos_page; WARN(!list_empty(&gtt->oos_page_use_list_head), "someone is still using oos page\n"); list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) { oos_page = container_of(pos, struct intel_vgpu_oos_page, list); list_del(&oos_page->list); free_page((unsigned long)oos_page->mem); kfree(oos_page); } } static int setup_spt_oos(struct intel_gvt *gvt) { struct intel_gvt_gtt *gtt = &gvt->gtt; struct intel_vgpu_oos_page *oos_page; int i; int ret; INIT_LIST_HEAD(&gtt->oos_page_free_list_head); INIT_LIST_HEAD(&gtt->oos_page_use_list_head); for (i = 0; i < preallocated_oos_pages; i++) { oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); if (!oos_page) { ret = -ENOMEM; goto fail; } oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0); if (!oos_page->mem) { ret = -ENOMEM; kfree(oos_page); goto fail; } INIT_LIST_HEAD(&oos_page->list); INIT_LIST_HEAD(&oos_page->vm_list); oos_page->id = i; list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head); } gvt_dbg_mm("%d oos pages preallocated\n", i); return 0; fail: clean_spt_oos(gvt); return ret; } /** * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object * @vgpu: a vGPU * @pdps: pdp root array * * This function is used to find a PPGTT mm object from mm object pool * * Returns: * pointer to mm object on success, NULL if failed. 
*/ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]) { struct intel_vgpu_mm *mm; struct list_head *pos; list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); switch (mm->ppgtt_mm.root_entry_type) { case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: if (pdps[0] == mm->ppgtt_mm.guest_pdps[0]) return mm; break; case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps, sizeof(mm->ppgtt_mm.guest_pdps))) return mm; break; default: GEM_BUG_ON(1); } } return NULL; } /** * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object. * @vgpu: a vGPU * @root_entry_type: ppgtt root entry type * @pdps: guest pdps * * This function is used to find or create a PPGTT mm object from a guest. * * Returns: * Zero on success, negative error code if failed. */ struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, enum intel_gvt_gtt_type root_entry_type, u64 pdps[]) { struct intel_vgpu_mm *mm; mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); if (mm) { intel_vgpu_mm_get(mm); } else { mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps); if (IS_ERR(mm)) gvt_vgpu_err("fail to create mm\n"); } return mm; } /** * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object. * @vgpu: a vGPU * @pdps: guest pdps * * This function is used to find a PPGTT mm object from a guest and destroy it. * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]) { struct intel_vgpu_mm *mm; mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); if (!mm) { gvt_vgpu_err("fail to find ppgtt instance.\n"); return -EINVAL; } intel_vgpu_mm_put(mm); return 0; } /** * intel_gvt_init_gtt - initialize mm components of a GVT device * @gvt: GVT device * * This function is called at the initialization stage, to initialize * the mm components of a GVT device. * * Returns: * zero on success, negative error code if failed. */ int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; void *page; struct device *dev = gvt->gt->i915->drm.dev; dma_addr_t daddr; gvt_dbg_core("init gtt\n"); gvt->gtt.pte_ops = &gen8_gtt_pte_ops; gvt->gtt.gma_ops = &gen8_gtt_gma_ops; page = (void *)get_zeroed_page(GFP_KERNEL); if (!page) { gvt_err("fail to allocate scratch ggtt page\n"); return -ENOMEM; } daddr = dma_map_page(dev, virt_to_page(page), 0, 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { gvt_err("fail to dmamap scratch ggtt page\n"); __free_page(virt_to_page(page)); return -ENOMEM; } gvt->gtt.scratch_page = virt_to_page(page); gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); if (enable_out_of_sync) { ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(gvt->gtt.scratch_page); return ret; } } INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); mutex_init(&gvt->gtt.ppgtt_mm_lock); return 0; } /** * intel_gvt_clean_gtt - clean up mm components of a GVT device * @gvt: GVT device * * This function is called at the driver unloading stage, to clean up * the mm components of a GVT device. 
* */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { struct device *dev = gvt->gt->i915->drm.dev; dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << I915_GTT_PAGE_SHIFT); dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(gvt->gtt.scratch_page); if (enable_out_of_sync) clean_spt_oos(gvt); } /** * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances * @vgpu: a vGPU * * This function is called when invalidate all PPGTT instances of a vGPU. * */ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_mm *mm; list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); if (mm->type == INTEL_GVT_MM_PPGTT) { mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock); list_del_init(&mm->ppgtt_mm.lru_list); mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock); if (mm->ppgtt_mm.shadowed) invalidate_ppgtt_mm(mm); } } } /** * intel_vgpu_reset_ggtt - reset the GGTT entry * @vgpu: a vGPU * @invalidate_old: invalidate old entries * * This function is called at the vGPU create stage * to reset all the GGTT entries. * */ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry old_entry; u32 index; u32 num_entries; pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn); pte_ops->set_present(&entry); index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; while (num_entries--) { if (invalidate_old) { ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); ggtt_invalidate_pte(vgpu, &old_entry); } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); } index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; while (num_entries--) { if (invalidate_old) { ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); ggtt_invalidate_pte(vgpu, &old_entry); } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); } ggtt_invalidate(gvt->gt); } /** * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries * @gvt: intel gvt device * * This function is called at driver resume stage to restore * GGTT entries of every vGPU. * */ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) { struct intel_vgpu *vgpu; struct intel_vgpu_mm *mm; int id; gen8_pte_t pte; u32 idx, num_low, num_hi, offset; /* Restore dirty host ggtt for all vGPUs */ idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { mm = vgpu->gtt.ggtt_mm; num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_low; idx++) { pte = mm->ggtt_mm.host_ggtt_aperture[idx]; if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_hi; idx++) { pte = mm->ggtt_mm.host_ggtt_hidden[idx]; if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } } }
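/*
 * Illustrative sketch (standalone userspace program, not part of gtt.c): how
 * the gen8 gma_ops above slice a graphics memory address into the four-level
 * page-table indices, using the same shifts and masks as the
 * DEFINE_PPGTT_GMA_TO_INDEX macros. The sample address below is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long gma = 0x0000123456789abcULL;

	unsigned long pte  = (gma >> 12) & 0x1ff;	/* bits 20:12 */
	unsigned long pde  = (gma >> 21) & 0x1ff;	/* bits 29:21 */
	unsigned long pdp  = (gma >> 30) & 0x1ff;	/* bits 38:30 (4-level walk) */
	unsigned long pml4 = (gma >> 39) & 0x1ff;	/* bits 47:39 */

	printf("gma=%#llx -> pml4=%lu pdp=%lu pde=%lu pte=%lu offset=%#llx\n",
	       gma, pml4, pdp, pde, pte, gma & 0xfffULL);
	return 0;
}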
linux-master
drivers/gpu/drm/i915/gvt/gtt.c
/* * Copyright(c) 2011-2017 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "i915_drv.h" #include "gvt.h" /** * intel_vgpu_find_page_track - find page track record of guest page * @vgpu: a vGPU * @gfn: the gfn of guest page * * Returns: * A pointer to struct intel_vgpu_page_track if found, NULL otherwise. */ struct intel_vgpu_page_track *intel_vgpu_find_page_track( struct intel_vgpu *vgpu, unsigned long gfn) { return radix_tree_lookup(&vgpu->page_track_tree, gfn); } /** * intel_vgpu_register_page_track - register a guest page to be tracked * @vgpu: a vGPU * @gfn: the gfn of guest page * @handler: page track handler * @priv: tracker private * * Returns: * zero on success, negative error code if failed. */ int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, gvt_page_track_handler_t handler, void *priv) { struct intel_vgpu_page_track *track; int ret; track = intel_vgpu_find_page_track(vgpu, gfn); if (track) return -EEXIST; track = kzalloc(sizeof(*track), GFP_KERNEL); if (!track) return -ENOMEM; track->handler = handler; track->priv_data = priv; ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); if (ret) { kfree(track); return ret; } return 0; } /** * intel_vgpu_unregister_page_track - unregister the tracked guest page * @vgpu: a vGPU * @gfn: the gfn of guest page * */ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, unsigned long gfn) { struct intel_vgpu_page_track *track; track = radix_tree_delete(&vgpu->page_track_tree, gfn); if (track) { if (track->tracked) intel_gvt_page_track_remove(vgpu, gfn); kfree(track); } } /** * intel_vgpu_enable_page_track - set write-protection on guest page * @vgpu: a vGPU * @gfn: the gfn of guest page * * Returns: * zero on success, negative error code if failed. */ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) { struct intel_vgpu_page_track *track; int ret; track = intel_vgpu_find_page_track(vgpu, gfn); if (!track) return -ENXIO; if (track->tracked) return 0; ret = intel_gvt_page_track_add(vgpu, gfn); if (ret) return ret; track->tracked = true; return 0; } /** * intel_vgpu_disable_page_track - cancel write-protection on guest page * @vgpu: a vGPU * @gfn: the gfn of guest page * * Returns: * zero on success, negative error code if failed.
*/ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) { struct intel_vgpu_page_track *track; int ret; track = intel_vgpu_find_page_track(vgpu, gfn); if (!track) return -ENXIO; if (!track->tracked) return 0; ret = intel_gvt_page_track_remove(vgpu, gfn); if (ret) return ret; track->tracked = false; return 0; } /** * intel_vgpu_page_track_handler - called when write to write-protected page * @vgpu: a vGPU * @gpa: the gpa of this write * @data: the writed data * @bytes: the length of this write * * Returns: * zero on success, negative error code if failed. */ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, void *data, unsigned int bytes) { struct intel_vgpu_page_track *page_track; int ret = 0; page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); if (!page_track) return -ENXIO; if (unlikely(vgpu->failsafe)) { /* Remove write protection to prevent furture traps. */ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); if (ret) gvt_err("guest page write error, gpa %llx\n", gpa); } return ret; }
linux-master
drivers/gpu/drm/i915/gvt/page_track.c
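page_track.c above keys per-page write-protection state by guest frame number (gfn) and forwards trapped guest writes to a registered handler. The stand-alone sketch below mirrors that register/enable/dispatch flow in user-space C so the control path is easier to follow; all names in it (page_track_entry, track_register, track_dispatch, the fixed-size table) are illustrative and are not the driver's API, which uses a radix tree plus intel_gvt_page_track_add()/remove() for the actual write protection.

/* Stand-alone sketch of the gfn -> handler dispatch pattern used above.
 * Every identifier here is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <stdbool.h>

#define TRACK_SLOTS 64
#define FAKE_PAGE_SHIFT 12

struct page_track_entry;
typedef int (*track_handler_t)(struct page_track_entry *e, uint64_t gpa,
			       const void *data, unsigned int bytes);

struct page_track_entry {
	uint64_t gfn;
	bool used;
	bool tracked;		/* write-protection armed */
	track_handler_t handler;
	void *priv;
};

static struct page_track_entry table[TRACK_SLOTS];

static struct page_track_entry *track_find(uint64_t gfn)
{
	for (int i = 0; i < TRACK_SLOTS; i++)
		if (table[i].used && table[i].gfn == gfn)
			return &table[i];
	return NULL;
}

static int track_register(uint64_t gfn, track_handler_t handler, void *priv)
{
	if (track_find(gfn))
		return -EEXIST;
	for (int i = 0; i < TRACK_SLOTS; i++) {
		if (!table[i].used) {
			table[i] = (struct page_track_entry){
				.gfn = gfn, .used = true,
				.handler = handler, .priv = priv,
			};
			return 0;
		}
	}
	return -ENOMEM;
}

/* Mirrors the shape of intel_vgpu_page_track_handler(): look up the entry
 * by gpa >> PAGE_SHIFT and forward the write to its handler, if armed.
 */
static int track_dispatch(uint64_t gpa, const void *data, unsigned int bytes)
{
	struct page_track_entry *e = track_find(gpa >> FAKE_PAGE_SHIFT);

	if (!e || !e->tracked)
		return -ENXIO;
	return e->handler(e, gpa, data, bytes);
}

static int on_guest_write(struct page_track_entry *e, uint64_t gpa,
			  const void *data, unsigned int bytes)
{
	printf("trapped %u-byte write at gpa 0x%llx\n",
	       bytes, (unsigned long long)gpa);
	return 0;
}

int main(void)
{
	uint32_t val = 0xdeadbeef;
	struct page_track_entry *e;

	track_register(0x1234, on_guest_write, NULL);
	e = track_find(0x1234);
	if (e)
		e->tracked = true;	/* "enable" write protection */
	return track_dispatch((0x1234ULL << FAKE_PAGE_SHIFT) | 0x40,
			      &val, sizeof(val));
}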
/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <[email protected]> * Eddie Dong <[email protected]> * Zhiyuan Lv <[email protected]> * * Contributors: * Min He <[email protected]> * Tina Zhang <[email protected]> * Pei Zhang <[email protected]> * Niu Bing <[email protected]> * Ping Gao <[email protected]> * Zhi Wang <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" #include "i915_pvinfo.h" #include "intel_mchbar_regs.h" #include "display/intel_display_types.h" #include "display/intel_dmc_regs.h" #include "display/intel_dp_aux_regs.h" #include "display/intel_dpio_phy.h" #include "display/intel_fbc.h" #include "display/intel_fdi_regs.h" #include "display/intel_pps_regs.h" #include "display/intel_psr_regs.h" #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) #define PCH_PP_CONTROL _MMIO(0xc7204) #define PCH_PP_ON_DELAYS _MMIO(0xc7208) #define PCH_PP_OFF_DELAYS _MMIO(0xc720c) #define PCH_PP_DIVISOR _MMIO(0xc7210) unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) { struct drm_i915_private *i915 = gvt->gt->i915; if (IS_BROADWELL(i915)) return D_BDW; else if (IS_SKYLAKE(i915)) return D_SKL; else if (IS_KABYLAKE(i915)) return D_KBL; else if (IS_BROXTON(i915)) return D_BXT; else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) return D_CFL; return 0; } static bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device) { return intel_gvt_get_device_type(gvt) & device; } static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); } static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); } struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, unsigned int offset) { struct intel_gvt_mmio_info *e; hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) { if (e->offset == offset) return e; } return NULL; } static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, u16 flags, u32 addr_mask, u32 ro_mask, u32 device, gvt_mmio_func read, gvt_mmio_func write) { struct intel_gvt_mmio_info *p; u32 start, end, i; if (!intel_gvt_match_device(gvt, device)) return 0; 
if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; start = offset; end = offset + size; for (i = start; i < end; i += 4) { p = intel_gvt_find_mmio_info(gvt, i); if (!p) { WARN(1, "assign a handler to a non-tracked mmio %x\n", i); return -ENODEV; } p->ro_mask = ro_mask; gvt->mmio.mmio_attribute[i / 4] = flags; if (read) p->read = read; if (write) p->write = write; } return 0; } /** * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine * @gvt: a GVT device * @offset: register offset * * Returns: * The engine containing the offset within its mmio page. */ const struct intel_engine_cs * intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) { struct intel_engine_cs *engine; enum intel_engine_id id; offset &= ~GENMASK(11, 0); for_each_engine(engine, gvt->gt, id) if (engine->mmio_base == offset) return engine; return NULL; } #define offset_to_fence_num(offset) \ ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3) #define fence_num_to_offset(num) \ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) { switch (reason) { case GVT_FAILSAFE_UNSUPPORTED_GUEST: pr_err("Detected your guest driver doesn't support GVT-g.\n"); break; case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: pr_err("Graphics resource is not enough for the guest\n"); break; case GVT_FAILSAFE_GUEST_ERR: pr_err("GVT Internal error for the guest\n"); break; default: break; } pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); vgpu->failsafe = true; } static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, unsigned int fence_num, void *p_data, unsigned int bytes) { unsigned int max_fence = vgpu_fence_sz(vgpu); if (fence_num >= max_fence) { gvt_vgpu_err("access oob fence reg %d/%d\n", fence_num, max_fence); /* When guest access oob fence regs without access * pv_info first, we treat guest not supporting GVT, * and we will let vgpu enter failsafe mode. */ if (!vgpu->pv_notified) enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); memset(p_data, 0, bytes); return -EINVAL; } return 0; } static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) { if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); else if (!ips) gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id); else { /* All engines must be enabled together for vGPU, * since we don't know which engine the ppgtt will * bind to when shadowing. 
*/ gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n", ips); return -EINVAL; } } write_vreg(vgpu, offset, p_data, bytes); return 0; } static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { int ret; ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off), p_data, bytes); if (ret) return ret; read_vreg(vgpu, off, p_data, bytes); return 0; } static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; unsigned int fence_num = offset_to_fence_num(off); int ret; ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes); if (ret) return ret; write_vreg(vgpu, off, p_data, bytes); mmio_hw_access_pre(gvt->gt); intel_vgpu_write_fence(vgpu, fence_num, vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); mmio_hw_access_post(gvt->gt); return 0; } #define CALC_MODE_MASK_REG(old, new) \ (((new) & GENMASK(31, 16)) \ | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \ | ((new) & ((new) >> 16)))) static int mul_force_wake_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 old, new; u32 ack_reg_offset; old = vgpu_vreg(vgpu, offset); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; break; case FORCEWAKE_GT_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG; break; case FORCEWAKE_MEDIA_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG; break; default: /*should not hit here*/ gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset); return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; } vgpu_vreg(vgpu, offset) = new; vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0)); return 0; } static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { intel_engine_mask_t engine_mask = 0; u32 data; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & GEN6_GRDOM_FULL) { gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); engine_mask = ALL_ENGINES; } else { if (data & GEN6_GRDOM_RENDER) { gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); engine_mask |= BIT(RCS0); } if (data & GEN6_GRDOM_MEDIA) { gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); engine_mask |= BIT(VCS0); } if (data & GEN6_GRDOM_BLT) { gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); engine_mask |= BIT(BCS0); } if (data & GEN6_GRDOM_VECS) { gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); engine_mask |= BIT(VECS0); } if (data & GEN8_GRDOM_MEDIA2) { gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); engine_mask |= BIT(VCS1); } if (data & GEN9_GRDOM_GUC) { gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; } engine_mask &= vgpu->gvt->gt->info.engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); /* sw will wait for the device to ack the reset request */ vgpu_vreg(vgpu, offset) = 0; return 0; } static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes); } static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes); } static int 
pch_pp_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) { vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON; vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE; vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN; vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE; } else vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~(PP_ON | PP_SEQUENCE_POWER_DOWN | PP_CYCLE_DELAY_ACTIVE); return 0; } static int transconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE) vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE; else vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE; return 0; } static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE) vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK; else vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK; if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK) vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE; else vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE; return 0; } static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { switch (offset) { case 0xe651c: case 0xe661c: case 0xe671c: case 0xe681c: vgpu_vreg(vgpu, offset) = 1 << 17; break; case 0xe6c04: vgpu_vreg(vgpu, offset) = 0x3; break; case 0xe6e1c: vgpu_vreg(vgpu, offset) = 0x2f << 16; break; default: return -EINVAL; } read_vreg(vgpu, offset, p_data, bytes); return 0; } /* * Only PIPE_A is enabled in current vGPU display and PIPE_A is tied to * TRANSCODER_A in HW. DDI/PORT could be PORT_x depends on * setup_virtual_dp_monitor(). * emulate_monitor_status_change() set up PLL for PORT_x as the initial enabled * DPLL. Later guest driver may setup a different DPLLx when setting mode. * So the correct sequence to find DP stream clock is: * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x. * Check correct PLLx for PORT_x to get PLL frequency and DP bitrate. 
* Then Refresh rate then can be calculated based on follow equations: * Pixel clock = h_total * v_total * refresh_rate * stream clock = Pixel clock * ls_clk = DP bitrate * Link M/N = strm_clk / ls_clk */ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)); switch (ddi_pll_sel) { case PORT_CLK_SEL_LCPLL_2700: dp_br = 270000 * 2; break; case PORT_CLK_SEL_LCPLL_1350: dp_br = 135000 * 2; break; case PORT_CLK_SEL_LCPLL_810: dp_br = 81000 * 2; break; case PORT_CLK_SEL_SPLL: { switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) { case SPLL_FREQ_810MHz: dp_br = 81000 * 2; break; case SPLL_FREQ_1350MHz: dp_br = 135000 * 2; break; case SPLL_FREQ_2700MHz: dp_br = 270000 * 2; break; default: gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n", vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL)); break; } break; } case PORT_CLK_SEL_WRPLL1: case PORT_CLK_SEL_WRPLL2: { u32 wrpll_ctl; int refclk, n, p, r; if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1) wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1)); else wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2)); switch (wrpll_ctl & WRPLL_REF_MASK) { case WRPLL_REF_PCH_SSC: refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; break; default: gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n", vgpu->id, port_name(port), wrpll_ctl); goto out; } r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK; p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; dp_br = (refclk * n / 10) / (p * r) * 2; break; } default: gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n", vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port))); break; } out: return dp_br; } static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; struct dpll clock = {0}; u32 temp; /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */ switch (port) { case PORT_A: phy = DPIO_PHY1; ch = DPIO_CH0; break; case PORT_B: phy = DPIO_PHY0; ch = DPIO_CH0; break; case PORT_C: phy = DPIO_PHY0; ch = DPIO_CH1; break; default: gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port)); goto out; } temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)); if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) { gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n", vgpu->id, port_name(port), temp); goto out; } clock.m1 = 2; clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22; if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE) clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2))); clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1))); clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch))); clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch))); clock.m = clock.m1 * clock.m2; clock.p = clock.p1 * clock.p2 * 5; if (clock.n == 0 || clock.p == 0) { gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port)); goto out; } clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22); clock.dot = 
DIV_ROUND_CLOSEST(clock.vco, clock.p); dp_br = clock.dot; out: return dp_br; } static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0; /* Find the enabled DPLL for the DDI/PORT */ if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) && (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) { dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >> DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port); } else { gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n", vgpu->id, port_name(port)); return dp_br; } /* Find PLL output frequency from correct DPLL, and get bir rate */ switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) & DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >> DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) { case DPLL_CTRL1_LINK_RATE_810: dp_br = 81000 * 2; break; case DPLL_CTRL1_LINK_RATE_1080: dp_br = 108000 * 2; break; case DPLL_CTRL1_LINK_RATE_1350: dp_br = 135000 * 2; break; case DPLL_CTRL1_LINK_RATE_1620: dp_br = 162000 * 2; break; case DPLL_CTRL1_LINK_RATE_2160: dp_br = 216000 * 2; break; case DPLL_CTRL1_LINK_RATE_2700: dp_br = 270000 * 2; break; default: dp_br = 0; gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n", vgpu->id, port_name(port), dpll_id); } return dp_br; } static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; enum port port; u32 dp_br, link_m, link_n, htotal, vtotal; /* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */ port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; if (port != PORT_B && port != PORT_D) { gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port)); return; } /* Calculate DP bitrate from PLL */ if (IS_BROADWELL(dev_priv)) dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port); else if (IS_BROXTON(dev_priv)) dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port); else dp_br = skl_vgpu_get_dp_bitrate(vgpu, port); /* Get DP link symbol clock M/N */ link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)); link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); /* Get H/V total from transcoder timing */ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); if (dp_br && link_n && htotal && vtotal) { u64 pixel_clk = 0; u32 new_rate = 0; u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k); /* Calcuate pixel clock by (ls_clk * M / N) */ pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n); pixel_clk *= MSEC_PER_SEC; /* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */ new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1)); if (*old_rate != new_rate) *old_rate = new_rate; gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n", vgpu->id, pipe_name(PIPE_A), new_rate); } } static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & TRANSCONF_ENABLE) { vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE; vgpu_update_refresh_rate(vgpu); vgpu_update_vblank_emulation(vgpu, true); } else { vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE; vgpu_update_vblank_emulation(vgpu, false); } return 0; } /* sorted in ascending order */ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0xd80), 
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) CL_PRIMITIVES_COUNT, //_MMIO(0x2340) PS_INVOCATION_COUNT, //_MMIO(0x2348) PS_DEPTH_COUNT, //_MMIO(0x2350) GEN8_CS_CHICKEN1,//_MMIO(0x2580) _MMIO(0x2690), _MMIO(0x2694), _MMIO(0x2698), _MMIO(0x2754), _MMIO(0x28a0), _MMIO(0x4de0), _MMIO(0x4de4), _MMIO(0x4dfc), GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010) _MMIO(0x7014), HDC_CHICKEN0,//_MMIO(0x7300) GEN8_HDC_CHICKEN1,//_MMIO(0x7304) _MMIO(0x7700), _MMIO(0x7704), _MMIO(0x7708), _MMIO(0x770c), _MMIO(0x83a8), _MMIO(0xb110), _MMIO(0xb118), _MMIO(0xe100), _MMIO(0xe18c), _MMIO(0xe48c), _MMIO(0xe5f4), _MMIO(0x64844), }; /* a simple bsearch */ static inline bool in_whitelist(u32 reg) { int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); i915_reg_t *array = force_nonpriv_white_list; while (left < right) { int mid = (left + right)/2; if (reg > array[mid].reg) left = mid + 1; else if (reg < array[mid].reg) right = mid; else return true; } return false; } static int force_nonpriv_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2); const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) { gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", vgpu->id, offset, bytes); return -EINVAL; } if (!in_whitelist(reg_nonpriv) && reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) { gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n", vgpu->id, reg_nonpriv, offset); } else intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); return 0; } static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) { vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE; } else { vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE; if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E))) vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) &= ~DP_TP_STATUS_AUTOTRAIN_DONE; } return 0; } static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data; return 0; } #define FDI_LINK_TRAIN_PATTERN1 0 #define FDI_LINK_TRAIN_PATTERN2 1 static int fdi_auto_training_started(struct intel_vgpu *vgpu) { u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E)); u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL); u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E)); if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) && (rx_ctl & FDI_RX_ENABLE) && (rx_ctl & FDI_AUTO_TRAINING) && (tx_ctl & DP_TP_CTL_ENABLE) && (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN)) return 1; else return 0; } static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, enum pipe pipe, unsigned int train_pattern) { i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl; unsigned int fdi_rx_check_bits, fdi_tx_check_bits; unsigned int fdi_rx_train_bits, fdi_tx_train_bits; unsigned int fdi_iir_check_bits; fdi_rx_imr = FDI_RX_IMR(pipe); fdi_tx_ctl = FDI_TX_CTL(pipe); fdi_rx_ctl = FDI_RX_CTL(pipe); if (train_pattern == FDI_LINK_TRAIN_PATTERN1) { fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT; fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1; fdi_iir_check_bits = FDI_RX_BIT_LOCK; } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) { fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT; fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; } else 
{ gvt_vgpu_err("Invalid train pattern %d\n", train_pattern); return -EINVAL; } fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits; fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits; /* If imr bit has been masked */ if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits) return 0; if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits) == fdi_tx_check_bits) && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits) == fdi_rx_check_bits)) return 1; else return 0; } #define INVALID_INDEX (~0U) static unsigned int calc_index(unsigned int offset, unsigned int start, unsigned int next, unsigned int end, i915_reg_t i915_end) { unsigned int range = next - start; if (!end) end = i915_mmio_reg_offset(i915_end); if (offset < start || offset > end) return INVALID_INDEX; offset -= start; return offset / range; } #define FDI_RX_CTL_TO_PIPE(offset) \ calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C)) #define FDI_TX_CTL_TO_PIPE(offset) \ calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C)) #define FDI_RX_IMR_TO_PIPE(offset) \ calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C)) static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { i915_reg_t fdi_rx_iir; unsigned int index; int ret; if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX) index = FDI_RX_CTL_TO_PIPE(offset); else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX) index = FDI_TX_CTL_TO_PIPE(offset); else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) index = FDI_RX_IMR_TO_PIPE(offset); else { gvt_vgpu_err("Unsupported registers %x\n", offset); return -EINVAL; } write_vreg(vgpu, offset, p_data, bytes); fdi_rx_iir = FDI_RX_IIR(index); ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1); if (ret < 0) return ret; if (ret) vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK; ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2); if (ret < 0) return ret; if (ret) vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK; if (offset == _FDI_RXA_CTL) if (fdi_auto_training_started(vgpu)) vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |= DP_TP_STATUS_AUTOTRAIN_DONE; return 0; } #define DP_TP_CTL_TO_PORT(offset) \ calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E)) static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { i915_reg_t status_reg; unsigned int index; u32 data; write_vreg(vgpu, offset, p_data, bytes); index = DP_TP_CTL_TO_PORT(offset); data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8; if (data == 0x2) { status_reg = DP_TP_STATUS(index); vgpu_vreg_t(vgpu, status_reg) |= (1 << 25); } return 0; } static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 reg_val; u32 sticky_mask; reg_val = *((u32 *)p_data); sticky_mask = GENMASK(27, 26) | (1 << 24); vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) | (vgpu_vreg(vgpu, offset) & sticky_mask); vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask); return 0; } static int pch_adpa_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER; return 0; } static int south_chicken2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; write_vreg(vgpu, offset, p_data, 
bytes); data = vgpu_vreg(vgpu, offset); if (data & FDI_MPHY_IOSFSB_RESET_CTL) vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS; else vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS; return 0; } #define DSPSURF_TO_PIPE(offset) \ calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C)) static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 pipe = DSPSURF_TO_PIPE(offset); int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); write_vreg(vgpu, offset, p_data, bytes); vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP) intel_vgpu_trigger_virtual_event(vgpu, event); else set_bit(event, vgpu->irq.flip_done_event[pipe]); return 0; } #define SPRSURF_TO_PIPE(offset) \ calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C)) static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 pipe = SPRSURF_TO_PIPE(offset); int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0); write_vreg(vgpu, offset, p_data, bytes); vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP) intel_vgpu_trigger_virtual_event(vgpu, event); else set_bit(event, vgpu->irq.flip_done_event[pipe]); return 0; } static int reg50080_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; enum pipe pipe = REG_50080_TO_PIPE(offset); enum plane_id plane = REG_50080_TO_PLANE(offset); int event = SKL_FLIP_EVENT(pipe, plane); write_vreg(vgpu, offset, p_data, bytes); if (plane == PLANE_PRIMARY) { vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; } else { vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); } if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC) intel_vgpu_trigger_virtual_event(vgpu, event); else set_bit(event, vgpu->irq.flip_done_event[pipe]); return 0; } static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; enum intel_gvt_event_type event; if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) event = AUX_CHANNEL_A; else if (reg == _PCH_DPB_AUX_CH_CTL || reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B))) event = AUX_CHANNEL_B; else if (reg == _PCH_DPC_AUX_CH_CTL || reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C))) event = AUX_CHANNEL_C; else if (reg == _PCH_DPD_AUX_CH_CTL || reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) event = AUX_CHANNEL_D; else { drm_WARN_ON(&dev_priv->drm, true); return -EINVAL; } intel_vgpu_trigger_virtual_event(vgpu, event); return 0; } static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value, unsigned int reg, int len, bool data_valid) { /* mark transaction done */ value |= DP_AUX_CH_CTL_DONE; value &= ~DP_AUX_CH_CTL_SEND_BUSY; value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR; if (data_valid) value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR; else value |= DP_AUX_CH_CTL_TIME_OUT_ERROR; /* message size */ value &= ~(0xf << 20); value |= (len << 20); vgpu_vreg(vgpu, reg) = value; if (value & DP_AUX_CH_CTL_INTERRUPT) return trigger_aux_channel_interrupt(vgpu, reg); return 0; } static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data 
*dpcd, u8 t) { if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { /* training pattern 1 for CR */ /* set LANE0_CR_DONE, LANE1_CR_DONE */ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE; /* set LANE2_CR_DONE, LANE3_CR_DONE */ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE; } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_2) { /* training pattern 2 for EQ */ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE; dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED; /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE; dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED; /* set INTERLANE_ALIGN_DONE */ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |= DPCD_INTERLANE_ALIGN_DONE; } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_LINK_TRAINING_DISABLED) { /* finish link training */ /* set sink status as synchronized */ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC; } } #define _REG_HSW_DP_AUX_CH_CTL(dp) \ ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010) #define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100) #define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8) #define dpy_is_valid_port(port) \ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS)) static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct intel_vgpu_display *display = &vgpu->display; int msg, addr, ctrl, op, len; int port_index = OFFSET_TO_DP_AUX_PORT(offset); struct intel_vgpu_dpcd_data *dpcd = NULL; struct intel_vgpu_port *port = NULL; u32 data; if (!dpy_is_valid_port(port_index)) { gvt_vgpu_err("Unsupported DP port access!\n"); return 0; } write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; } else if (IS_BROADWELL(vgpu->gvt->gt->i915) && offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) { /* write to the data registers */ return 0; } if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) { /* just want to clear the sticky bits */ vgpu_vreg(vgpu, offset) = 0; return 0; } port = &display->ports[port_index]; dpcd = port->dpcd; /* read out message from DATA1 register */ msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; ctrl = (msg >> 24) & 0xff; len = msg & 0xff; op = ctrl >> 4; if (op == GVT_AUX_NATIVE_WRITE) { int t; u8 buf[16]; if ((addr + len + 1) >= DPCD_SIZE) { /* * Write request exceeds what we supported, * DCPD spec: When a Source Device is writing a DPCD * address not supported by the Sink Device, the Sink * Device shall reply with AUX NACK and “M” equal to * zero. */ /* NAK the write */ vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK; dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true); return 0; } /* * Write request format: Headr (command + address + size) occupies * 4 bytes, followed by (len + 1) bytes of data. See details at * intel_dp_aux_transfer(). 
*/ if ((len + 1 + 4) > AUX_BURST_SIZE) { gvt_vgpu_err("dp_aux_header: len %d is too large\n", len); return -EINVAL; } /* unpack data from vreg to buf */ for (t = 0; t < 4; t++) { u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4); buf[t * 4] = (r >> 24) & 0xff; buf[t * 4 + 1] = (r >> 16) & 0xff; buf[t * 4 + 2] = (r >> 8) & 0xff; buf[t * 4 + 3] = r & 0xff; } /* write to virtual DPCD */ if (dpcd && dpcd->data_valid) { for (t = 0; t <= len; t++) { int p = addr + t; dpcd->data[p] = buf[t]; /* check for link training */ if (p == DPCD_TRAINING_PATTERN_SET) dp_aux_ch_ctl_link_training(dpcd, buf[t]); } } /* ACK the write */ vgpu_vreg(vgpu, offset + 4) = 0; dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1, dpcd && dpcd->data_valid); return 0; } if (op == GVT_AUX_NATIVE_READ) { int idx, i, ret = 0; if ((addr + len + 1) >= DPCD_SIZE) { /* * read request exceeds what we supported * DPCD spec: A Sink Device receiving a Native AUX CH * read request for an unsupported DPCD address must * reply with an AUX ACK and read data set equal to * zero instead of replying with AUX NACK. */ /* ACK the READ*/ vgpu_vreg(vgpu, offset + 4) = 0; vgpu_vreg(vgpu, offset + 8) = 0; vgpu_vreg(vgpu, offset + 12) = 0; vgpu_vreg(vgpu, offset + 16) = 0; vgpu_vreg(vgpu, offset + 20) = 0; dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2, true); return 0; } for (idx = 1; idx <= 5; idx++) { /* clear the data registers */ vgpu_vreg(vgpu, offset + 4 * idx) = 0; } /* * Read reply format: ACK (1 byte) plus (len + 1) bytes of data. */ if ((len + 2) > AUX_BURST_SIZE) { gvt_vgpu_err("dp_aux_header: len %d is too large\n", len); return -EINVAL; } /* read from virtual DPCD to vreg */ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */ if (dpcd && dpcd->data_valid) { for (i = 1; i <= (len + 1); i++) { int t; t = dpcd->data[addr + i - 1]; t <<= (24 - 8 * (i % 4)); ret |= t; if ((i % 4 == 3) || (i == (len + 1))) { vgpu_vreg(vgpu, offset + (i / 4 + 1) * 4) = ret; ret = 0; } } } dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2, dpcd && dpcd->data_valid); return 0; } /* i2c transaction starts */ intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data); if (data & DP_AUX_CH_CTL_INTERRUPT) trigger_aux_channel_interrupt(vgpu, offset); return 0; } static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH); write_vreg(vgpu, offset, p_data, bytes); return 0; } static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool vga_disable; write_vreg(vgpu, offset, p_data, bytes); vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE; gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id, vga_disable ? 
"Disable" : "Enable"); return 0; } static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu, unsigned int sbi_offset) { struct intel_vgpu_display *display = &vgpu->display; int num = display->sbi.number; int i; for (i = 0; i < num; ++i) if (display->sbi.registers[i].offset == sbi_offset) break; if (i == num) return 0; return display->sbi.registers[i].value; } static void write_virtual_sbi_register(struct intel_vgpu *vgpu, unsigned int offset, u32 value) { struct intel_vgpu_display *display = &vgpu->display; int num = display->sbi.number; int i; for (i = 0; i < num; ++i) { if (display->sbi.registers[i].offset == offset) break; } if (i == num) { if (num == SBI_REG_MAX) { gvt_vgpu_err("SBI caching meets maximum limits\n"); return; } display->sbi.number++; } display->sbi.registers[i].offset = offset; display->sbi.registers[i].value = value; } static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) { unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset); } read_vreg(vgpu, offset, p_data, bytes); return 0; } static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT); data |= SBI_READY; data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT); data |= SBI_RESPONSE_SUCCESS; vgpu_vreg(vgpu, offset) = data; if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) { unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA)); } return 0; } #define _vgtif_reg(x) \ (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)) static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool invalid_read = false; read_vreg(vgpu, offset, p_data, bytes); switch (offset) { case _vgtif_reg(magic) ... _vgtif_reg(vgt_id): if (offset + bytes > _vgtif_reg(vgt_id) + 4) invalid_read = true; break; case _vgtif_reg(avail_rs.mappable_gmadr.base) ... _vgtif_reg(avail_rs.fence_num): if (offset + bytes > _vgtif_reg(avail_rs.fence_num) + 4) invalid_read = true; break; case 0x78010: /* vgt_caps */ case 0x7881c: break; default: invalid_read = true; break; } if (invalid_read) gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; return 0; } static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) { enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; struct intel_vgpu_mm *mm; u64 *pdps; pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; fallthrough; case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY: case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY: return intel_vgpu_put_ppgtt_mm(vgpu, pdps); case VGT_G2V_EXECLIST_CONTEXT_CREATE: case VGT_G2V_EXECLIST_CONTEXT_DESTROY: case 1: /* Remove this in guest driver. 
*/ break; default: gvt_vgpu_err("Invalid PV notification %d\n", notification); } return 0; } static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) { struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj; char *env[3] = {NULL, NULL, NULL}; char vmid_str[20]; char display_ready_str[20]; snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready); env[0] = display_ready_str; snprintf(vmid_str, 20, "VMID=%d", vgpu->id); env[1] = vmid_str; return kobject_uevent_env(kobj, KOBJ_ADD, env); } static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data = *(u32 *)p_data; bool invalid_write = false; switch (offset) { case _vgtif_reg(display_ready): send_display_ready_uevent(vgpu, data ? 1 : 0); break; case _vgtif_reg(g2v_notify): handle_g2v_notification(vgpu, data); break; /* add xhot and yhot to handled list to avoid error log */ case _vgtif_reg(cursor_x_hot): case _vgtif_reg(cursor_y_hot): case _vgtif_reg(pdp[0].lo): case _vgtif_reg(pdp[0].hi): case _vgtif_reg(pdp[1].lo): case _vgtif_reg(pdp[1].hi): case _vgtif_reg(pdp[2].lo): case _vgtif_reg(pdp[2].hi): case _vgtif_reg(pdp[3].lo): case _vgtif_reg(pdp[3].hi): case _vgtif_reg(execlist_context_descriptor_lo): case _vgtif_reg(execlist_context_descriptor_hi): break; case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): invalid_write = true; enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); break; default: invalid_write = true; gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", offset, bytes, data); break; } if (!invalid_write) write_vreg(vgpu, offset, p_data, bytes); return 0; } static int pf_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 val = *(u32 *)p_data; if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) { drm_WARN_ONCE(&i915->drm, true, "VM(%d): guest is trying to scaling a plane\n", vgpu->id); return 0; } return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); } static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL)) vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); else vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); return 0; } static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; else vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; return 0; } static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM) vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM; return 0; } static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 mode; write_vreg(vgpu, offset, p_data, bytes); mode = vgpu_vreg(vgpu, offset); if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { drm_WARN_ONCE(&i915->drm, 1, "VM(%d): iGVT-g doesn't support GuC\n", vgpu->id); return 
0; } return 0; } static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 trtte = *(u32 *)p_data; if ((trtte & 1) && (trtte & (1 << 1)) == 0) { drm_WARN(&i915->drm, 1, "VM(%d): Use physical address for TRTT!\n", vgpu->id); return -EINVAL; } write_vreg(vgpu, offset, p_data, bytes); return 0; } static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); return 0; } static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = 0; if (vgpu_vreg(vgpu, 0x46010) & (1 << 31)) v |= (1 << 0); if (vgpu_vreg(vgpu, 0x46014) & (1 << 31)) v |= (1 << 8); if (vgpu_vreg(vgpu, 0x46040) & (1 << 31)) v |= (1 << 16); if (vgpu_vreg(vgpu, 0x46060) & (1 << 31)) v |= (1 << 24); vgpu_vreg(vgpu, offset) = v; return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); } static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 value = *(u32 *)p_data; u32 cmd = value & 0xff; u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA); switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: if (IS_SKYLAKE(vgpu->gvt->gt->i915) || IS_KABYLAKE(vgpu->gvt->gt->i915) || IS_COFFEELAKE(vgpu->gvt->gt->i915) || IS_COMETLAKE(vgpu->gvt->gt->i915)) { /** * "Read memory latency" command on gen9. * Below memory latency values are read * from skylake platform. */ if (!*data0) *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; } else if (IS_BROXTON(vgpu->gvt->gt->i915)) { /** * "Read memory latency" command on gen9. * Below memory latency values are read * from Broxton MRB. */ if (!*data0) *data0 = 0x16080707; else *data0 = 0x16161616; } break; case SKL_PCODE_CDCLK_CONTROL: if (IS_SKYLAKE(vgpu->gvt->gt->i915) || IS_KABYLAKE(vgpu->gvt->gt->i915) || IS_COFFEELAKE(vgpu->gvt->gt->i915) || IS_COMETLAKE(vgpu->gvt->gt->i915)) *data0 = SKL_CDCLK_READY_FOR_CHANGE; break; case GEN6_PCODE_READ_RC6VIDS: *data0 |= 0x1; break; } gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", vgpu->id, value, *data0); /** * PCODE_READY clear means ready for pcode read/write, * PCODE_ERROR_MASK clear means no error happened. In GVT-g we * always emulate as pcode read/write success and ready for access * anytime, since we don't touch real physical registers here. */ value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK); return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); } static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 value = *(u32 *)p_data; const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); if (value != 0 && !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", offset, value); return -EINVAL; } /* * Need to emulate all the HWSP register write to ensure host can * update the VM CSB status correctly. Here listed registers can * support BDW, SKL or other platforms with same HWSP registers. 
*/ if (unlikely(!engine)) { gvt_vgpu_err("access unknown hardware status page register:0x%x\n", offset); return -EINVAL; } vgpu->hws_pga[engine->id] = value; gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n", vgpu->id, value, offset); return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); } static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; if (IS_BROXTON(vgpu->gvt->gt->i915)) v &= (1 << 31) | (1 << 29); else v &= (1 << 31) | (1 << 29) | (1 << 9) | (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); v |= (v >> 1); return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); } static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; /* other bits are MBZ. */ v &= (1 << 31) | (1 << 30); v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30)); vgpu_vreg(vgpu, offset) = v; return 0; } static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; if (v & BXT_DE_PLL_PLL_ENABLE) v |= BXT_DE_PLL_LOCK; vgpu_vreg(vgpu, offset) = v; return 0; } static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; if (v & PORT_PLL_ENABLE) v |= PORT_PLL_LOCK; vgpu_vreg(vgpu, offset) = v; return 0; } static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; switch (offset) { case _PHY_CTL_FAMILY_EDP: vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; break; case _PHY_CTL_FAMILY_DDI: vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; break; } vgpu_vreg(vgpu, offset) = v; return 0; } static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = vgpu_vreg(vgpu, offset); v &= ~UNIQUE_TRANGE_EN_METHOD; vgpu_vreg(vgpu, offset) = v; return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); } static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { vgpu_vreg(vgpu, offset - 0x600) = v; vgpu_vreg(vgpu, offset - 0x800) = v; } else { vgpu_vreg(vgpu, offset - 0x400) = v; vgpu_vreg(vgpu, offset - 0x600) = v; } vgpu_vreg(vgpu, offset) = v; return 0; } static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 v = *(u32 *)p_data; if (v & BIT(0)) { vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_RESERVED; vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; } if (v & BIT(1)) { vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_RESERVED; vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= PHY_POWER_GOOD; } vgpu_vreg(vgpu, offset) = v; return 0; } static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { vgpu_vreg(vgpu, offset) = 0; return 0; } /* * FixMe: * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did: * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.) * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing * these MI_BATCH_BUFFER. 
* Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT * PML4 PTE: PAT(0) PCD(1) PWT(1). * The performance is still expected to be low, will need further improvement. */ static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u64 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | GEN8_PPAT(1, 0) | GEN8_PPAT(2, 0) | GEN8_PPAT(3, CHV_PPAT_SNOOP) | GEN8_PPAT(4, CHV_PPAT_SNOOP) | GEN8_PPAT(5, CHV_PPAT_SNOOP) | GEN8_PPAT(6, CHV_PPAT_SNOOP) | GEN8_PPAT(7, CHV_PPAT_SNOOP); vgpu_vreg(vgpu, offset) = lower_32_bits(pat); return 0; } static int guc_status_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { /* keep MIA_IN_RESET before clearing */ read_vreg(vgpu, offset, p_data, bytes); vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET; return 0; } static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(gvt, offset); /** * Read HW reg in following case * a. the offset isn't a ring mmio * b. the offset's ring is running on hw. * c. the offset is ring time stamp mmio */ if (!engine || vgpu == gvt->scheduler.engine_owner[engine->id] || offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) || offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) { mmio_hw_access_pre(gvt->gt); vgpu_vreg(vgpu, offset) = intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); mmio_hw_access_post(gvt->gt); } return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); } static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; int ret = 0; if (drm_WARN_ON(&i915->drm, !engine)) return -EINVAL; /* * Due to d3_entered is used to indicate skipping PPGTT invalidation on * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after * vGPU reset if in resuming. * In S0ix exit, the device power state also transite from D3 to D0 as * S3 resume, but no vGPU reset (triggered by QEMU devic model). After * S0ix exit, all engines continue to work. However the d3_entered * remains set which will break next vGPU reset logic (miss the expected * PPGTT invalidation). * Engines can only work in D0. Thus the 1st elsp write gives GVT a * chance to clear d3_entered. 
*/ if (vgpu->d3_entered) vgpu->d3_entered = false; execlist = &vgpu->submission.execlist[engine->id]; execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { ret = intel_vgpu_submit_execlist(vgpu, engine); if(ret) gvt_vgpu_err("fail submit workload on ring %s\n", engine->name); } ++execlist->elsp_dwords.index; execlist->elsp_dwords.index &= 0x3; return ret; } static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data = *(u32 *)p_data; const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); bool enable_execlist; int ret; (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1); if (IS_COFFEELAKE(vgpu->gvt->gt->i915) || IS_COMETLAKE(vgpu->gvt->gt->i915)) (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); write_vreg(vgpu, offset, p_data, bytes); if (IS_MASKED_BITS_ENABLED(data, 1)) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) || IS_COMETLAKE(vgpu->gvt->gt->i915)) && IS_MASKED_BITS_ENABLED(data, 2)) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } /* when PPGTT mode enabled, we will check if guest has called * pvinfo, if not, we will treat this guest as non-gvtg-aware * guest, and stop emulating its cfg space, mmio, gtt, etc. */ if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) || IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) && !vgpu->pv_notified) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) || IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) { enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); gvt_dbg_core("EXECLIST %s on ring %s\n", (enable_execlist ? 
"enabling" : "disabling"), engine->name); if (!enable_execlist) return 0; ret = intel_vgpu_select_submission_ops(vgpu, engine->mask, INTEL_VGPU_EXECLIST_SUBMISSION); if (ret) return ret; intel_vgpu_start_schedule(vgpu); } return 0; } static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); vgpu_vreg(vgpu, offset) = 0; switch (offset) { case 0x4260: id = RCS0; break; case 0x4264: id = VCS0; break; case 0x4268: id = VCS1; break; case 0x426c: id = BCS0; break; case 0x4270: id = VECS0; break; default: return -EINVAL; } set_bit(id, (void *)vgpu->submission.tlb_handle_pending); return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET)) data |= RESET_CTL_READY_TO_RESET; else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)) data &= ~RESET_CTL_READY_TO_RESET; vgpu_vreg(vgpu, offset) = data; return 0; } static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data = *(u32 *)p_data; (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18); write_vreg(vgpu, offset, p_data, bytes); if (IS_MASKED_BITS_ENABLED(data, 0x10) || IS_MASKED_BITS_ENABLED(data, 0x8)) enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; } #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \ s, f, am, rm, d, r, w); \ if (ret) \ return ret; \ } while (0) #define MMIO_DH(reg, d, r, w) \ MMIO_F(reg, 4, 0, 0, 0, d, r, w) #define MMIO_DFH(reg, d, f, r, w) \ MMIO_F(reg, 4, f, 0, 0, d, r, w) #define MMIO_GM(reg, d, r, w) \ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) #define MMIO_GM_RDR(reg, d, r, w) \ MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w) #define MMIO_RO(reg, d, f, rm, r, w) \ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \ MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ if (HAS_ENGINE(gvt->gt, VCS1)) \ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) #define MMIO_RING_DFH(prefix, d, f, r, w) \ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w) #define MMIO_RING_GM(prefix, d, r, w) \ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) #define MMIO_RING_GM_RDR(prefix, d, r, w) \ MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w) #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) static int init_generic_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, gamw_echo_dev_rw_ia_write); MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); #define 
RING_REG(base) _MMIO((base) + 0x28) MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x134) MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x6c) MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL); #undef RING_REG MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL); MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); /* RING MODE */ #define RING_REG(base) _MMIO((base) + 0x29c) MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL, ring_mode_mmio_write); #undef RING_REG MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HSW_HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_C, 
PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write); MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write); MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write); MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write); MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write); MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write); MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write); MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK | PORTB_HOTPLUG_STATUS_MASK | PORTC_HOTPLUG_STATUS_MASK | PORTD_HOTPLUG_STATUS_MASK, NULL, NULL); MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write); MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write); MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL); MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL); MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write); MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write); MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write); MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write); MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write); MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write); MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write); MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write); MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write); MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write); MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write); MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write); 
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL); MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL); MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL); MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL); MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL); MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write); MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write); MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL); MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write); MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL); 
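	/*
	 * Note on the MMIO_* helpers used throughout this table: per the macro
	 * definitions earlier in this file, each of them boils down to a
	 * setup_mmio_info() call for a register range.  For example
	 *
	 *   MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
	 *
	 * expands to
	 *
	 *   MMIO_F(GEN6_GDRST, 4, 0, 0, 0, D_ALL, NULL, gdrst_mmio_write);
	 *
	 * i.e. a 4-byte register with no flags, no address mask, no read-only
	 * mask, no read handler and a write handler.  Likewise the 0x4260-0x4270
	 * entries above route guest TLB-invalidate writes to
	 * gvt_reg_tlb_control_handler(), which (as defined earlier in this file)
	 * only records a per-engine pending bit in
	 * vgpu->submission.tlb_handle_pending.
	 */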
MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL); return 0; } static int init_bdw_mmio_info(struct intel_gvt *gvt) { int ret; MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0, mmio_read_from_hw, NULL); #define RING_REG(base) _MMIO((base) + 0xd0) MMIO_RING_F(RING_REG, 4, F_RO, 0, ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, ring_reset_ctl_write); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x230) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x234) MMIO_RING_F(RING_REG, 
8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x244) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x370) MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x3a0) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); #undef RING_REG MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); #define RING_REG(base) _MMIO((base) + 0x270) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, D_BDW_PLUS, NULL, force_nonpriv_write); MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } static int init_skl_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL); MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL); MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, 
F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); 
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* TRTT */ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE, NULL, gen9_trtte_write); MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, NULL, gen9_trtt_chicken_write); MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, csfe_chicken1_mmio_write); #undef CSFE_CHICKEN1_REG MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } static int init_bxt_mmio_info(struct intel_gvt *gvt) { int ret; MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, 
bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, NULL, bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, NULL, bxt_pcs_dw12_grp_write); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); return 0; } static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, unsigned int offset) { struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; for (i = 0; i < num; i++, block++) { if (offset >= i915_mmio_reg_offset(block->offset) && offset < i915_mmio_reg_offset(block->offset) + block->size) return block; } return NULL; } /** * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device * @gvt: GVT device * * This function is called at the driver unloading stage, to clean up the MMIO * information table of GVT device * */ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) { struct hlist_node *tmp; struct intel_gvt_mmio_info *e; int i; hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) kfree(e); kfree(gvt->mmio.mmio_block); gvt->mmio.mmio_block = NULL; gvt->mmio.num_mmio_block = 0; vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; } static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, u32 size) { struct intel_gvt *gvt = iter->data; struct intel_gvt_mmio_info *info, *p; u32 start, end, i; if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; start = offset; end = offset + size; for (i = start; i < end; i += 4) { p = intel_gvt_find_mmio_info(gvt, i); if (p) { WARN(1, "dup mmio definition offset %x\n", i); /* We return -EEXIST here to make GVT-g load fail. * So duplicated MMIO can be found as soon as * possible. 
*/ return -EEXIST; } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->offset = i; info->read = intel_vgpu_default_mmio_read; info->write = intel_vgpu_default_mmio_write; INIT_HLIST_NODE(&info->node); hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); gvt->mmio.num_tracked_mmio++; } return 0; } static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter, u32 offset, u32 size) { struct intel_gvt *gvt = iter->data; struct gvt_mmio_block *block = gvt->mmio.mmio_block; void *ret; ret = krealloc(block, (gvt->mmio.num_mmio_block + 1) * sizeof(*block), GFP_KERNEL); if (!ret) return -ENOMEM; gvt->mmio.mmio_block = block = ret; block += gvt->mmio.num_mmio_block; memset(block, 0, sizeof(*block)); block->offset = _MMIO(offset); block->size = size; gvt->mmio.num_mmio_block++; return 0; } static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset, u32 size) { if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0))) return handle_mmio(iter, offset, size); else return handle_mmio_block(iter, offset, size); } static int init_mmio_info(struct intel_gvt *gvt) { struct intel_gvt_mmio_table_iter iter = { .i915 = gvt->gt->i915, .data = gvt, .handle_mmio_cb = handle_mmio_cb, }; return intel_gvt_iterate_mmio_table(&iter); } static int init_mmio_block_handlers(struct intel_gvt *gvt) { struct gvt_mmio_block *block; block = find_mmio_block(gvt, VGT_PVINFO_PAGE); if (!block) { WARN(1, "fail to assign handlers to mmio block %x\n", i915_mmio_reg_offset(gvt->mmio.mmio_block->offset)); return -ENODEV; } block->read = pvinfo_mmio_read; block->write = pvinfo_mmio_write; return 0; } /** * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device * @gvt: GVT device * * This function is called at the initialization stage, to setup the MMIO * information table for GVT device * * Returns: * zero on success, negative if failed. */ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; struct drm_i915_private *i915 = gvt->gt->i915; int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); int ret; gvt->mmio.mmio_attribute = vzalloc(size); if (!gvt->mmio.mmio_attribute) return -ENOMEM; ret = init_mmio_info(gvt); if (ret) goto err; ret = init_mmio_block_handlers(gvt); if (ret) goto err; ret = init_generic_mmio_info(gvt); if (ret) goto err; if (IS_BROADWELL(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; } else if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); if (ret) goto err; } else if (IS_BROXTON(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); if (ret) goto err; ret = init_bxt_mmio_info(gvt); if (ret) goto err; } return 0; err: intel_gvt_clean_mmio_info(gvt); return ret; } /** * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio * @gvt: a GVT device * @handler: the handler * @data: private data given to handler * * Returns: * Zero on success, negative error code if failed. 
*/ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; hash_for_each(gvt->mmio.mmio_info_table, i, e, node) { ret = handler(gvt, e->offset, data); if (ret) return ret; } for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { /* pvinfo data doesn't come from hw mmio */ if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE) continue; for (j = 0; j < block->size; j += 4) { ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); if (ret) return ret; } } return 0; } /** * intel_vgpu_default_mmio_read - default MMIO read handler * @vgpu: a vGPU * @offset: access offset * @p_data: data return buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { read_vreg(vgpu, offset, p_data, bytes); return 0; } /** * intel_vgpu_default_mmio_write() - default MMIO write handler * @vgpu: a vGPU * @offset: access offset * @p_data: write data buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { write_vreg(vgpu, offset, p_data, bytes); return 0; } /** * intel_vgpu_mask_mmio_write - write mask register * @vgpu: a vGPU * @offset: access offset * @p_data: write data buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 mask, old_vreg; old_vreg = vgpu_vreg(vgpu, offset); write_vreg(vgpu, offset, p_data, bytes); mask = vgpu_vreg(vgpu, offset) >> 16; vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | (vgpu_vreg(vgpu, offset) & mask); return 0; } /** * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be * force-nopriv register * * @gvt: a GVT device * @offset: register offset * * Returns: * True if the register is in force-nonpriv whitelist; * False if outside; */ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, unsigned int offset) { return in_whitelist(offset); } /** * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers * @vgpu: a vGPU * @offset: register offset * @pdata: data buffer * @bytes: data length * @is_read: read or write * * Returns: * Zero on success, negative error code if failed. */ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, void *pdata, unsigned int bytes, bool is_read) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; if (drm_WARN_ON(&i915->drm, bytes > 8)) return -EINVAL; /* * Handle special MMIO blocks. */ mmio_block = find_mmio_block(gvt, offset); if (mmio_block) { func = is_read ? mmio_block->read : mmio_block->write; if (func) return func(vgpu, offset, pdata, bytes); goto default_rw; } /* * Normal tracked MMIOs. 
*/ mmio_info = intel_gvt_find_mmio_info(gvt, offset); if (!mmio_info) { gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes); goto default_rw; } if (is_read) return mmio_info->read(vgpu, offset, pdata, bytes); else { u64 ro_mask = mmio_info->ro_mask; u32 old_vreg = 0; u64 data = 0; if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { old_vreg = vgpu_vreg(vgpu, offset); } if (likely(!ro_mask)) ret = mmio_info->write(vgpu, offset, pdata, bytes); else if (!~ro_mask) { gvt_vgpu_err("try to write RO reg %x\n", offset); return 0; } else { /* keep the RO bits in the virtual register */ memcpy(&data, pdata, bytes); data &= ~ro_mask; data |= vgpu_vreg(vgpu, offset) & ro_mask; ret = mmio_info->write(vgpu, offset, &data, bytes); } /* higher 16bits of mode ctl regs are mask bits for change */ if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { u32 mask = vgpu_vreg(vgpu, offset) >> 16; vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | (vgpu_vreg(vgpu, offset) & mask); } } return ret; default_rw: return is_read ? intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) : intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes); } void intel_gvt_restore_fence(struct intel_gvt *gvt) { struct intel_vgpu *vgpu; int i, id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { mmio_hw_access_pre(gvt->gt); for (i = 0; i < vgpu_fence_sz(vgpu); i++) intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); mmio_hw_access_post(gvt->gt); } } static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data) { struct intel_vgpu *vgpu = data; struct drm_i915_private *dev_priv = gvt->gt->i915; if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset)); return 0; } void intel_gvt_restore_mmio(struct intel_gvt *gvt) { struct intel_vgpu *vgpu; int id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { mmio_hw_access_pre(gvt->gt); intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); mmio_hw_access_post(gvt->gt); } }
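/*
 * Summary of the dispatch implemented above (derived from the code in this
 * file rather than separate documentation): intel_vgpu_mmio_reg_rw() first
 * consults find_mmio_block() for block registrations such as the PVINFO
 * page, then intel_gvt_find_mmio_info() for per-register entries, and
 * finally falls back to the default vreg read/write helpers for untracked
 * offsets.  Writes to tracked registers with a partial ro_mask are merged as
 *
 *   data = (new_value & ~ro_mask) | (vgpu_vreg & ro_mask);
 *
 * and "mode" registers (F_MODE_MASK) additionally treat the upper 16 bits of
 * the written value as a per-bit write-enable mask, as in
 * intel_vgpu_mask_mmio_write().  Registers flagged F_PM_SAVE are the ones
 * replayed from each vGPU's vregs back to hardware by
 * intel_gvt_restore_mmio().
 */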
linux-master
drivers/gpu/drm/i915/gvt/handlers.c
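/*
 * Standalone sketch, not part of the driver file above: the ring-mode and
 * chicken registers handled there follow the i915 "masked bit" convention,
 * where the upper 16 bits of a write act as a per-bit write-enable mask for
 * the lower 16 bits.  The helpers below are local re-creations written only
 * for illustration, modelled on _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE()
 * and on intel_vgpu_mask_mmio_write(); names such as masked_bit_enable()
 * and vreg_masked_write() are this sketch's own, not kernel symbols.
 */
#include <stdint.h>
#include <stdio.h>

/* Enable bit(s) b: set both the mask half and the value half. */
static uint32_t masked_bit_enable(uint32_t b)  { return (b << 16) | b; }
/* Disable bit(s) b: set only the mask half, leave the value half clear. */
static uint32_t masked_bit_disable(uint32_t b) { return b << 16; }

/*
 * Mirrors the merge done for mode-mask registers on a full 32-bit write:
 * only bits whose mask half was set in the written value may change in the
 * shadowed register.
 */
static uint32_t vreg_masked_write(uint32_t old_vreg, uint32_t data)
{
	uint32_t mask = data >> 16;

	return (old_vreg & ~mask) | (data & mask);
}

int main(void)
{
	uint32_t vreg = 0x00000085;	/* bits 0, 2 and 7 currently set */

	/* Clear bit 2 without touching bits 0 and 7. */
	vreg = vreg_masked_write(vreg, masked_bit_disable(0x4));
	printf("after disable: 0x%08x\n", vreg);	/* 0x00000081 */

	/* Set bit 1; again, the untouched bits are preserved. */
	vreg = vreg_masked_write(vreg, masked_bit_enable(0x2));
	printf("after enable:  0x%08x\n", vreg);	/* 0x00000083 */

	return 0;
}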
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include <drm/drm_blend.h> #include "i915_drv.h" #include "i915_fixed.h" #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_pcode.h" #include "intel_wm.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" static void skl_sagv_disable(struct drm_i915_private *i915); /* Stores plane specific WM parameters */ struct skl_wm_params { bool x_tiled, y_tiled; bool rc_surface; bool is_planar; u32 width; u8 cpp; u32 plane_pixel_rate; u32 y_min_scanlines; u32 plane_bytes_per_line; uint_fixed_16_16_t plane_blocks_per_line; uint_fixed_16_16_t y_tile_minimum; u32 linetime_us; u32 dbuf_block_size; }; u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915) { u8 enabled_slices = 0; enum dbuf_slice slice; for_each_dbuf_slice(i915, slice) { if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE) enabled_slices |= BIT(slice); } return enabled_slices; } /* * FIXME: We still don't have the proper code detect if we need to apply the WA, * so assume we'll always need it in order to avoid underruns. */ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) { return DISPLAY_VER(i915) == 9; } static bool intel_has_sagv(struct drm_i915_private *i915) { return HAS_SAGV(i915) && i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; } static u32 intel_sagv_block_time(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 14) { u32 val; val = intel_de_read(i915, MTL_LATENCY_SAGV); return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val); } else if (DISPLAY_VER(i915) >= 12) { u32 val = 0; int ret; ret = snb_pcode_read(&i915->uncore, GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, &val, NULL); if (ret) { drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n"); return 0; } return val; } else if (DISPLAY_VER(i915) == 11) { return 10; } else if (HAS_SAGV(i915)) { return 30; } else { return 0; } } static void intel_sagv_init(struct drm_i915_private *i915) { if (!HAS_SAGV(i915)) i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; /* * Probe to see if we have working SAGV control. * For icl+ this was already determined by intel_bw_init_hw(). */ if (DISPLAY_VER(i915) < 11) skl_sagv_disable(i915); drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN); i915->display.sagv.block_time_us = intel_sagv_block_time(i915); drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n", str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us); /* avoid overflow when adding with wm0 latency/etc. */ if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX, "Excessive SAGV block time %u, ignoring\n", i915->display.sagv.block_time_us)) i915->display.sagv.block_time_us = 0; if (!intel_has_sagv(i915)) i915->display.sagv.block_time_us = 0; } /* * SAGV dynamically adjusts the system agent voltage and clock frequencies * depending on power and performance requirements. The display engine access * to system memory is blocked during the adjustment time. 
Because of the * blocking time, having this enabled can cause full system hangs and/or pipe * underruns if we don't meet all of the following requirements: * * - <= 1 pipe enabled * - All planes can enable watermarks for latencies >= SAGV engine block time * - We're not using an interlaced display configuration */ static void skl_sagv_enable(struct drm_i915_private *i915) { int ret; if (!intel_has_sagv(i915)) return; if (i915->display.sagv.status == I915_SAGV_ENABLED) return; drm_dbg_kms(&i915->drm, "Enabling SAGV\n"); ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); /* We don't need to wait for SAGV when enabling */ /* * Some skl systems, pre-release machines in particular, * don't actually have SAGV. */ if (IS_SKYLAKE(i915) && ret == -ENXIO) { drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; return; } else if (ret < 0) { drm_err(&i915->drm, "Failed to enable SAGV\n"); return; } i915->display.sagv.status = I915_SAGV_ENABLED; } static void skl_sagv_disable(struct drm_i915_private *i915) { int ret; if (!intel_has_sagv(i915)) return; if (i915->display.sagv.status == I915_SAGV_DISABLED) return; drm_dbg_kms(&i915->drm, "Disabling SAGV\n"); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1); /* * Some skl systems, pre-release machines in particular, * don't actually have SAGV. */ if (IS_SKYLAKE(i915) && ret == -ENXIO) { drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; return; } else if (ret < 0) { drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret); return; } i915->display.sagv.status = I915_SAGV_DISABLED; } static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); if (!new_bw_state) return; if (!intel_can_enable_sagv(i915, new_bw_state)) skl_sagv_disable(i915); } static void skl_sagv_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); if (!new_bw_state) return; if (intel_can_enable_sagv(i915, new_bw_state)) skl_sagv_enable(i915); } static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); u16 old_mask, new_mask; if (!new_bw_state) return; old_mask = old_bw_state->qgv_points_mask; new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; if (old_mask == new_mask) return; WARN_ON(!new_bw_state->base.changed); drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n", old_mask, new_mask); /* * Restrict required qgv points before updating the configuration. * According to BSpec we can't mask and unmask qgv points at the same * time. Also masking should be done before updating the configuration * and unmasking afterwards. 
*/ icl_pcode_restrict_qgv_points(i915, new_mask); } static void icl_sagv_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); u16 old_mask, new_mask; if (!new_bw_state) return; old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; new_mask = new_bw_state->qgv_points_mask; if (old_mask == new_mask) return; WARN_ON(!new_bw_state->base.changed); drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", old_mask, new_mask); /* * Allow required qgv points after updating the configuration. * According to BSpec we can't mask and unmask qgv points at the same * time. Also masking should be done before updating the configuration * and unmasking afterwards. */ icl_pcode_restrict_qgv_points(i915, new_mask); } void intel_sagv_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); /* * Just return if we can't control SAGV or don't have it. * This is different from situation when we have SAGV but just can't * afford it due to DBuf limitation - in case if SAGV is completely * disabled in a BIOS, we are not even allowed to send a PCode request, * as it will throw an error. So have to check it here. */ if (!intel_has_sagv(i915)) return; if (DISPLAY_VER(i915) >= 11) icl_sagv_pre_plane_update(state); else skl_sagv_pre_plane_update(state); } void intel_sagv_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); /* * Just return if we can't control SAGV or don't have it. * This is different from situation when we have SAGV but just can't * afford it due to DBuf limitation - in case if SAGV is completely * disabled in a BIOS, we are not even allowed to send a PCode request, * as it will throw an error. So have to check it here. */ if (!intel_has_sagv(i915)) return; if (DISPLAY_VER(i915) >= 11) icl_sagv_post_plane_update(state); else skl_sagv_post_plane_update(state); } static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum plane_id plane_id; int max_level = INT_MAX; if (!intel_has_sagv(i915)) return false; if (!crtc_state->hw.active) return true; if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; int level; /* Skip this plane if it's not enabled */ if (!wm->wm[0].enable) continue; /* Find the highest enabled wm level for this plane */ for (level = i915->display.wm.num_levels - 1; !wm->wm[level].enable; --level) { } /* Highest common enabled wm level for all planes */ max_level = min(level, max_level); } /* No enabled planes? 
*/ if (max_level == INT_MAX) return true; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; /* * All enabled planes must have enabled a common wm level that * can tolerate memory latencies higher than sagv_block_time_us */ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) return false; } return true; } static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum plane_id plane_id; if (!crtc_state->hw.active) return true; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (wm->wm[0].enable && !wm->sagv.wm0.enable) return false; } return true; } static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (!i915->params.enable_sagv) return false; if (DISPLAY_VER(i915) >= 12) return tgl_crtc_can_enable_sagv(crtc_state); else return skl_crtc_can_enable_sagv(crtc_state); } bool intel_can_enable_sagv(struct drm_i915_private *i915, const struct intel_bw_state *bw_state) { if (DISPLAY_VER(i915) < 11 && bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) return false; return bw_state->pipe_sagv_reject == 0; } static int intel_compute_sagv_mask(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); int ret; struct intel_crtc *crtc; struct intel_crtc_state *new_crtc_state; struct intel_bw_state *new_bw_state = NULL; const struct intel_bw_state *old_bw_state = NULL; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); old_bw_state = intel_atomic_get_old_bw_state(state); if (intel_crtc_can_enable_sagv(new_crtc_state)) new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); else new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); } if (!new_bw_state) return 0; new_bw_state->active_pipes = intel_calc_active_pipes(state, old_bw_state->active_pipes); if (new_bw_state->active_pipes != old_bw_state->active_pipes) { ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; } if (intel_can_enable_sagv(i915, new_bw_state) != intel_can_enable_sagv(i915, old_bw_state)) { ret = intel_atomic_serialize_global_state(&new_bw_state->base); if (ret) return ret; } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; } for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; /* * We store use_sagv_wm in the crtc state rather than relying on * that bw state since we have no convenient way to get at the * latter from the plane commit hooks (especially in the legacy * cursor case) */ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) && DISPLAY_VER(i915) >= 12 && intel_can_enable_sagv(i915, new_bw_state); } return 0; } static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, u16 start, u16 end) { entry->start = start; entry->end = end; return end; } static int intel_dbuf_slice_size(struct drm_i915_private *i915) { return DISPLAY_INFO(i915)->dbuf.size / hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask); } static void skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, 
struct skl_ddb_entry *ddb) { int slice_size = intel_dbuf_slice_size(i915); if (!slice_mask) { ddb->start = 0; ddb->end = 0; return; } ddb->start = (ffs(slice_mask) - 1) * slice_size; ddb->end = fls(slice_mask) * slice_size; WARN_ON(ddb->start >= ddb->end); WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size); } static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) { struct skl_ddb_entry ddb; if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2))) slice_mask = BIT(DBUF_S1); else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) slice_mask = BIT(DBUF_S3); skl_ddb_entry_for_slices(i915, slice_mask, &ddb); return ddb.start; } u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, const struct skl_ddb_entry *entry) { int slice_size = intel_dbuf_slice_size(i915); enum dbuf_slice start_slice, end_slice; u8 slice_mask = 0; if (!skl_ddb_entry_size(entry)) return 0; start_slice = entry->start / slice_size; end_slice = (entry->end - 1) / slice_size; /* * Per plane DDB entry can in a really worst case be on multiple slices * but single entry is anyway contigious. */ while (start_slice <= end_slice) { slice_mask |= BIT(start_slice); start_slice++; } return slice_mask; } static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state) { const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int hdisplay, vdisplay; if (!crtc_state->hw.active) return 0; /* * Watermark/ddb requirement highly depends upon width of the * framebuffer, So instead of allocating DDB equally among pipes * distribute DDB based on resolution/width of the display. */ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); return hdisplay; } static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, enum pipe for_pipe, unsigned int *weight_start, unsigned int *weight_end, unsigned int *weight_total) { struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); enum pipe pipe; *weight_start = 0; *weight_end = 0; *weight_total = 0; for_each_pipe(i915, pipe) { int weight = dbuf_state->weight[pipe]; /* * Do not account pipes using other slice sets * luckily as of current BSpec slice sets do not partially * intersect(pipes share either same one slice or same slice set * i.e no partial intersection), so it is enough to check for * equality for now. 
*/ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) continue; *weight_total += weight; if (pipe < for_pipe) { *weight_start += weight; *weight_end += weight; } else if (pipe == for_pipe) { *weight_end += weight; } } } static int skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); unsigned int weight_total, weight_start, weight_end; const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); struct intel_crtc_state *crtc_state; struct skl_ddb_entry ddb_slices; enum pipe pipe = crtc->pipe; unsigned int mbus_offset = 0; u32 ddb_range_size; u32 dbuf_slice_mask; u32 start, end; int ret; if (new_dbuf_state->weight[pipe] == 0) { skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); goto out; } dbuf_slice_mask = new_dbuf_state->slices[pipe]; skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices); mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask); ddb_range_size = skl_ddb_entry_size(&ddb_slices); intel_crtc_dbuf_weights(new_dbuf_state, pipe, &weight_start, &weight_end, &weight_total); start = ddb_range_size * weight_start / weight_total; end = ddb_range_size * weight_end / weight_total; skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], ddb_slices.start - mbus_offset + start, ddb_slices.start - mbus_offset + end); out: if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], &new_dbuf_state->ddb[pipe])) return 0; ret = intel_atomic_lock_global_state(&new_dbuf_state->base); if (ret) return ret; crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); /* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets. 
*/ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", crtc->base.base.id, crtc->base.name, old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state->active_pipes, new_dbuf_state->active_pipes); return 0; } static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane); static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */); static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, const struct skl_wm_params *wp) { unsigned int latency = i915->display.wm.skl_latency[level]; if (latency == 0) return 0; /* * WaIncreaseLatencyIPCEnabled: kbl,cfl * Display WA #1141: kbl,cfl */ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && skl_watermark_ipc_enabled(i915)) latency += 4; if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled) latency += 15; return latency; } static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) { struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; struct skl_wm_params wp; int level; ret = skl_compute_wm_params(crtc_state, 256, drm_format_info(DRM_FORMAT_ARGB8888), DRM_FORMAT_MOD_LINEAR, DRM_MODE_ROTATE_0, crtc_state->pixel_rate, &wp, 0); drm_WARN_ON(&i915->drm, ret); for (level = 0; level < i915->display.wm.num_levels; level++) { unsigned int latency = skl_wm_latency(i915, level, &wp); skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) break; min_ddb_alloc = wm.min_ddb_alloc; } return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); } static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) { skl_ddb_entry_init(entry, REG_FIELD_GET(PLANE_BUF_START_MASK, reg), REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); if (entry->end) entry->end++; } static void skl_ddb_get_hw_plane_state(struct drm_i915_private *i915, const enum pipe pipe, const enum plane_id plane_id, struct skl_ddb_entry *ddb, struct skl_ddb_entry *ddb_y) { u32 val; /* Cursor doesn't support NV12/planar, so no extra calculation needed */ if (plane_id == PLANE_CURSOR) { val = intel_de_read(i915, CUR_BUF_CFG(pipe)); skl_ddb_entry_init_from_hw(ddb, val); return; } val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id)); skl_ddb_entry_init_from_hw(ddb, val); if (DISPLAY_VER(i915) >= 11) return; val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id)); skl_ddb_entry_init_from_hw(ddb_y, val); } static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, struct skl_ddb_entry *ddb, struct skl_ddb_entry *ddb_y) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; enum plane_id plane_id; power_domain = POWER_DOMAIN_PIPE(pipe); wakeref = intel_display_power_get_if_enabled(i915, power_domain); if (!wakeref) return; for_each_plane_id_on_crtc(crtc, plane_id) skl_ddb_get_hw_plane_state(i915, pipe, plane_id, &ddb[plane_id], &ddb_y[plane_id]); intel_display_power_put(i915, power_domain, wakeref); } struct dbuf_slice_conf_entry { u8 active_pipes; u8 dbuf_mask[I915_MAX_PIPES]; bool join_mbus; }; /* * Table taken from Bspec 12716 * Pipes do have some preferred DBuf slice affinity, * plus there are some hardcoded requirements on how * those should be distributed for multipipe scenarios. * For more DBuf slices algorithm can get even more messy * and less readable, so decided to use a table almost * as is from BSpec itself - that way it is at least easier * to compare, change and check. */ static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = /* Autogenerated with igt/tools/intel_dbuf_map tool: */ { { .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), }, }, { .active_pipes = BIT(PIPE_B), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_C), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, {} }; /* * Table taken from Bspec 49255 * Pipes do have some preferred DBuf slice affinity, * plus there are some hardcoded requirements on how * those should be distributed for multipipe scenarios. * For more DBuf slices algorithm can get even more messy * and less readable, so decided to use a table almost * as is from BSpec itself - that way it is at least easier * to compare, change and check. 
*/ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = /* Autogenerated with igt/tools/intel_dbuf_map tool: */ { { .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S2), [PIPE_B] = BIT(DBUF_S1), }, }, { .active_pipes = BIT(PIPE_C), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_D), .dbuf_mask = { [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S1), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S1), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S1), [PIPE_C] = BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S2), }, }, {} }; static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { { .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_C), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_D), .dbuf_mask = { [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_C] = 
BIT(DBUF_S3), [PIPE_D] = BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3), [PIPE_D] = BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3), [PIPE_D] = BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3), [PIPE_D] = BIT(DBUF_S4), }, }, {} }; static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { /* * Keep the join_mbus cases first so check_mbus_joined() * will prefer them over the !join_mbus cases. */ { .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), }, .join_mbus = true, }, { .active_pipes = BIT(PIPE_B), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), }, .join_mbus = true, }, { .active_pipes = BIT(PIPE_A), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), }, .join_mbus = false, }, { .active_pipes = BIT(PIPE_B), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), }, .join_mbus = false, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_C), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), }, }, { .active_pipes = BIT(PIPE_D), .dbuf_mask = { [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), }, }, {} }; static bool check_mbus_joined(u8 active_pipes, const struct dbuf_slice_conf_entry *dbuf_slices) { int i; for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if 
	    (dbuf_slices[i].active_pipes == active_pipes)
			return dbuf_slices[i].join_mbus;
	}

	return false;
}

static bool adlp_check_mbus_joined(u8 active_pipes)
{
	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}

static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes &&
		    dbuf_slices[i].join_mbus == join_mbus)
			return dbuf_slices[i].dbuf_mask[pipe];
	}
	return 0;
}

/*
 * This function finds an entry with the same enabled pipe configuration and
 * returns the corresponding DBuf slice mask, as specified in the BSpec for
 * the particular platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	/*
	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
	 * revision required calculating a "pipe ratio" in order to determine
	 * if one or two slices can be used for single pipe configurations
	 * as an additional constraint on the existing table.
	 * However, based on recent info, it should not be "pipe ratio"
	 * but rather the ratio between pixel_rate and cdclk with additional
	 * constants, so for now we are using only the table until this is
	 * clarified. This is also the reason why the crtc_state param is
	 * still here - we will need it once those additional constraints
	 * pop up.
	 */
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   icl_allowed_dbufs);
}

static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   tgl_allowed_dbufs);
}

static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   adlp_allowed_dbufs);
}

static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   dg2_allowed_dbufs);
}

static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (IS_DG2(i915))
		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) >= 13)
		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) == 12)
		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915) == 11)
		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	/*
	 * For anything else just return one slice for now.
	 * Should be extended for other platforms.
	 */
	return active_pipes & BIT(pipe) ?
BIT(DBUF_S1) : 0; } static bool use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); return DISPLAY_VER(i915) >= 13 && crtc_state->uapi.async_flip && plane->async_flip; } static u64 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum plane_id plane_id; u64 data_rate = 0; for_each_plane_id_on_crtc(crtc, plane_id) { if (plane_id == PLANE_CURSOR) continue; data_rate += crtc_state->rel_data_rate[plane_id]; if (DISPLAY_VER(i915) < 11) data_rate += crtc_state->rel_data_rate_y[plane_id]; } return data_rate; } static const struct skl_wm_level * skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, enum plane_id plane_id, int level) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; if (level == 0 && pipe_wm->use_sagv_wm) return &wm->sagv.wm0; return &wm->wm[level]; } static const struct skl_wm_level * skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, enum plane_id plane_id) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; if (pipe_wm->use_sagv_wm) return &wm->sagv.trans_wm; return &wm->trans_wm; } /* * We only disable the watermarks for each plane if * they exceed the ddb allocation of said plane. This * is done so that we don't end up touching cursor * watermarks needlessly when some other plane reduces * our max possible watermark level. * * Bspec has this to say about the PLANE_WM enable bit: * "All the watermarks at this level for all enabled * planes must be enabled before the level will be used." * So this is actually safe to do. */ static void skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb) { if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) memset(wm, 0, sizeof(*wm)); } static void skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb) { if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) || uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) { memset(wm, 0, sizeof(*wm)); memset(uv_wm, 0, sizeof(*uv_wm)); } } static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, const struct skl_plane_wm *wm) { /* * Wa_1408961008:icl, ehl * Wa_14012656716:tgl, adl * Wa_14017887344:icl * Wa_14017868169:adl, tgl * Due to some power saving optimizations, different subsystems * like PSR, might still use even disabled wm level registers, * for "reference", so lets keep at least the values sane. * Considering amount of WA requiring us to do similar things, was * decided to simply do it for all of the platforms, as those wm * levels are disabled, this isn't going to do harm anyway. */ return level > 0 && !wm->wm[level].enable; } struct skl_plane_ddb_iter { u64 data_rate; u16 start, size; }; static void skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, struct skl_ddb_entry *ddb, const struct skl_wm_level *wm, u64 data_rate) { u16 size, extra = 0; if (data_rate) { extra = min_t(u16, iter->size, DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate)); iter->size -= extra; iter->data_rate -= data_rate; } /* * Keep ddb entry of all disabled planes explicitly zeroed * to avoid skl_ddb_add_affected_planes() adding them to * the state when other planes change their allocations. 
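	 * (Two zeroed entries also compare equal in skl_ddb_entry_equal(),
	 * so such planes are skipped by skl_ddb_add_affected_planes() and
	 * not pulled into the commit.)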
*/ size = wm->min_ddb_alloc + extra; if (size) iter->start = skl_ddb_entry_init(ddb, iter->start, iter->start + size); } static int skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_dbuf_state *dbuf_state = intel_atomic_get_new_dbuf_state(state); const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; int num_active = hweight8(dbuf_state->active_pipes); struct skl_plane_ddb_iter iter; enum plane_id plane_id; u16 cursor_size; u32 blocks; int level; /* Clear the partitioning for disabled planes. */ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb)); memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); if (!crtc_state->hw.active) return 0; iter.start = alloc->start; iter.size = skl_ddb_entry_size(alloc); if (iter.size == 0) return 0; /* Allocate fixed number of blocks for cursor. */ cursor_size = skl_cursor_allocation(crtc_state, num_active); iter.size -= cursor_size; skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], alloc->end - cursor_size, alloc->end); iter.data_rate = skl_total_relative_data_rate(crtc_state); /* * Find the highest watermark level for which we can satisfy the block * requirement of active planes. */ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { blocks = 0; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { drm_WARN_ON(&i915->drm, wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; break; } continue; } blocks += wm->wm[level].min_ddb_alloc; blocks += wm->uv_wm[level].min_ddb_alloc; } if (blocks <= iter.size) { iter.size -= blocks; break; } } if (level < 0) { drm_dbg_kms(&i915->drm, "Requested display configuration exceeds system DDB limitations"); drm_dbg_kms(&i915->drm, "minimum required %d/%d\n", blocks, iter.size); return -EINVAL; } /* avoid the WARN later when we don't allocate any extra DDB */ if (iter.data_rate == 0) iter.size = 0; /* * Grant each plane the blocks it requires at the highest achievable * watermark level, plus an extra share of the leftover blocks * proportional to its relative data rate. */ for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) continue; if (DISPLAY_VER(i915) < 11 && crtc_state->nv12_planes & BIT(plane_id)) { skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], crtc_state->rel_data_rate_y[plane_id]); skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level], crtc_state->rel_data_rate[plane_id]); } else { skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level], crtc_state->rel_data_rate[plane_id]); } } drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0); /* * When we calculated watermark values we didn't know how high * of a level we'd actually be able to hit, so we just marked * all levels as "enabled." Go back now and disable the ones * that aren't actually possible. 
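	 *
	 * For example (hypothetical numbers): if the loop above settled on
	 * level 3, the levels above it (4..num_levels-1) are re-checked
	 * below against each plane's final DDB allocation and zeroed
	 * whenever their min_ddb_alloc no longer fits.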
*/ for (level++; level < i915->display.wm.num_levels; level++) { for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; const struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (DISPLAY_VER(i915) < 11 && crtc_state->nv12_planes & BIT(plane_id)) skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], ddb_y, ddb); else skl_check_wm_level(&wm->wm[level], ddb); if (skl_need_wm_copy_wa(i915, level, wm)) { wm->wm[level].blocks = wm->wm[level - 1].blocks; wm->wm[level].lines = wm->wm[level - 1].lines; wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines; } } } /* * Go back and disable the transition and SAGV watermarks * if it turns out we don't have enough DDB blocks for them. */ for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; const struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (DISPLAY_VER(i915) < 11 && crtc_state->nv12_planes & BIT(plane_id)) { skl_check_wm_level(&wm->trans_wm, ddb_y); } else { WARN_ON(skl_ddb_entry_size(ddb_y)); skl_check_wm_level(&wm->trans_wm, ddb); } skl_check_wm_level(&wm->sagv.wm0, ddb); skl_check_wm_level(&wm->sagv.trans_wm, ddb); } return 0; } /* * The max latency should be 257 (max the punit can code is 255 and we add 2us * for the read latency) and cpp should always be <= 8, so that * should allow pixel_rate up to ~2 GHz which seems sufficient since max * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. */ static uint_fixed_16_16_t skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate, u8 cpp, u32 latency, u32 dbuf_block_size) { u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) return FP_16_16_MAX; wm_intermediate_val = latency * pixel_rate * cpp; ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); if (DISPLAY_VER(i915) >= 10) ret = add_fixed16_u32(ret, 1); return ret; } static uint_fixed_16_16_t skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, uint_fixed_16_16_t plane_blocks_per_line) { u32 wm_intermediate_val; uint_fixed_16_16_t ret; if (latency == 0) return FP_16_16_MAX; wm_intermediate_val = latency * pixel_rate; wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000); ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); return ret; } static uint_fixed_16_16_t intel_get_linetime_us(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); u32 pixel_rate; u32 crtc_htotal; uint_fixed_16_16_t linetime_us; if (!crtc_state->hw.active) return u32_to_fixed16(0); pixel_rate = crtc_state->pixel_rate; if (drm_WARN_ON(&i915->drm, pixel_rate == 0)) return u32_to_fixed16(0); crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; } static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 interm_pbpl; /* only planar format has two planes */ if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(format, 
						    modifier)) {
		drm_dbg_kms(&i915->drm,
			    "Non-planar formats have a single plane\n");
		return -EINVAL;
	}

	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
		intel_fb_is_tiled_modifier(modifier);
	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	if (DISPLAY_VER(i915) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(i915))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		if (DISPLAY_VER(i915) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);

		if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));

	return 0;
}

static int skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
				       const struct intel_plane_state *plane_state,
				       struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     intel_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}

static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
{
	if (DISPLAY_VER(i915) >= 10)
		return true;

	/* The number of lines is ignored for the level 0 watermark.
*/ return level > 0; } static int skl_wm_max_lines(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 13) return 255; else return 31; } static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; u32 blocks, lines, min_ddb_alloc = 0; if (latency == 0 || (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; } method1 = skl_wm_method1(i915, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, latency, wp->plane_blocks_per_line); if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; } else if (latency >= wp->linetime_us) { if (DISPLAY_VER(i915) == 9) selected_result = min_fixed16(method1, method2); else selected_result = method2; } else { selected_result = method1; } } blocks = fixed16_to_u32_round_up(selected_result) + 1; /* * Lets have blocks at minimum equivalent to plane_blocks_per_line * as there will be at minimum one line for lines configuration. This * is a work around for FIFO underruns observed with resolutions like * 4k 60 Hz in single channel DRAM configurations. * * As per the Bspec 49325, if the ddb allocation can hold at least * one plane_blocks_per_line, we should have selected method2 in * the above logic. Assuming that modern versions have enough dbuf * and method2 guarantees blocks equivalent to at least 1 line, * select the blocks as plane_blocks_per_line. * * TODO: Revisit the logic when we have better understanding on DRAM * channels' impact on the level 0 memory latency and the relevant * wm calculations. */ if (skl_wm_has_lines(i915, level)) blocks = max(blocks, fixed16_to_u32_round_up(wp->plane_blocks_per_line)); lines = div_round_up_fixed16(selected_result, wp->plane_blocks_per_line); if (DISPLAY_VER(i915) == 9) { /* Display WA #1125: skl,bxt,kbl */ if (level == 0 && wp->rc_surface) blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); /* Display WA #1126: skl,bxt,kbl */ if (level >= 1 && level <= 7) { if (wp->y_tiled) { blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); lines += wp->y_min_scanlines; } else { blocks++; } /* * Make sure result blocks for higher latency levels are * at least as high as level below the current level. * Assumption in DDB algorithm optimization for special * cases. Also covers Display WA #1125 for RC. */ if (result_prev->blocks > blocks) blocks = result_prev->blocks; } } if (DISPLAY_VER(i915) >= 11) { if (wp->y_tiled) { int extra_lines; if (lines % wp->y_min_scanlines == 0) extra_lines = wp->y_min_scanlines; else extra_lines = wp->y_min_scanlines * 2 - lines % wp->y_min_scanlines; min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines, wp->plane_blocks_per_line); } else { min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10); } } if (!skl_wm_has_lines(i915, level)) lines = 0; if (lines > skl_wm_max_lines(i915)) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; } /* * If lines is valid, assume we can use this watermark level * for now. 
	 * We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
	result->blocks = blocks;
	result->lines = lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
	result->enable = true;

	if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
		result->can_sagv = latency >= i915->display.sagv.block_time_us;
}

static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      struct intel_plane *plane,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wm_level *result_prev = &levels[0];
	int level;

	for (level = 0; level < i915->display.wm.num_levels; level++) {
		struct skl_wm_level *result = &levels[level];
		unsigned int latency = skl_wm_latency(i915, level, wm_params);

		skl_compute_plane_wm(crtc_state, plane, level, latency,
				     wm_params, result_prev, result);

		result_prev = result;
	}
}

static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
				struct intel_plane *plane,
				const struct skl_wm_params *wm_params,
				struct skl_plane_wm *plane_wm)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
	struct skl_wm_level *levels = plane_wm->wm;
	unsigned int latency = 0;

	if (i915->display.sagv.block_time_us)
		latency = i915->display.sagv.block_time_us +
			skl_wm_latency(i915, 0, wm_params);

	skl_compute_plane_wm(crtc_state, plane, 0, latency,
			     wm_params, &levels[0], sagv_wm);
}

static void skl_compute_transition_wm(struct drm_i915_private *i915,
				      struct skl_wm_level *trans_wm,
				      const struct skl_wm_level *wm0,
				      const struct skl_wm_params *wp)
{
	u16 trans_min, trans_amount, trans_y_tile_min;
	u16 wm0_blocks, trans_offset, blocks;

	/* Transition WMs don't make any sense if IPC is disabled */
	if (!skl_watermark_ipc_enabled(i915))
		return;

	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WMs are not recommended by the HW team for GEN9
	 */
	if (DISPLAY_VER(i915) == 9)
		return;

	if (DISPLAY_VER(i915) >= 11)
		trans_min = 4;
	else
		trans_min = 14;

	/* Display WA #1140: glk,cnl */
	if (DISPLAY_VER(i915) == 10)
		trans_amount = 0;
	else
		trans_amount = 10; /* This is a configurable amount */

	trans_offset = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the capital
	 * letters. The value wm_l0->blocks is actually Result Blocks, but
	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
	 * and since we later will have to get the ceiling of the sum in the
	 * transition watermarks calculation, we can just pretend Selected
	 * Result Blocks is Result Blocks minus 1 and it should work for the
	 * current platforms.
	 */
	wm0_blocks = wm0->blocks - 1;

	if (wp->y_tiled) {
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
	} else {
		blocks = wm0_blocks + trans_offset;
	}
	blocks++;

	/*
	 * Just assume we can enable the transition watermark. After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
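	 *
	 * Worked example (hypothetical numbers, assuming IPC enabled,
	 * DISPLAY_VER >= 11, not Y-tiled): trans_min = 4 and
	 * trans_amount = 10, so trans_offset = 14. With wm0->blocks = 31
	 * we use wm0_blocks = 30, giving blocks = 30 + 14 + 1 = 45, and
	 * below trans_wm->min_ddb_alloc becomes
	 * max(wm0->min_ddb_alloc, 45 + 1).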
*/ trans_wm->blocks = blocks; trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); trans_wm->enable = true; } static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct intel_plane *plane, int color_plane) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; ret = skl_compute_plane_wm_params(crtc_state, plane_state, &wm_params, color_plane); if (ret) return ret; skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); skl_compute_transition_wm(i915, &wm->trans_wm, &wm->wm[0], &wm_params); if (DISPLAY_VER(i915) >= 12) { tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); skl_compute_transition_wm(i915, &wm->sagv.trans_wm, &wm->sagv.wm0, &wm_params); } return 0; } static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct intel_plane *plane) { struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; wm->is_planar = true; /* uv plane watermarks must also be validated for NV12/Planar */ ret = skl_compute_plane_wm_params(crtc_state, plane_state, &wm_params, 1); if (ret) return ret; skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); return 0; } static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); enum plane_id plane_id = plane->id; struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; memset(wm, 0, sizeof(*wm)); if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; ret = skl_build_plane_wm_single(crtc_state, plane_state, plane, 0); if (ret) return ret; if (fb->format->is_yuv && fb->format->num_planes > 1) { ret = skl_build_plane_wm_uv(crtc_state, plane_state, plane); if (ret) return ret; } return 0; } static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; int ret; /* Watermarks calculated in master */ if (plane_state->planar_slave) return 0; memset(wm, 0, sizeof(*wm)); if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; drm_WARN_ON(&i915->drm, !intel_wm_plane_visible(crtc_state, plane_state)); drm_WARN_ON(&i915->drm, !fb->format->is_yuv || fb->format->num_planes == 1); ret = skl_build_plane_wm_single(crtc_state, plane_state, plane_state->planar_linked_plane, 0); if (ret) return ret; ret = skl_build_plane_wm_single(crtc_state, plane_state, plane, 1); if (ret) return ret; } else if (intel_wm_plane_visible(crtc_state, plane_state)) { ret = skl_build_plane_wm_single(crtc_state, plane_state, plane, 0); if (ret) return ret; } return 0; } static bool skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state, int wm0_lines, int latency) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* FIXME missing scaler and DSC pre-fill time */ return crtc_state->framestart_delay + intel_usecs_to_scanlines(adjusted_mode, latency) + wm0_lines 
> adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start; } static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum plane_id plane_id; int wm0_lines = 0; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; /* FIXME what about !skl_wm_has_lines() platforms? */ wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines); } return wm0_lines; } static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, int wm0_lines) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); int level; for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { int latency; /* FIXME should we care about the latency w/a's? */ latency = skl_wm_latency(i915, level, NULL); if (latency == 0) continue; /* FIXME is it correct to use 0 latency for wm0 here? */ if (level == 0) latency = 0; if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency)) return level; } return -EINVAL; } static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); int wm0_lines, level; if (!crtc_state->hw.active) return 0; wm0_lines = skl_max_wm0_lines(crtc_state); level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines); if (level < 0) return level; /* * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* * based on whether we're limited by the vblank duration. */ crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1; for (level++; level < i915->display.wm.num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; /* * FIXME just clear enable or flag the entire * thing as bad via min_ddb_alloc=U16_MAX? */ wm->wm[level].enable = false; wm->uv_wm[level].enable = false; } } if (DISPLAY_VER(i915) >= 12 && i915->display.sagv.block_time_us && skl_is_vblank_too_short(crtc_state, wm0_lines, i915->display.sagv.block_time_us)) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; wm->sagv.wm0.enable = false; wm->sagv.trans_wm.enable = false; } } return 0; } static int skl_build_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; struct intel_plane *plane; int ret, i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { /* * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc * instead but we don't populate that correctly for NV12 Y * planes so for now hack this. 
*/ if (plane->pipe != crtc->pipe) continue; if (DISPLAY_VER(i915) >= 11) ret = icl_build_plane_wm(crtc_state, plane_state); else ret = skl_build_plane_wm(crtc_state, plane_state); if (ret) return ret; } crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; return skl_wm_check_vblank(crtc_state); } static void skl_ddb_entry_write(struct drm_i915_private *i915, i915_reg_t reg, const struct skl_ddb_entry *entry) { if (entry->end) intel_de_write_fw(i915, reg, PLANE_BUF_END(entry->end - 1) | PLANE_BUF_START(entry->start)); else intel_de_write_fw(i915, reg, 0); } static void skl_write_wm_level(struct drm_i915_private *i915, i915_reg_t reg, const struct skl_wm_level *level) { u32 val = 0; if (level->enable) val |= PLANE_WM_EN; if (level->ignore_lines) val |= PLANE_WM_IGNORE_LINES; val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks); val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines); intel_de_write_fw(i915, reg, val); } void skl_write_plane_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; const struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; int level; for (level = 0; level < i915->display.wm.num_levels; level++) skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level), skl_plane_wm_level(pipe_wm, plane_id, level)); skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id), skl_plane_trans_wm(pipe_wm, plane_id)); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id), &wm->sagv.wm0); skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), &wm->sagv.trans_wm); } skl_ddb_entry_write(i915, PLANE_BUF_CFG(pipe, plane_id), ddb); if (DISPLAY_VER(i915) < 11) skl_ddb_entry_write(i915, PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y); } void skl_write_cursor_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; int level; for (level = 0; level < i915->display.wm.num_levels; level++) skl_write_wm_level(i915, CUR_WM(pipe, level), skl_plane_wm_level(pipe_wm, plane_id, level)); skl_write_wm_level(i915, CUR_WM_TRANS(pipe), skl_plane_trans_wm(pipe_wm, plane_id)); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; skl_write_wm_level(i915, CUR_WM_SAGV(pipe), &wm->sagv.wm0); skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe), &wm->sagv.trans_wm); } skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb); } static bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { return l1->enable == l2->enable && l1->ignore_lines == l2->ignore_lines && l1->lines == l2->lines && l1->blocks == l2->blocks; } static bool skl_plane_wm_equals(struct drm_i915_private *i915, const struct skl_plane_wm *wm1, const struct skl_plane_wm *wm2) { int level; for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required * ddb allocation. 
*/ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) return false; } return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); } static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, const struct skl_ddb_entry *b) { return a->start < b->end && b->start < a->end; } static void skl_ddb_entry_union(struct skl_ddb_entry *a, const struct skl_ddb_entry *b) { if (a->end && b->end) { a->start = min(a->start, b->start); a->end = max(a->end, b->end); } else if (b->end) { a->start = b->start; a->end = b->end; } } bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, const struct skl_ddb_entry *entries, int num_entries, int ignore_idx) { int i; for (i = 0; i < num_entries; i++) { if (i != ignore_idx && skl_ddb_entries_overlap(ddb, &entries[i])) return true; } return false; } static int skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_plane *plane; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { struct intel_plane_state *plane_state; enum plane_id plane_id = plane->id; if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id], &new_crtc_state->wm.skl.plane_ddb[plane_id]) && skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], &new_crtc_state->wm.skl.plane_ddb_y[plane_id])) continue; plane_state = intel_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); new_crtc_state->async_flip_planes = 0; new_crtc_state->do_async_flip = false; } return 0; } static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) { struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); u8 enabled_slices; enum pipe pipe; /* * FIXME: For now we always enable slice S1 as per * the Bspec display initialization sequence. 
*/ enabled_slices = BIT(DBUF_S1); for_each_pipe(i915, pipe) enabled_slices |= dbuf_state->slices[pipe]; return enabled_slices; } static int skl_compute_ddb(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *old_dbuf_state; struct intel_dbuf_state *new_dbuf_state = NULL; const struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int ret, i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { new_dbuf_state = intel_atomic_get_dbuf_state(state); if (IS_ERR(new_dbuf_state)) return PTR_ERR(new_dbuf_state); old_dbuf_state = intel_atomic_get_old_dbuf_state(state); break; } if (!new_dbuf_state) return 0; new_dbuf_state->active_pipes = intel_calc_active_pipes(state, old_dbuf_state->active_pipes); if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { ret = intel_atomic_lock_global_state(&new_dbuf_state->base); if (ret) return ret; } if (HAS_MBUS_JOINING(i915)) new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes); for_each_intel_crtc(&i915->drm, crtc) { enum pipe pipe = crtc->pipe; new_dbuf_state->slices[pipe] = skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, new_dbuf_state->joined_mbus); if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) continue; ret = intel_atomic_lock_global_state(&new_dbuf_state->base); if (ret) return ret; } new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); if (ret) return ret; if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { /* TODO: Implement vblank synchronized MBUS joining changes */ ret = intel_modeset_all_pipes(state, "MBUS joining change"); if (ret) return ret; } drm_dbg_kms(&i915->drm, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, DISPLAY_INFO(i915)->dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus)); } for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state); if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe]) continue; ret = intel_atomic_lock_global_state(&new_dbuf_state->base); if (ret) return ret; } for_each_intel_crtc(&i915->drm, crtc) { ret = skl_crtc_allocate_ddb(state, crtc); if (ret) return ret; } for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = skl_crtc_allocate_plane_ddb(state, crtc); if (ret) return ret; ret = skl_ddb_add_affected_planes(old_crtc_state, new_crtc_state); if (ret) return ret; } return 0; } static char enast(bool enable) { return enable ? 
'*' : ' '; } static void skl_print_wm_changes(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state; const struct intel_crtc_state *new_crtc_state; struct intel_plane *plane; struct intel_crtc *crtc; int i; if (!drm_debug_enabled(DRM_UT_KMS)) return; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; old_pipe_wm = &old_crtc_state->wm.skl.optimal; new_pipe_wm = &new_crtc_state->wm.skl.optimal; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; if (skl_ddb_entry_equal(old, new)) continue; drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size(old), skl_ddb_entry_size(new)); } for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_plane_wm *old_wm, *new_wm; old_wm = &old_pipe_wm->planes[plane_id]; new_wm = &new_pipe_wm->planes[plane_id]; if (skl_plane_wm_equals(i915, old_wm, new_wm)) continue; drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", plane->base.base.id, plane->base.name, enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm->trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm.enable)); drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", plane->base.base.id, plane->base.name, enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, enast(new_wm->wm[7].ignore_lines), 
new_wm->wm[7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, old_wm->wm[0].blocks, old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm->wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks, old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm->trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv.trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks, new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm->wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks, new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks); drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc, old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm.min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc); } } } static bool skl_plane_selected_wm_equals(struct intel_plane *plane, const struct skl_pipe_wm *old_pipe_wm, const struct skl_pipe_wm *new_pipe_wm) { struct drm_i915_private *i915 = to_i915(plane->base.dev); int level; for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required * ddb allocation. */ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), skl_plane_wm_level(new_pipe_wm, plane->id, level))) return false; } if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) return false; } return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), skl_plane_trans_wm(new_pipe_wm, plane->id)); } /* * To make sure the cursor watermark registers are always consistent * with our computed state the following scenario needs special * treatment: * * 1. enable cursor * 2. move cursor entirely offscreen * 3. disable cursor * * Step 2. does call .disable_plane() but does not zero the watermarks * (since we consider an offscreen cursor still active for the purposes * of watermarks). Step 3. would not normally call .disable_plane() * because the actual plane visibility isn't changing, and we don't * deallocate the cursor ddb until the pipe gets disabled. So we must * force step 3. to call .disable_plane() to update the watermark * registers properly. * * Other planes do not suffer from this issues as their watermarks are * calculated based on the actual plane visibility. 
The only time this * can trigger for the other planes is during the initial readout as the * default value of the watermarks registers is not zero. */ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane *plane; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { struct intel_plane_state *plane_state; enum plane_id plane_id = plane->id; /* * Force a full wm update for every plane on modeset. * Required because the reset value of the wm registers * is non-zero, whereas we want all disabled planes to * have zero watermarks. So if we turn off the relevant * power well the hardware state will go out of sync * with the software state. */ if (!intel_crtc_needs_modeset(new_crtc_state) && skl_plane_selected_wm_equals(plane, &old_crtc_state->wm.skl.optimal, &new_crtc_state->wm.skl.optimal)) continue; plane_state = intel_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); new_crtc_state->async_flip_planes = 0; new_crtc_state->do_async_flip = false; } return 0; } static int skl_compute_wm(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state __maybe_unused *new_crtc_state; int ret, i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { ret = skl_build_pipe_wm(state, crtc); if (ret) return ret; } ret = skl_compute_ddb(state); if (ret) return ret; ret = intel_compute_sagv_mask(state); if (ret) return ret; /* * skl_compute_ddb() will have adjusted the final watermarks * based on how much ddb is available. Now we can actually * check if the final watermarks changed. 
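	 * (The overall ordering is thus: skl_build_pipe_wm() for each crtc,
	 * then skl_compute_ddb(), then intel_compute_sagv_mask(), and only
	 * then the per-plane comparison below.)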
*/ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { ret = skl_wm_add_affected_planes(state, crtc); if (ret) return ret; } skl_print_wm_changes(state); return 0; } static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->enable = val & PLANE_WM_EN; level->ignore_lines = val & PLANE_WM_IGNORE_LINES; level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val); level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val); } static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum plane_id plane_id; int level; u32 val; for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &out->planes[plane_id]; for (level = 0; level < i915->display.wm.num_levels; level++) { if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level)); else val = intel_de_read(i915, CUR_WM(pipe, level)); skl_wm_level_from_reg_val(val, &wm->wm[level]); } if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id)); else val = intel_de_read(i915, CUR_WM_TRANS(pipe)); skl_wm_level_from_reg_val(val, &wm->trans_wm); if (HAS_HW_SAGV_WM(i915)) { if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id)); else val = intel_de_read(i915, CUR_WM_SAGV(pipe)); skl_wm_level_from_reg_val(val, &wm->sagv.wm0); if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id)); else val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe)); skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); } else if (DISPLAY_VER(i915) >= 12) { wm->sagv.wm0 = wm->wm[0]; wm->sagv.trans_wm = wm->trans_wm; } } } static void skl_wm_get_hw_state(struct drm_i915_private *i915) { struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc *crtc; if (HAS_MBUS_JOINING(i915)) dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum pipe pipe = crtc->pipe; unsigned int mbus_offset; enum plane_id plane_id; u8 slices; memset(&crtc_state->wm.skl.optimal, 0, sizeof(crtc_state->wm.skl.optimal)); if (crtc_state->hw.active) skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe])); for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; if (!crtc_state->hw.active) continue; skl_ddb_get_hw_plane_state(i915, crtc->pipe, plane_id, ddb, ddb_y); skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); } dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); /* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets. 
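		 * (E.g. two pipes sitting on different DBuf slice groups can
		 * both report a range starting at 0; adding mbus_ddb_offset()
		 * shifts each range by its slice group's start so the ranges
		 * become comparable across pipes.)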
*/ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, dbuf_state->joined_mbus); mbus_offset = mbus_ddb_offset(i915, slices); crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; /* The slices actually used by the planes on the pipe */ dbuf_state->slices[pipe] = skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb); drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", crtc->base.base.id, crtc->base.name, dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe].end, dbuf_state->active_pipes, str_yes_no(dbuf_state->joined_mbus)); } dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices; } static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) { const struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); entries[crtc->pipe] = crtc_state->wm.skl.ddb; } for_each_intel_crtc(&i915->drm, crtc) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); u8 slices; slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, dbuf_state->joined_mbus); if (dbuf_state->slices[crtc->pipe] & ~slices) return true; if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, I915_MAX_PIPES, crtc->pipe)) return true; } return false; } static void skl_wm_sanitize(struct drm_i915_private *i915) { struct intel_crtc *crtc; /* * On TGL/RKL (at least) the BIOS likes to assign the planes * to the wrong DBUF slices. This will cause an infinite loop * in skl_commit_modeset_enables() as it can't find a way to * transition between the old bogus DBUF layout to the new * proper DBUF layout without DBUF allocation overlaps between * the planes (which cannot be allowed or else the hardware * may hang). If we detect a bogus DBUF layout just turn off * all the planes so that skl_commit_modeset_enables() can * simply ignore them. 
*/ if (!skl_dbuf_is_misconfigured(i915)) return; drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); for_each_intel_crtc(&i915->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); if (plane_state->uapi.visible) intel_plane_disable_noatomic(crtc, plane); drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); } } static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) { skl_wm_get_hw_state(i915); skl_wm_sanitize(i915); } void intel_wm_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct skl_hw_state { struct skl_ddb_entry ddb[I915_MAX_PLANES]; struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; struct skl_pipe_wm wm; } *hw; const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; struct intel_plane *plane; u8 hw_enabled_slices; int level; if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) return; hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!hw) return; skl_pipe_wm_get_hw_state(crtc, &hw->wm); skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915); if (DISPLAY_VER(i915) >= 11 && hw_enabled_slices != i915->display.dbuf.enabled_slices) drm_err(&i915->drm, "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", i915->display.dbuf.enabled_slices, hw_enabled_slices); for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; const struct skl_wm_level *hw_wm_level, *sw_wm_level; /* Watermarks */ for (level = 0; level < i915->display.wm.num_levels; level++) { hw_wm_level = &hw->wm.planes[plane->id].wm[level]; sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) continue; drm_err(&i915->drm, "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, level, sw_wm_level->enable, sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level->blocks, hw_wm_level->lines); } hw_wm_level = &hw->wm.planes[plane->id].trans_wm; sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { drm_err(&i915->drm, "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level->blocks, hw_wm_level->lines); } hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; if (HAS_HW_SAGV_WM(i915) && !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { drm_err(&i915->drm, "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level->blocks, hw_wm_level->lines); } hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; if (HAS_HW_SAGV_WM(i915) && !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { drm_err(&i915->drm, "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, 
sw_wm_level->enable, sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level->blocks, hw_wm_level->lines); } /* DDB */ hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { drm_err(&i915->drm, "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", plane->base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry->end, hw_ddb_entry->start, hw_ddb_entry->end); } } kfree(hw); } bool skl_watermark_ipc_enabled(struct drm_i915_private *i915) { return i915->display.wm.ipc_enabled; } void skl_watermark_ipc_update(struct drm_i915_private *i915) { if (!HAS_IPC(i915)) return; intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE, skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0); } static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915) { /* Display WA #0477 WaDisableIPC: skl */ if (IS_SKYLAKE(i915)) return false; /* Display WA #1141: SKL:all KBL:all CFL */ if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) return i915->dram_info.symmetric_memory; return true; } void skl_watermark_ipc_init(struct drm_i915_private *i915) { if (!HAS_IPC(i915)) return; i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); skl_watermark_ipc_update(i915); } static void adjust_wm_latency(struct drm_i915_private *i915, u16 wm[], int num_levels, int read_latency) { bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; int i, level; /* * If a level n (n > 1) has a 0us latency, all levels m (m >= n) * need to be disabled. We make sure to sanitize the values out * of the punit to satisfy this requirement. */ for (level = 1; level < num_levels; level++) { if (wm[level] == 0) { for (i = level + 1; i < num_levels; i++) wm[i] = 0; num_levels = level; break; } } /* * WaWmMemoryReadLatency * * punit doesn't take into account the read latency so we need * to add proper adjustement to each valid level we retrieve * from the punit when level 0 response data is 0us. */ if (wm[0] == 0) { for (level = 0; level < num_levels; level++) wm[level] += read_latency; } /* * WA Level-0 adjustment for 16GB DIMMs: SKL+ * If we could not get dimm info enable this WA to prevent from * any underrun. If not able to get Dimm info assume 16GB dimm * to avoid any underrun. */ if (wm_lv_0_adjust_needed) wm[0] += 1; } static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { int num_levels = i915->display.wm.num_levels; u32 val; val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); val = intel_de_read(i915, MTL_LATENCY_LP2_LP3); wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); val = intel_de_read(i915, MTL_LATENCY_LP4_LP5); wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); adjust_wm_latency(i915, wm, num_levels, 6); } static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { int num_levels = i915->display.wm.num_levels; int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; int mult = IS_DG2(i915) ? 
2 : 1; u32 val; int ret; /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); return; } wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); return; } wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; adjust_wm_latency(i915, wm, num_levels, read_latency); } static void skl_setup_wm_latency(struct drm_i915_private *i915) { if (HAS_HW_SAGV_WM(i915)) i915->display.wm.num_levels = 6; else i915->display.wm.num_levels = 8; if (DISPLAY_VER(i915) >= 14) mtl_read_wm_latency(i915, i915->display.wm.skl_latency); else skl_read_wm_latency(i915, i915->display.wm.skl_latency); intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency); } static const struct intel_wm_funcs skl_wm_funcs = { .compute_global_watermarks = skl_compute_wm, .get_hw_state = skl_wm_get_hw_state_and_sanitize, }; void skl_wm_init(struct drm_i915_private *i915) { intel_sagv_init(i915); skl_setup_wm_latency(i915); i915->display.funcs.wm = &skl_wm_funcs; } static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) { struct intel_dbuf_state *dbuf_state; dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); if (!dbuf_state) return NULL; return &dbuf_state->base; } static void intel_dbuf_destroy_state(struct intel_global_obj *obj, struct intel_global_state *state) { kfree(state); } static const struct intel_global_state_funcs intel_dbuf_funcs = { .atomic_duplicate_state = intel_dbuf_duplicate_state, .atomic_destroy_state = intel_dbuf_destroy_state, }; struct intel_dbuf_state * intel_atomic_get_dbuf_state(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_global_state *dbuf_state; dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); if (IS_ERR(dbuf_state)) return ERR_CAST(dbuf_state); return to_intel_dbuf_state(dbuf_state); } int intel_dbuf_init(struct drm_i915_private *i915) { struct intel_dbuf_state *dbuf_state; dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); if (!dbuf_state) return -ENOMEM; intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj, &dbuf_state->base, &intel_dbuf_funcs); return 0; } /* * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before * update the request state of all DBUS slices. */ static void update_mbus_pre_enable(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); u32 mbus_ctl, dbuf_min_tracker_val; enum dbuf_slice slice; const struct intel_dbuf_state *dbuf_state = intel_atomic_get_new_dbuf_state(state); if (!HAS_MBUS_JOINING(i915)) return; /* * TODO: Implement vblank synchronized MBUS joining changes. 
* Must be properly coordinated with dbuf reprogramming. */ if (dbuf_state->joined_mbus) { mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | MBUS_JOIN_PIPE_SELECT_NONE; dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); } else { mbus_ctl = MBUS_HASHING_MODE_2x2 | MBUS_JOIN_PIPE_SELECT_NONE; dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); } intel_de_rmw(i915, MBUS_CTL, MBUS_HASHING_MODE_MASK | MBUS_JOIN | MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); for_each_dbuf_slice(i915, slice) intel_de_rmw(i915, DBUF_CTL_S(slice), DBUF_MIN_TRACKER_STATE_SERVICE_MASK, dbuf_min_tracker_val); } void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (!new_dbuf_state || (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) return; WARN_ON(!new_dbuf_state->base.changed); update_mbus_pre_enable(state); gen9_dbuf_slices_update(i915, old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices); } void intel_dbuf_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (!new_dbuf_state || (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) return; WARN_ON(!new_dbuf_state->base.changed); gen9_dbuf_slices_update(i915, new_dbuf_state->enabled_slices); } static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) { switch (pipe) { case PIPE_A: return !(active_pipes & BIT(PIPE_D)); case PIPE_D: return !(active_pipes & BIT(PIPE_A)); case PIPE_B: return !(active_pipes & BIT(PIPE_C)); case PIPE_C: return !(active_pipes & BIT(PIPE_B)); default: /* to suppress compiler warning */ MISSING_CASE(pipe); break; } return false; } void intel_mbus_dbox_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; const struct intel_crtc_state *new_crtc_state; const struct intel_crtc *crtc; u32 val = 0; int i; if (DISPLAY_VER(i915) < 11) return; new_dbuf_state = intel_atomic_get_new_dbuf_state(state); old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (!new_dbuf_state || (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) return; if (DISPLAY_VER(i915) >= 14) val |= MBUS_DBOX_I_CREDIT(2); if (DISPLAY_VER(i915) >= 12) { val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; } if (DISPLAY_VER(i915) >= 14) val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8); else if (IS_ALDERLAKE_P(i915)) /* Wa_22010947358:adl-p */ val |= new_dbuf_state->joined_mbus ? 
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4); else val |= MBUS_DBOX_A_CREDIT(2); if (DISPLAY_VER(i915) >= 14) { val |= MBUS_DBOX_B_CREDIT(0xA); } else if (IS_ALDERLAKE_P(i915)) { val |= MBUS_DBOX_BW_CREDIT(2); val |= MBUS_DBOX_B_CREDIT(8); } else if (DISPLAY_VER(i915) >= 12) { val |= MBUS_DBOX_BW_CREDIT(2); val |= MBUS_DBOX_B_CREDIT(12); } else { val |= MBUS_DBOX_BW_CREDIT(1); val |= MBUS_DBOX_B_CREDIT(8); } for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { u32 pipe_val = val; if (!new_crtc_state->hw.active) continue; if (DISPLAY_VER(i915) >= 14) { if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, new_dbuf_state->active_pipes)) pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL; else pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL; } intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val); } } static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) { struct drm_i915_private *i915 = m->private; seq_printf(m, "Isochronous Priority Control: %s\n", str_yes_no(skl_watermark_ipc_enabled(i915))); return 0; } static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; return single_open(file, skl_watermark_ipc_status_show, i915); } static ssize_t skl_watermark_ipc_status_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *i915 = m->private; intel_wakeref_t wakeref; bool enable; int ret; ret = kstrtobool_from_user(ubuf, len, &enable); if (ret < 0) return ret; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { if (!skl_watermark_ipc_enabled(i915) && enable) drm_info(&i915->drm, "Enabling IPC: WM will be proper only after next commit\n"); i915->display.wm.ipc_enabled = enable; skl_watermark_ipc_update(i915); } return len; } static const struct file_operations skl_watermark_ipc_status_fops = { .owner = THIS_MODULE, .open = skl_watermark_ipc_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = skl_watermark_ipc_status_write }; static int intel_sagv_status_show(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = m->private; static const char * const sagv_status[] = { [I915_SAGV_UNKNOWN] = "unknown", [I915_SAGV_DISABLED] = "disabled", [I915_SAGV_ENABLED] = "enabled", [I915_SAGV_NOT_CONTROLLED] = "not controlled", }; seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv)); seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); void skl_watermark_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; if (HAS_IPC(i915)) debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, &skl_watermark_ipc_status_fops); if (HAS_SAGV(i915)) debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, &intel_sagv_status_fops); }
linux-master
drivers/gpu/drm/i915/display/skl_watermark.c
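An illustrative aside (not part of the kernel sources above): skl_wm_level_from_reg_val() in skl_watermark.c unpacks one hardware watermark register into enable/blocks/lines fields with mask-and-shift helpers. The small user-space sketch below mirrors that decode pattern; the EX_WM_* bit layout and the sample register value are assumptions made up for the example, not the real PLANE_WM register definition.

/* Stand-alone sketch of REG_FIELD_GET-style bitfield decoding (assumed layout). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_WM_EN          (1u << 31)      /* assumed enable bit */
#define EX_WM_LINES_MASK  (0x1fu << 14)   /* assumed lines field */
#define EX_WM_BLOCKS_MASK (0x7ffu << 0)   /* assumed blocks field */

/* Generic "extract a right-aligned field" helper, like REG_FIELD_GET(). */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
        return (val & mask) >> __builtin_ctz(mask);
}

struct ex_wm_level {
        bool enable;
        unsigned int blocks;
        unsigned int lines;
};

static void ex_wm_level_from_reg_val(uint32_t val, struct ex_wm_level *level)
{
        level->enable = val & EX_WM_EN;
        level->blocks = field_get(EX_WM_BLOCKS_MASK, val);
        level->lines = field_get(EX_WM_LINES_MASK, val);
}

int main(void)
{
        struct ex_wm_level level;

        ex_wm_level_from_reg_val(0x8000c07bu, &level);   /* made-up register value */
        printf("enable=%d blocks=%u lines=%u\n",
               level.enable, level.blocks, level.lines);
        return 0;
}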
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"

#include "intel_de.h"
#include "intel_display.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"

/**
 * intel_dkl_phy_init - initialize Dekel PHY
 * @i915: i915 device instance
 */
void intel_dkl_phy_init(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->display.dkl.phy_lock);
}

static void
dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
{
	enum tc_port tc_port = DKL_REG_TC_PORT(reg);

	drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);

	intel_de_write(i915,
		       HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, reg.bank_idx));
}

/**
 * intel_dkl_phy_read - read a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 *
 * Read the @reg Dekel PHY register.
 *
 * Returns the read value.
 */
u32
intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
{
	u32 val;

	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg);
	val = intel_de_read(i915, DKL_REG_MMIO(reg));

	spin_unlock(&i915->display.dkl.phy_lock);

	return val;
}

/**
 * intel_dkl_phy_write - write a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @val: value to write
 *
 * Write @val to the @reg Dekel PHY register.
 */
void
intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 val)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg);
	intel_de_write(i915, DKL_REG_MMIO(reg), val);

	spin_unlock(&i915->display.dkl.phy_lock);
}

/**
 * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @clear: mask to clear
 * @set: mask to set
 *
 * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing
 * this value back to the register if the value differs from the read one.
 */
void
intel_dkl_phy_rmw(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 clear, u32 set)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg);
	intel_de_rmw(i915, DKL_REG_MMIO(reg), clear, set);

	spin_unlock(&i915->display.dkl.phy_lock);
}

/**
 * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 *
 * Read the @reg Dekel PHY register without returning the read value.
 */
void
intel_dkl_phy_posting_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg);
	intel_de_posting_read(i915, DKL_REG_MMIO(reg));

	spin_unlock(&i915->display.dkl.phy_lock);
}
linux-master
drivers/gpu/drm/i915/display/intel_dkl_phy.c
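An illustrative aside (not part of the kernel sources above): each accessor in intel_dkl_phy.c first selects the PHY bank via the HIP index register and then touches the target register, with both steps serialized under a single spinlock. The minimal user-space analogue below sketches that "index then access, all under one lock" pattern with a pthread mutex and a fake register array; none of the names correspond to real hardware.

/* Stand-alone sketch of the locked index-register access pattern (assumed device). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hip_index;       /* stands in for HIP_INDEX_REG */
static uint32_t regs[2][16];         /* two banks of fake PHY registers */

static uint32_t phy_read(unsigned int bank, unsigned int offset)
{
        uint32_t val;

        pthread_mutex_lock(&phy_lock);
        hip_index = bank;            /* dkl_phy_set_hip_idx() analogue */
        val = regs[hip_index][offset];
        pthread_mutex_unlock(&phy_lock);

        return val;
}

static void phy_rmw(unsigned int bank, unsigned int offset,
                    uint32_t clear, uint32_t set)
{
        pthread_mutex_lock(&phy_lock);
        hip_index = bank;
        regs[hip_index][offset] = (regs[hip_index][offset] & ~clear) | set;
        pthread_mutex_unlock(&phy_lock);
}

int main(void)
{
        phy_rmw(1, 4, 0xff, 0x2a);
        printf("bank1 reg4 = 0x%x\n", phy_read(1, 4));
        return 0;
}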
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "intel_display_trace.h"
#endif
linux-master
drivers/gpu/drm/i915/display/intel_display_trace.c
/************************************************************************** Copyright © 2006 Dave Airlie All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ #include "intel_display_types.h" #include "intel_dvo_dev.h" #define CH7xxx_REG_VID 0x4a #define CH7xxx_REG_DID 0x4b #define CH7011_VID 0x83 /* 7010 as well */ #define CH7010B_VID 0x05 #define CH7009A_VID 0x84 #define CH7009B_VID 0x85 #define CH7301_VID 0x95 #define CH7xxx_VID 0x84 #define CH7xxx_DID 0x17 #define CH7010_DID 0x16 #define CH7xxx_NUM_REGS 0x4c #define CH7xxx_CM 0x1c #define CH7xxx_CM_XCM (1<<0) #define CH7xxx_CM_MCP (1<<2) #define CH7xxx_INPUT_CLOCK 0x1d #define CH7xxx_GPIO 0x1e #define CH7xxx_GPIO_HPIR (1<<3) #define CH7xxx_IDF 0x1f #define CH7xxx_IDF_IBS (1<<7) #define CH7xxx_IDF_DES (1<<6) #define CH7xxx_IDF_HSP (1<<3) #define CH7xxx_IDF_VSP (1<<4) #define CH7xxx_CONNECTION_DETECT 0x20 #define CH7xxx_CDET_DVI (1<<5) #define CH7xxx_DAC_CNTL 0x21 #define CH7xxx_SYNCO_MASK (3 << 3) #define CH7xxx_SYNCO_VGA_HSYNC (1 << 3) #define CH7xxx_CLOCK_OUTPUT 0x22 #define CH7xxx_BCOEN (1 << 4) #define CH7xxx_BCOP (1 << 3) #define CH7xxx_BCO_MASK (7 << 0) #define CH7xxx_BCO_VGA_VSYNC (6 << 0) #define CH7301_HOTPLUG 0x23 #define CH7xxx_TCTL 0x31 #define CH7xxx_TVCO 0x32 #define CH7xxx_TPCP 0x33 #define CH7xxx_TPD 0x34 #define CH7xxx_TPVT 0x35 #define CH7xxx_TLPF 0x36 #define CH7xxx_TCT 0x37 #define CH7301_TEST_PATTERN 0x48 #define CH7xxx_PM 0x49 #define CH7xxx_PM_FPD (1<<0) #define CH7301_PM_DACPD0 (1<<1) #define CH7301_PM_DACPD1 (1<<2) #define CH7301_PM_DACPD2 (1<<3) #define CH7xxx_PM_DVIL (1<<6) #define CH7xxx_PM_DVIP (1<<7) #define CH7301_SYNC_POLARITY 0x56 #define CH7301_SYNC_RGB_YUV (1<<0) #define CH7301_SYNC_POL_DVI (1<<5) /** @file * driver for the Chrontel 7xxx DVI chip over DVO. 
*/ static struct ch7xxx_id_struct { u8 vid; char *name; } ch7xxx_ids[] = { { CH7011_VID, "CH7011" }, { CH7010B_VID, "CH7010B" }, { CH7009A_VID, "CH7009A" }, { CH7009B_VID, "CH7009B" }, { CH7301_VID, "CH7301" }, }; static struct ch7xxx_did_struct { u8 did; char *name; } ch7xxx_dids[] = { { CH7xxx_DID, "CH7XXX" }, { CH7010_DID, "CH7010B" }, }; struct ch7xxx_priv { bool quiet; }; static char *ch7xxx_get_id(u8 vid) { int i; for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) { if (ch7xxx_ids[i].vid == vid) return ch7xxx_ids[i].name; } return NULL; } static char *ch7xxx_get_did(u8 did) { int i; for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) { if (ch7xxx_dids[i].did == did) return ch7xxx_dids[i].name; } return NULL; } /** Reads an 8 bit register */ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } /** Writes an 8 bit register */ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool ch7xxx_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the CH7xxx chip on the specified i2c bus */ struct ch7xxx_priv *ch7xxx; u8 vendor, device; char *name, *devid; ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); if (ch7xxx == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = ch7xxx; ch7xxx->quiet = true; if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor)) goto out; name = ch7xxx_get_id(vendor); if (!name) { DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n", vendor, adapter->name, dvo->slave_addr); goto out; } if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) goto out; devid = ch7xxx_get_did(device); if (!devid) { DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n", device, adapter->name, dvo->slave_addr); goto out; } ch7xxx->quiet = false; DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", name, vendor, device); return true; out: kfree(ch7xxx); return false; } static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) { u8 cdet, orig_pm, pm; ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); pm = orig_pm; pm &= ~CH7xxx_PM_FPD; pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP; ch7xxx_writeb(dvo, CH7xxx_PM, pm); ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet); ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm); if (cdet & CH7xxx_CDET_DVI) return connector_status_connected; return connector_status_disconnected; } static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; return MODE_OK; } static void ch7xxx_mode_set(struct 
intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { u8 tvco, tpcp, tpd, tlpf, idf; if (mode->clock <= 65000) { tvco = 0x23; tpcp = 0x08; tpd = 0x16; tlpf = 0x60; } else { tvco = 0x2d; tpcp = 0x06; tpd = 0x26; tlpf = 0xa0; } ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00); ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco); ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp); ch7xxx_writeb(dvo, CH7xxx_TPD, tpd); ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30); ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf); ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00); ch7xxx_readb(dvo, CH7xxx_IDF, &idf); idf |= CH7xxx_IDF_IBS; idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP); if (mode->flags & DRM_MODE_FLAG_PHSYNC) idf |= CH7xxx_IDF_HSP; if (mode->flags & DRM_MODE_FLAG_PVSYNC) idf |= CH7xxx_IDF_VSP; ch7xxx_writeb(dvo, CH7xxx_IDF, idf); ch7xxx_writeb(dvo, CH7xxx_DAC_CNTL, CH7xxx_SYNCO_VGA_HSYNC); ch7xxx_writeb(dvo, CH7xxx_CLOCK_OUTPUT, CH7xxx_BCOEN | CH7xxx_BCO_VGA_VSYNC); } /* set the CH7xxx power state */ static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) { if (enable) ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); else ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); } static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) { u8 val; ch7xxx_readb(dvo, CH7xxx_PM, &val); if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP)) return true; else return false; } static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) { int i; for (i = 0; i < CH7xxx_NUM_REGS; i++) { u8 val; if ((i % 8) == 0) DRM_DEBUG_KMS("\n %02X: ", i); ch7xxx_readb(dvo, i, &val); DRM_DEBUG_KMS("%02X ", val); } } static void ch7xxx_destroy(struct intel_dvo_device *dvo) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; if (ch7xxx) { kfree(ch7xxx); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops ch7xxx_ops = { .init = ch7xxx_init, .detect = ch7xxx_detect, .mode_valid = ch7xxx_mode_valid, .mode_set = ch7xxx_mode_set, .dpms = ch7xxx_dpms, .get_hw_state = ch7xxx_get_hw_state, .dump_regs = ch7xxx_dump_regs, .destroy = ch7xxx_destroy, };
linux-master
drivers/gpu/drm/i915/display/dvo_ch7xxx.c
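An illustrative aside (not part of the kernel sources above): ch7xxx_readb() reads a chip register by sending a one-byte write of the register address followed by a one-byte read, as a single two-message i2c transfer. The sketch below shows the same pattern through the Linux /dev/i2c-* userspace interface; the bus path and the 0x76 slave address are placeholders, not the CH7xxx's actual address.

/* Stand-alone sketch of a write-address/read-data i2c register read (assumed bus/slave). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int i2c_read_reg(int fd, uint16_t slave, uint8_t reg, uint8_t *out)
{
        struct i2c_msg msgs[2] = {
                { .addr = slave, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = out },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
        uint8_t vid;
        int fd = open("/dev/i2c-0", O_RDWR);          /* placeholder bus */

        if (fd < 0)
                return 1;
        if (i2c_read_reg(fd, 0x76, 0x4a, &vid) == 0)  /* 0x4a: CH7xxx_REG_VID */
                printf("vendor id: 0x%02x\n", vid);
        close(fd);
        return 0;
}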
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "vlv_sideband_reg.h" #include "intel_display_power_map.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #define __LIST_INLINE_ELEMS(__elem_type, ...) \ ((__elem_type[]) { __VA_ARGS__ }) #define __LIST(__elems) { \ .list = __elems, \ .count = ARRAY_SIZE(__elems), \ } #define I915_PW_DOMAINS(...) \ (const struct i915_power_domain_list) \ __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__)) #define I915_DECL_PW_DOMAINS(__name, ...) \ static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__) /* Zero-length list assigns all power domains, a NULL list assigns none. */ #define I915_PW_DOMAINS_NONE NULL #define I915_PW_DOMAINS_ALL /* zero-length list */ #define I915_PW_INSTANCES(...) \ (const struct i915_power_well_instance_list) \ __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__)) #define I915_PW(_name, _domain_list, ...) \ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ } struct i915_power_well_desc_list { const struct i915_power_well_desc *list; u8 count; }; #define I915_PW_DESCRIPTORS(x) __LIST(x) I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL); static const struct i915_power_well_desc i9xx_power_wells_always_on[] = { { .instances = &I915_PW_INSTANCES( I915_PW("always-on", &i9xx_pwdoms_always_on), ), .ops = &i9xx_always_on_power_well_ops, .always_on = true, }, }; static const struct i915_power_well_desc_list i9xx_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), }; I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_PIPE_PANEL_FITTER_B, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_INIT); static const struct i915_power_well_desc i830_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("pipes", &i830_pwdoms_pipes), ), .ops = &i830_pipes_power_well_ops, }, }; static const struct i915_power_well_desc_list i830_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(i830_power_wells_main), }; I915_DECL_PW_DOMAINS(hsw_pwdoms_display, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_PIPE_PANEL_FITTER_B, POWER_DOMAIN_PIPE_PANEL_FITTER_C, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_PORT_DDI_LANES_D, POWER_DOMAIN_PORT_CRT, /* DDI E */ POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); static const struct i915_power_well_desc hsw_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("display", &hsw_pwdoms_display, .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .id = HSW_DISP_PW_GLOBAL), ), .ops = &hsw_power_well_ops, .has_vga = true, }, }; static const struct i915_power_well_desc_list hsw_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(hsw_power_wells_main), }; I915_DECL_PW_DOMAINS(bdw_pwdoms_display, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, POWER_DOMAIN_PIPE_PANEL_FITTER_B, POWER_DOMAIN_PIPE_PANEL_FITTER_C, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_PORT_DDI_LANES_D, POWER_DOMAIN_PORT_CRT, /* DDI E */ POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO_MMIO, 
POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); static const struct i915_power_well_desc bdw_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("display", &bdw_pwdoms_display, .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .id = HSW_DISP_PW_GLOBAL), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), }, }; static const struct i915_power_well_desc_list bdw_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(bdw_power_wells_main), }; I915_DECL_PW_DOMAINS(vlv_pwdoms_display, POWER_DOMAIN_DISPLAY_CORE, POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_PIPE_PANEL_FITTER_B, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_GMBUS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); static const struct i915_power_well_desc vlv_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("display", &vlv_pwdoms_display, .vlv.idx = PUNIT_PWGT_IDX_DISP2D, .id = VLV_DISP_PW_DISP2D), ), .ops = &vlv_display_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes, .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01), I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes, .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23), I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes, .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01), I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes, .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23), ), .ops = &vlv_dpio_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc, .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, .id = VLV_DISP_PW_DPIO_CMN_BC), ), .ops = &vlv_dpio_cmn_power_well_ops, }, }; static const struct i915_power_well_desc_list vlv_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(vlv_power_wells_main), }; I915_DECL_PW_DOMAINS(chv_pwdoms_display, POWER_DOMAIN_DISPLAY_CORE, POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_PIPE_PANEL_FITTER_B, POWER_DOMAIN_PIPE_PANEL_FITTER_C, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_PORT_DDI_LANES_D, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_IO_D, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, POWER_DOMAIN_GMBUS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d, POWER_DOMAIN_PORT_DDI_LANES_D, 
POWER_DOMAIN_AUX_IO_D, POWER_DOMAIN_AUX_D, POWER_DOMAIN_INIT); static const struct i915_power_well_desc chv_power_wells_main[] = { { /* * Pipe A power well is the new disp2d well. Pipe B and C * power wells don't actually exist. Pipe A power well is * required for any pipe to work. */ .instances = &I915_PW_INSTANCES( I915_PW("display", &chv_pwdoms_display), ), .ops = &chv_pipe_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc, .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, .id = VLV_DISP_PW_DPIO_CMN_BC), I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d, .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, .id = CHV_DISP_PW_DPIO_CMN_D), ), .ops = &chv_dpio_cmn_power_well_ops, }, }; static const struct i915_power_well_desc_list chv_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(chv_power_wells_main), }; #define SKL_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_A, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_TRANSCODER_C, \ POWER_DOMAIN_PORT_DDI_LANES_B, \ POWER_DOMAIN_PORT_DDI_LANES_C, \ POWER_DOMAIN_PORT_DDI_LANES_D, \ POWER_DOMAIN_PORT_DDI_LANES_E, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_IO_B, \ POWER_DOMAIN_AUX_IO_C, \ POWER_DOMAIN_AUX_IO_D, \ POWER_DOMAIN_AUX_B, \ POWER_DOMAIN_AUX_C, \ POWER_DOMAIN_AUX_D I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2, SKL_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off, SKL_PW_2_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e, POWER_DOMAIN_PORT_DDI_IO_A, POWER_DOMAIN_PORT_DDI_IO_E, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D, POWER_DOMAIN_INIT); static const struct i915_power_well_desc skl_power_wells_pw_1[] = { { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("PW_1", I915_PW_DOMAINS_NONE, .hsw.idx = SKL_PW_CTL_IDX_PW_1, .id = SKL_DISP_PW_1), ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, }, }; static const struct i915_power_well_desc skl_power_wells_main[] = { { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("MISC_IO", I915_PW_DOMAINS_NONE, .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, .id = SKL_DISP_PW_MISC_IO), ), .ops = &hsw_power_well_ops, .always_on = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &skl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &skl_pwdoms_pw_2, .hsw.idx = SKL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E), I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D), ), .ops = &hsw_power_well_ops, }, }; static const struct i915_power_well_desc_list skl_power_wells[] = { 
I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(skl_power_wells_pw_1), I915_PW_DESCRIPTORS(skl_power_wells_main), }; #define BXT_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_A, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_TRANSCODER_C, \ POWER_DOMAIN_PORT_DDI_LANES_B, \ POWER_DOMAIN_PORT_DDI_LANES_C, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_IO_B, \ POWER_DOMAIN_AUX_IO_C, \ POWER_DOMAIN_AUX_B, \ POWER_DOMAIN_AUX_C I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2, BXT_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off, BXT_PW_2_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a, POWER_DOMAIN_PORT_DDI_LANES_A, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_A, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); static const struct i915_power_well_desc bxt_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &bxt_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &bxt_pwdoms_pw_2, .hsw.idx = SKL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a, .bxt.phy = DPIO_PHY1, .id = BXT_DISP_PW_DPIO_CMN_A), I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc, .bxt.phy = DPIO_PHY0, .id = VLV_DISP_PW_DPIO_CMN_BC), ), .ops = &bxt_dpio_cmn_power_well_ops, }, }; static const struct i915_power_well_desc_list bxt_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(skl_power_wells_pw_1), I915_PW_DESCRIPTORS(bxt_power_wells_main), }; #define GLK_PW_2_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_A, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_TRANSCODER_C, \ POWER_DOMAIN_PORT_DDI_LANES_B, \ POWER_DOMAIN_PORT_DDI_LANES_C, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_IO_B, \ POWER_DOMAIN_AUX_IO_C, \ POWER_DOMAIN_AUX_B, \ POWER_DOMAIN_AUX_C I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2, GLK_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off, GLK_PW_2_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A); I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B); I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C); I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a, POWER_DOMAIN_PORT_DDI_LANES_A, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_A, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b, POWER_DOMAIN_PORT_DDI_LANES_B, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_B, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c, POWER_DOMAIN_PORT_DDI_LANES_C, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); 
I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_A, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_B, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_C, POWER_DOMAIN_INIT); static const struct i915_power_well_desc glk_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &glk_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &glk_pwdoms_pw_2, .hsw.idx = SKL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a, .bxt.phy = DPIO_PHY1, .id = BXT_DISP_PW_DPIO_CMN_A), I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b, .bxt.phy = DPIO_PHY0, .id = VLV_DISP_PW_DPIO_CMN_BC), I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c, .bxt.phy = DPIO_PHY2, .id = GLK_DISP_PW_DPIO_CMN_C), ), .ops = &bxt_dpio_cmn_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B), I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C), I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), ), .ops = &hsw_power_well_ops, }, }; static const struct i915_power_well_desc_list glk_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(skl_power_wells_pw_1), I915_PW_DESCRIPTORS(glk_power_wells_main), }; /* * ICL PW_0/PG_0 domains (HW/DMC control): * - PCI * - clocks except port PLL * - central power except FBC * - shared functions except pipe interrupts, pipe MBUS, DBUF registers * ICL PW_1/PG_1 domains (HW/DMC control): * - DBUF function * - PIPE_A and its planes, except VGA * - transcoder EDP + PSR * - transcoder DSI * - DDI_A * - FBC */ #define ICL_PW_4_POWER_DOMAINS \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4, ICL_PW_4_POWER_DOMAINS, POWER_DOMAIN_INIT); /* VDSC/joining */ #define ICL_PW_3_POWER_DOMAINS \ ICL_PW_4_POWER_DOMAINS, \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_TRANSCODER_A, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_TRANSCODER_C, \ POWER_DOMAIN_PORT_DDI_LANES_B, \ POWER_DOMAIN_PORT_DDI_LANES_C, \ POWER_DOMAIN_PORT_DDI_LANES_D, \ POWER_DOMAIN_PORT_DDI_LANES_E, \ POWER_DOMAIN_PORT_DDI_LANES_F, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_IO_B, \ POWER_DOMAIN_AUX_IO_C, \ POWER_DOMAIN_AUX_IO_D, \ POWER_DOMAIN_AUX_IO_E, \ POWER_DOMAIN_AUX_IO_F, \ POWER_DOMAIN_AUX_B, \ POWER_DOMAIN_AUX_C, \ POWER_DOMAIN_AUX_D, \ POWER_DOMAIN_AUX_E, \ POWER_DOMAIN_AUX_F, \ POWER_DOMAIN_AUX_TBT1, \ POWER_DOMAIN_AUX_TBT2, \ POWER_DOMAIN_AUX_TBT3, \ POWER_DOMAIN_AUX_TBT4 I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3, ICL_PW_3_POWER_DOMAINS, POWER_DOMAIN_INIT); /* * - transcoder WD * - KVMR (HW control) */ #define ICL_PW_2_POWER_DOMAINS \ ICL_PW_3_POWER_DOMAINS, \ POWER_DOMAIN_TRANSCODER_VDSC_PW2 I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2, ICL_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); /* * - KVMR (HW control) */ I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off, 
ICL_PW_2_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E); I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_A); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_IO_B, POWER_DOMAIN_AUX_B); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_IO_C, POWER_DOMAIN_AUX_C); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_IO_D, POWER_DOMAIN_AUX_D); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_IO_E, POWER_DOMAIN_AUX_E); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_IO_F, POWER_DOMAIN_AUX_F); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); static const struct i915_power_well_desc icl_power_wells_pw_1[] = { { /* Handled by the DMC firmware */ .instances = &I915_PW_INSTANCES( I915_PW("PW_1", I915_PW_DOMAINS_NONE, .hsw.idx = ICL_PW_CTL_IDX_PW_1, .id = SKL_DISP_PW_1), ), .ops = &hsw_power_well_ops, .always_on = true, .has_fuses = true, }, }; static const struct i915_power_well_desc icl_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &icl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &icl_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_3", &icl_pwdoms_pw_3, .hsw.idx = ICL_PW_CTL_IDX_PW_3, .id = ICL_DISP_PW_3), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D), I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E), I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F), ), .ops = &icl_ddi_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D), I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E), I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F), ), .ops = &icl_aux_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1), I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2), I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3), I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4), ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_4", &icl_pwdoms_pw_4, .hsw.idx = ICL_PW_CTL_IDX_PW_4), ), .ops = &hsw_power_well_ops, 
.irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, }, }; static const struct i915_power_well_desc_list icl_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(icl_power_wells_main), }; #define TGL_PW_5_POWER_DOMAINS \ POWER_DOMAIN_PIPE_D, \ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ POWER_DOMAIN_TRANSCODER_D I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5, TGL_PW_5_POWER_DOMAINS, POWER_DOMAIN_INIT); #define TGL_PW_4_POWER_DOMAINS \ TGL_PW_5_POWER_DOMAINS, \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_C I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4, TGL_PW_4_POWER_DOMAINS, POWER_DOMAIN_INIT); #define TGL_PW_3_POWER_DOMAINS \ TGL_PW_4_POWER_DOMAINS, \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ POWER_DOMAIN_PORT_DDI_LANES_TC4, \ POWER_DOMAIN_PORT_DDI_LANES_TC5, \ POWER_DOMAIN_PORT_DDI_LANES_TC6, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_USBC1, \ POWER_DOMAIN_AUX_USBC2, \ POWER_DOMAIN_AUX_USBC3, \ POWER_DOMAIN_AUX_USBC4, \ POWER_DOMAIN_AUX_USBC5, \ POWER_DOMAIN_AUX_USBC6, \ POWER_DOMAIN_AUX_TBT1, \ POWER_DOMAIN_AUX_TBT2, \ POWER_DOMAIN_AUX_TBT3, \ POWER_DOMAIN_AUX_TBT4, \ POWER_DOMAIN_AUX_TBT5, \ POWER_DOMAIN_AUX_TBT6 I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3, TGL_PW_3_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2, TGL_PW_3_POWER_DOMAINS, POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off, TGL_PW_3_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5); I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5); I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6); I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off, POWER_DOMAIN_AUX_USBC1, POWER_DOMAIN_AUX_USBC2, POWER_DOMAIN_AUX_USBC3, POWER_DOMAIN_AUX_USBC4, POWER_DOMAIN_AUX_USBC5, POWER_DOMAIN_AUX_USBC6, POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, POWER_DOMAIN_AUX_TBT5, POWER_DOMAIN_AUX_TBT6, POWER_DOMAIN_TC_COLD_OFF); static const struct i915_power_well_desc tgl_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &tgl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &tgl_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_fuses = true, }, { .instances = 
&I915_PW_INSTANCES( I915_PW("PW_3", &tgl_pwdoms_pw_3, .hsw.idx = ICL_PW_CTL_IDX_PW_3, .id = ICL_DISP_PW_3), ), .ops = &hsw_power_well_ops, .has_vga = true, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5), I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6), ), .ops = &icl_ddi_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_4", &tgl_pwdoms_pw_4, .hsw.idx = ICL_PW_CTL_IDX_PW_4), ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_5", &tgl_pwdoms_pw_5, .hsw.idx = TGL_PW_CTL_IDX_PW_5), ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), }, }; static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = { { .instances = &I915_PW_INSTANCES( I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off, .id = TGL_DISP_PW_TC_COLD_OFF), ), .ops = &tgl_tc_cold_off_ops, }, }; static const struct i915_power_well_desc tgl_power_wells_aux[] = { { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5), I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6), ), .ops = &icl_aux_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5), I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6), ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, }, }; static const struct i915_power_well_desc_list tgl_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(tgl_power_wells_main), I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off), I915_PW_DESCRIPTORS(tgl_power_wells_aux), }; static const struct i915_power_well_desc_list adls_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(tgl_power_wells_main), I915_PW_DESCRIPTORS(tgl_power_wells_aux), }; 
#define RKL_PW_4_POWER_DOMAINS \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_C I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4, RKL_PW_4_POWER_DOMAINS, POWER_DOMAIN_INIT); #define RKL_PW_3_POWER_DOMAINS \ RKL_PW_4_POWER_DOMAINS, \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_MMIO, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_USBC1, \ POWER_DOMAIN_AUX_USBC2 I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3, RKL_PW_3_POWER_DOMAINS, POWER_DOMAIN_INIT); /* * There is no PW_2/PG_2 on RKL. * * RKL PW_1/PG_1 domains (under HW/DMC control): * - DBUF function (note: registers are in PW0) * - PIPE_A and its planes and VDSC/joining, except VGA * - transcoder A * - DDI_A and DDI_B * - FBC * * RKL PW_0/PG_0 domains (under HW/DMC control): * - PCI * - clocks except port PLL * - shared functions: * * interrupts except pipe interrupts * * MBus except PIPE_MBUS_DBOX_CTL * * DBUF registers * - central power except FBC * - top-level GTC (DDI-level GTC is in the well associated with the DDI) */ I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off, RKL_PW_3_POWER_DOMAINS, POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); static const struct i915_power_well_desc rkl_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &rkl_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_3", &rkl_pwdoms_pw_3, .hsw.idx = ICL_PW_CTL_IDX_PW_3, .id = ICL_DISP_PW_3), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_4", &rkl_pwdoms_pw_4, .hsw.idx = ICL_PW_CTL_IDX_PW_4), ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), }, }; static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), ), .ops = &icl_ddi_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), ), .ops = &icl_aux_power_well_ops, }, }; static const struct i915_power_well_desc_list rkl_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(rkl_power_wells_main), I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), }; /* * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. 
*/ #define DG1_PW_3_POWER_DOMAINS \ TGL_PW_4_POWER_DOMAINS, \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_TRANSCODER_B, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_USBC1, \ POWER_DOMAIN_AUX_USBC2 I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3, DG1_PW_3_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off, DG1_PW_3_POWER_DOMAINS, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2, DG1_PW_3_POWER_DOMAINS, POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_INIT); static const struct i915_power_well_desc dg1_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &dg1_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &dg1_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_3", &dg1_pwdoms_pw_3, .hsw.idx = ICL_PW_CTL_IDX_PW_3, .id = ICL_DISP_PW_3), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_vga = true, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_4", &tgl_pwdoms_pw_4, .hsw.idx = ICL_PW_CTL_IDX_PW_4), ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_C), }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_5", &tgl_pwdoms_pw_5, .hsw.idx = TGL_PW_CTL_IDX_PW_5), ), .ops = &hsw_power_well_ops, .has_fuses = true, .irq_pipe_mask = BIT(PIPE_D), }, }; static const struct i915_power_well_desc_list dg1_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(dg1_power_wells_main), I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), }; /* * XE_LPD Power Domains * * Previous platforms required that PG(n-1) be enabled before PG(n). That * dependency chain turns into a dependency tree on XE_LPD: * * PG0 * | * --PG1-- * / \ * PGA --PG2-- * / | \ * PGB PGC PGD * * Power wells must be enabled from top to bottom and disabled from bottom * to top. This allows pipes to be power gated independently. 
*/ #define XELPD_PW_D_POWER_DOMAINS \ POWER_DOMAIN_PIPE_D, \ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ POWER_DOMAIN_TRANSCODER_D I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d, XELPD_PW_D_POWER_DOMAINS, POWER_DOMAIN_INIT); #define XELPD_PW_C_POWER_DOMAINS \ POWER_DOMAIN_PIPE_C, \ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ POWER_DOMAIN_TRANSCODER_C I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c, XELPD_PW_C_POWER_DOMAINS, POWER_DOMAIN_INIT); #define XELPD_PW_B_POWER_DOMAINS \ POWER_DOMAIN_PIPE_B, \ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ POWER_DOMAIN_TRANSCODER_B I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b, XELPD_PW_B_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_PANEL_FITTER_A, POWER_DOMAIN_INIT); #define XELPD_DC_OFF_PORT_POWER_DOMAINS \ POWER_DOMAIN_PORT_DDI_LANES_C, \ POWER_DOMAIN_PORT_DDI_LANES_D, \ POWER_DOMAIN_PORT_DDI_LANES_E, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ POWER_DOMAIN_PORT_DDI_LANES_TC4, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_AUX_IO_C, \ POWER_DOMAIN_AUX_IO_D, \ POWER_DOMAIN_AUX_IO_E, \ POWER_DOMAIN_AUX_C, \ POWER_DOMAIN_AUX_D, \ POWER_DOMAIN_AUX_E, \ POWER_DOMAIN_AUX_USBC1, \ POWER_DOMAIN_AUX_USBC2, \ POWER_DOMAIN_AUX_USBC3, \ POWER_DOMAIN_AUX_USBC4, \ POWER_DOMAIN_AUX_TBT1, \ POWER_DOMAIN_AUX_TBT2, \ POWER_DOMAIN_AUX_TBT3, \ POWER_DOMAIN_AUX_TBT4 #define XELPD_PW_2_POWER_DOMAINS \ XELPD_PW_B_POWER_DOMAINS, \ XELPD_PW_C_POWER_DOMAINS, \ XELPD_PW_D_POWER_DOMAINS, \ XELPD_DC_OFF_PORT_POWER_DOMAINS I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, XELPD_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); /* * XELPD PW_1/PG_1 domains (under HW/DMC control): * - DBUF function (registers are in PW0) * - Transcoder A * - DDI_A and DDI_B * * XELPD PW_0/PW_1 domains (under HW/DMC control): * - PCI * - Clocks except port PLL * - Shared functions: * * interrupts except pipe interrupts * * MBus except PIPE_MBUS_DBOX_CTL * * DBUF registers * - Central power except FBC * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) */ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, XELPD_DC_OFF_PORT_POWER_DOMAINS, XELPD_PW_C_POWER_DOMAINS, XELPD_PW_D_POWER_DOMAINS, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); static const struct i915_power_well_desc xelpd_power_wells_dc_off[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &xelpd_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, } }; static const struct i915_power_well_desc xelpd_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &xelpd_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_vga = true, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_A", &xelpd_pwdoms_pw_a, .hsw.idx = XELPD_PW_CTL_IDX_PW_A), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_A), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_B", &xelpd_pwdoms_pw_b, .hsw.idx = XELPD_PW_CTL_IDX_PW_B), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_C", &xelpd_pwdoms_pw_c, .hsw.idx = XELPD_PW_CTL_IDX_PW_C), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_D", &xelpd_pwdoms_pw_d, .hsw.idx = XELPD_PW_CTL_IDX_PW_D), ), .ops = &hsw_power_well_ops, 
.irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), ), .ops = &icl_ddi_power_well_ops, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), ), .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), ), .ops = &icl_aux_power_well_ops, .fixed_enable_delay = true, /* WA_14017248603: adlp */ .enable_timeout = 500, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), ), .ops = &icl_aux_power_well_ops, .is_tc_tbt = true, }, }; static const struct i915_power_well_desc_list xelpd_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpd_power_wells_main), }; I915_DECL_PW_DOMAINS(xehpd_pwdoms_dc_off, XELPD_PW_2_POWER_DOMAINS, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_AUDIO_MMIO, POWER_DOMAIN_AUX_A, POWER_DOMAIN_AUX_B, POWER_DOMAIN_MODESET, POWER_DOMAIN_DC_OFF, POWER_DOMAIN_INIT); static const struct i915_power_well_desc xehpd_power_wells_dc_off[] = { { .instances = &I915_PW_INSTANCES( I915_PW("DC_off", &xehpd_pwdoms_dc_off, .id = SKL_DISP_DC_OFF), ), .ops = &gen9_dc_off_power_well_ops, } }; static const struct i915_power_well_desc_list xehpd_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(xehpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpd_power_wells_main), }; /* * MTL is based on XELPD power domains with the exception of power gating for: * - DDI_IO (moved to PLL logic) * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on) */ #define XELPDP_PW_2_POWER_DOMAINS \ XELPD_PW_B_POWER_DOMAINS, \ XELPD_PW_C_POWER_DOMAINS, \ XELPD_PW_D_POWER_DOMAINS, \ POWER_DOMAIN_AUDIO_PLAYBACK, \ POWER_DOMAIN_VGA, \ POWER_DOMAIN_PORT_DDI_LANES_TC1, \ 
POWER_DOMAIN_PORT_DDI_LANES_TC2, \ POWER_DOMAIN_PORT_DDI_LANES_TC3, \ POWER_DOMAIN_PORT_DDI_LANES_TC4 I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2, XELPDP_PW_2_POWER_DOMAINS, POWER_DOMAIN_INIT); I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1, POWER_DOMAIN_AUX_USBC1, POWER_DOMAIN_AUX_TBT1); I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2, POWER_DOMAIN_AUX_USBC2, POWER_DOMAIN_AUX_TBT2); I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3, POWER_DOMAIN_AUX_USBC3, POWER_DOMAIN_AUX_TBT3); I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4, POWER_DOMAIN_AUX_USBC4, POWER_DOMAIN_AUX_TBT4); static const struct i915_power_well_desc xelpdp_power_wells_main[] = { { .instances = &I915_PW_INSTANCES( I915_PW("PW_2", &xelpdp_pwdoms_pw_2, .hsw.idx = ICL_PW_CTL_IDX_PW_2, .id = SKL_DISP_PW_2), ), .ops = &hsw_power_well_ops, .has_vga = true, .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_A", &xelpd_pwdoms_pw_a, .hsw.idx = XELPD_PW_CTL_IDX_PW_A), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_A), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_B", &xelpd_pwdoms_pw_b, .hsw.idx = XELPD_PW_CTL_IDX_PW_B), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_B), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_C", &xelpd_pwdoms_pw_c, .hsw.idx = XELPD_PW_CTL_IDX_PW_C), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_C), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("PW_D", &xelpd_pwdoms_pw_d, .hsw.idx = XELPD_PW_CTL_IDX_PW_D), ), .ops = &hsw_power_well_ops, .irq_pipe_mask = BIT(PIPE_D), .has_fuses = true, }, { .instances = &I915_PW_INSTANCES( I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), ), .ops = &xelpdp_aux_power_well_ops, }, }; static const struct i915_power_well_desc_list xelpdp_power_wells[] = { I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), I915_PW_DESCRIPTORS(icl_power_wells_pw_1), I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off), I915_PW_DESCRIPTORS(xelpdp_power_wells_main), }; static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { int j; if (!inst->domain_list) return; if (inst->domain_list->count == 0) { bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM); return; } for (j = 0; j < inst->domain_list->count; j++) set_bit(inst->domain_list->list[j], power_well->domains.bits); } #define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \ for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \ for ((_inst) = (_desc)->instances->list; \ (_inst) - (_desc)->instances->list < (_desc)->instances->count; \ (_inst)++) #define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \ for ((_descs) = (_desc_list); \ (_descs) - (_desc_list) < (_desc_count); \ (_descs)++) \ for_each_power_well_instance_in_desc_list((_descs)->list, (_descs)->count, \ (_desc), (_inst)) static int __set_power_wells(struct i915_power_domains *power_domains, const struct i915_power_well_desc_list *power_well_descs, int power_well_descs_sz) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); u64 
power_well_ids = 0; const struct i915_power_well_desc_list *desc_list; const struct i915_power_well_desc *desc; const struct i915_power_well_instance *inst; int power_well_count = 0; int plt_idx = 0; for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) power_well_count++; power_domains->power_well_count = power_well_count; power_domains->power_wells = kcalloc(power_well_count, sizeof(*power_domains->power_wells), GFP_KERNEL); if (!power_domains->power_wells) return -ENOMEM; for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) { struct i915_power_well *pw = &power_domains->power_wells[plt_idx]; enum i915_power_well_id id = inst->id; pw->desc = desc; drm_WARN_ON(&i915->drm, overflows_type(inst - desc->instances->list, pw->instance_idx)); pw->instance_idx = inst - desc->instances->list; init_power_well_domains(inst, pw); plt_idx++; if (id == DISP_PW_ID_NONE) continue; drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); power_well_ids |= BIT_ULL(id); } return 0; } #define set_power_wells(power_domains, __power_well_descs) \ __set_power_wells(power_domains, __power_well_descs, \ ARRAY_SIZE(__power_well_descs)) /** * intel_display_power_map_init - initialize power domain -> power well mappings * @power_domains: power domain state * * Creates all the power wells for the current platform, initializes the * dynamic state for them and initializes the mapping of each power well to * all the power domains the power well belongs to. */ int intel_display_power_map_init(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); /* * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. 
*/ if (!HAS_DISPLAY(i915)) { power_domains->power_well_count = 0; return 0; } if (DISPLAY_VER(i915) >= 14) return set_power_wells(power_domains, xelpdp_power_wells); else if (IS_DG2(i915)) return set_power_wells(power_domains, xehpd_power_wells); else if (DISPLAY_VER(i915) >= 13) return set_power_wells(power_domains, xelpd_power_wells); else if (IS_DG1(i915)) return set_power_wells(power_domains, dg1_power_wells); else if (IS_ALDERLAKE_S(i915)) return set_power_wells(power_domains, adls_power_wells); else if (IS_ROCKETLAKE(i915)) return set_power_wells(power_domains, rkl_power_wells); else if (DISPLAY_VER(i915) == 12) return set_power_wells(power_domains, tgl_power_wells); else if (DISPLAY_VER(i915) == 11) return set_power_wells(power_domains, icl_power_wells); else if (IS_GEMINILAKE(i915)) return set_power_wells(power_domains, glk_power_wells); else if (IS_BROXTON(i915)) return set_power_wells(power_domains, bxt_power_wells); else if (DISPLAY_VER(i915) == 9) return set_power_wells(power_domains, skl_power_wells); else if (IS_CHERRYVIEW(i915)) return set_power_wells(power_domains, chv_power_wells); else if (IS_BROADWELL(i915)) return set_power_wells(power_domains, bdw_power_wells); else if (IS_HASWELL(i915)) return set_power_wells(power_domains, hsw_power_wells); else if (IS_VALLEYVIEW(i915)) return set_power_wells(power_domains, vlv_power_wells); else if (IS_I830(i915)) return set_power_wells(power_domains, i830_power_wells); else return set_power_wells(power_domains, i9xx_power_wells); } /** * intel_display_power_map_cleanup - clean up power domain -> power well mappings * @power_domains: power domain state * * Cleans up all the state that was initialized by intel_display_power_map_init(). */ void intel_display_power_map_cleanup(struct i915_power_domains *power_domains) { kfree(power_domains->power_wells); }
linux-master
drivers/gpu/drm/i915/display/intel_display_power_map.c
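/*
 * intel_display_power_map.c above builds the per-platform power well array
 * in __set_power_wells() with a two-pass pattern: walk every instance in the
 * nested descriptor lists to count them, allocate one flat array, then fill
 * it while tracking explicitly-ID'd wells in a u64 bitmask so an
 * out-of-range or duplicate ID trips a warning. A reduced, self-contained
 * sketch of that pattern (simplified types and names, not the driver's own):
 */
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

#define ID_NONE -1

struct well_instance { int id; };       /* ID_NONE or a small unique id */
struct well { const struct well_instance *inst; };

static struct well *build_wells(const struct well_instance *insts, int n)
{
        uint64_t ids_seen = 0;
        struct well *wells;
        int i;

        /* pass 1 normally counts across the nested lists; here n is given */
        wells = calloc(n, sizeof(*wells));
        if (!wells)
                return NULL;

        /* pass 2: fill the flat array and sanity-check the IDs */
        for (i = 0; i < n; i++) {
                int id = insts[i].id;

                wells[i].inst = &insts[i];

                if (id == ID_NONE)
                        continue;

                assert(id >= 0 && id < 64);           /* fits in the bitmask */
                assert(!(ids_seen & (1ULL << id)));   /* no duplicate IDs */
                ids_seen |= 1ULL << id;
        }

        return wells;
}

int main(void)
{
        static const struct well_instance insts[] = { { 0 }, { ID_NONE }, { 3 } };
        struct well *wells = build_wells(insts, 3);

        free(wells);
        return 0;
}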
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Daniel Vetter <[email protected]> * */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" #include "intel_pch_display.h" /** * DOC: fifo underrun handling * * The i915 driver checks for display fifo underruns using the interrupt signals * provided by the hardware. This is enabled by default and fairly useful to * debug display issues, especially watermark settings. * * If an underrun is detected this is logged into dmesg. To avoid flooding logs * and occupying the cpu underrun interrupts are disabled after the first * occurrence until the next modeset on a given pipe. * * Note that underrun detection on gmch platforms is a bit more ugly since there * is no interrupt (despite that the signalling bit is in the PIPESTAT pipe * interrupt register). Also on some other platforms underrun interrupts are * shared, which means that if we detect an underrun we need to disable underrun * reporting on all pipes. * * The code also supports underrun detection on the PCH transcoder. 
*/ static bool ivb_can_enable_err_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; enum pipe pipe; lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; } return true; } static bool cpt_can_enable_serr_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; struct intel_crtc *crtc; lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->pch_fifo_underrun_disabled) return false; } return true; } static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = PIPESTAT(crtc->pipe); u32 enable_mask; lockdep_assert_held(&dev_priv->irq_lock); if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0) return; enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe); intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); intel_de_posting_read(dev_priv, reg); trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe)); } static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPESTAT(pipe); lockdep_assert_held(&dev_priv->irq_lock); if (enable) { u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); intel_de_posting_read(dev_priv, reg); } else { if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(pipe)); } } static void ilk_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); u32 bit = (pipe == PIPE_A) ? 
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; if (enable) ilk_enable_display_irq(dev_priv, bit); else ilk_disable_display_irq(dev_priv, bit); } static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT); lockdep_assert_held(&dev_priv->irq_lock); if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0) return; intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); intel_de_posting_read(dev_priv, GEN7_ERR_INT); trace_intel_cpu_fifo_underrun(dev_priv, pipe); drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe)); } static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable, bool old) { struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); if (!ivb_can_enable_err_int(dev)) return; ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB); } else { ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB); if (old && intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { drm_err(&dev_priv->drm, "uncleared fifo underrun on pipe %c\n", pipe_name(pipe)); } } } static u32 icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv) { u32 mask = PIPE_STATUS_UNDERRUN; if (DISPLAY_VER(dev_priv) >= 13) mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD | PIPE_STATUS_HARD_UNDERRUN_XELPD | PIPE_STATUS_PORT_UNDERRUN_XELPD; return mask; } static void bdw_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); u32 mask = gen8_de_pipe_underrun_mask(dev_priv); if (enable) { if (DISPLAY_VER(dev_priv) >= 11) intel_de_write(dev_priv, ICL_PIPESTATUS(pipe), icl_pipe_status_underrun_mask(dev_priv)); bdw_enable_pipe_irq(dev_priv, pipe, mask); } else { bdw_disable_pipe_irq(dev_priv, pipe, mask); } } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pch_transcoder, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); u32 bit = (pch_transcoder == PIPE_A) ? 
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) ibx_enable_display_interrupt(dev_priv, bit); else ibx_disable_display_interrupt(dev_priv, bit); } static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; u32 serr_int = intel_de_read(dev_priv, SERR_INT); lockdep_assert_held(&dev_priv->irq_lock); if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0) return; intel_de_write(dev_priv, SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); intel_de_posting_read(dev_priv, SERR_INT); trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n", pipe_name(pch_transcoder)); } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pch_transcoder, bool enable, bool old) { struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { intel_de_write(dev_priv, SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); if (!cpt_can_enable_serr_int(dev)) return; ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); } else { ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); if (old && intel_de_read(dev_priv, SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { drm_err(&dev_priv->drm, "uncleared pch fifo underrun on pch transcoder %c\n", pipe_name(pch_transcoder)); } } } static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); bool old; lockdep_assert_held(&dev_priv->irq_lock); old = !crtc->cpu_fifo_underrun_disabled; crtc->cpu_fifo_underrun_disabled = !enable; if (HAS_GMCH(dev_priv)) i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv)) ilk_set_fifo_underrun_reporting(dev, pipe, enable); else if (DISPLAY_VER(dev_priv) == 7) ivb_set_fifo_underrun_reporting(dev, pipe, enable, old); else if (DISPLAY_VER(dev_priv) >= 8) bdw_set_fifo_underrun_reporting(dev, pipe, enable); return old; } /** * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrrun reporting state * @dev_priv: i915 device instance * @pipe: (CPU) pipe to set state for * @enable: whether underruns should be reported or not * * This function sets the fifo underrun state for @pipe. It is used in the * modeset code to avoid false positives since on many platforms underruns are * expected when disabling or enabling the pipe. * * Notice that on some platforms disabling underrun reports for one pipe * disables for all due to shared interrupts. Actual reporting is still per-pipe * though. * * Returns the previous state of underrun reporting. */ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { unsigned long flags; bool ret; spin_lock_irqsave(&dev_priv->irq_lock, flags); ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe, enable); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return ret; } /** * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state * @dev_priv: i915 device instance * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) * @enable: whether underruns should be reported or not * * This function makes us disable or enable PCH fifo underruns for a specific * PCH transcoder. Notice that on some PCHs (e.g. 
CPT/PPT), disabling FIFO * underrun reporting for one transcoder may also disable all the other PCH * error interruts for the other transcoders, due to the fact that there's just * one interrupt mask/enable bit for all the transcoders. * * Returns the previous state of underrun reporting. */ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pch_transcoder, bool enable) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pch_transcoder); unsigned long flags; bool old; /* * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT * has only one pch transcoder A that all pipes can use. To avoid racy * pch transcoder -> pipe lookups from interrupt code simply store the * underrun statistics in crtc A. Since we never expose this anywhere * nor use it outside of the fifo underrun code here using the "wrong" * crtc on LPT won't cause issues. */ spin_lock_irqsave(&dev_priv->irq_lock, flags); old = !crtc->pch_fifo_underrun_disabled; crtc->pch_fifo_underrun_disabled = !enable; if (HAS_PCH_IBX(dev_priv)) ibx_set_fifo_underrun_reporting(&dev_priv->drm, pch_transcoder, enable); else cpt_set_fifo_underrun_reporting(&dev_priv->drm, pch_transcoder, enable, old); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return old; } /** * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt * @dev_priv: i915 device instance * @pipe: (CPU) pipe to set state for * * This handles a CPU fifo underrun interrupt, generating an underrun warning * into dmesg if underrun reporting is enabled and then disables the underrun * interrupt to avoid an irq storm. */ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); u32 underruns = 0; /* We may be called too early in init, thanks BIOS! */ if (crtc == NULL) return; /* GMCH can't disable fifo underruns, filter them. */ if (HAS_GMCH(dev_priv) && crtc->cpu_fifo_underrun_disabled) return; /* * Starting with display version 11, the PIPE_STAT register records * whether an underrun has happened, and on XELPD+, it will also record * whether the underrun was soft/hard and whether it was triggered by * the downstream port logic. We should clear these bits (which use * write-1-to-clear logic) too. * * Note that although the IIR gives us the same underrun and soft/hard * information, PIPE_STAT is the only place we can find out whether * the underrun was caused by the downstream port. */ if (DISPLAY_VER(dev_priv) >= 11) { underruns = intel_de_read(dev_priv, ICL_PIPESTATUS(pipe)) & icl_pipe_status_underrun_mask(dev_priv); intel_de_write(dev_priv, ICL_PIPESTATUS(pipe), underruns); } if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) { trace_intel_cpu_fifo_underrun(dev_priv, pipe); if (DISPLAY_VER(dev_priv) >= 11) drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n", pipe_name(pipe), underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "", underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "", underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "", underruns & PIPE_STATUS_UNDERRUN ? 
"transcoder," : ""); else drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } intel_fbc_handle_fifo_underrun_irq(dev_priv); } /** * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt * @dev_priv: i915 device instance * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) * * This handles a PCH fifo underrun interrupt, generating an underrun warning * into dmesg if underrun reporting is enabled and then disables the underrun * interrupt to avoid an irq storm. */ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pch_transcoder) { if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) { trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n", pipe_name(pch_transcoder)); } } /** * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately * @dev_priv: i915 device instance * * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared * error interrupt may have been disabled, and so CPU fifo underruns won't * necessarily raise an interrupt, and on GMCH platforms where underruns never * raise an interrupt. */ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; spin_lock_irq(&dev_priv->irq_lock); for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->cpu_fifo_underrun_disabled) continue; if (HAS_GMCH(dev_priv)) i9xx_check_fifo_underruns(crtc); else if (DISPLAY_VER(dev_priv) == 7) ivb_check_fifo_underruns(crtc); } spin_unlock_irq(&dev_priv->irq_lock); } /** * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately * @dev_priv: i915 device instance * * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared * error interrupt may have been disabled, and so PCH fifo underruns won't * necessarily raise an interrupt. */ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; spin_lock_irq(&dev_priv->irq_lock); for_each_intel_crtc(&dev_priv->drm, crtc) { if (crtc->pch_fifo_underrun_disabled) continue; if (HAS_PCH_CPT(dev_priv)) cpt_check_pch_fifo_underruns(crtc); } spin_unlock_irq(&dev_priv->irq_lock); } void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, struct intel_crtc *crtc, bool enable) { crtc->cpu_fifo_underrun_disabled = !enable; /* * We track the PCH trancoder underrun reporting state * within the crtc. With crtc for pipe A housing the underrun * reporting state for PCH transcoder A, crtc for pipe B housing * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, * and marking underrun reporting as disabled for the non-existing * PCH transcoders B and C would prevent enabling the south * error interrupt (see cpt_can_enable_serr_int()). */ if (intel_has_pch_trancoder(i915, crtc->pipe)) crtc->pch_fifo_underrun_disabled = !enable; }
linux-master
drivers/gpu/drm/i915/display/intel_fifo_underrun.c
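/*
 * The DOC comment at the top of intel_fifo_underrun.c above describes the
 * core policy: when an underrun interrupt fires it is logged once, then
 * further reporting is disabled until something (typically the next modeset)
 * re-arms it, which avoids flooding dmesg and an irq storm. A minimal
 * standalone sketch of that report-once pattern, using placeholder types
 * rather than the driver's own:
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_crtc {
        char pipe_name;
        bool underrun_reporting_disabled;
};

/* Returns the previous "reporting enabled" state, like the driver helpers. */
static bool set_underrun_reporting(struct fake_crtc *crtc, bool enable)
{
        bool old = !crtc->underrun_reporting_disabled;

        crtc->underrun_reporting_disabled = !enable;
        return old;
}

/* Interrupt handler: log once, then disarm to avoid an irq storm. */
static void underrun_irq_handler(struct fake_crtc *crtc)
{
        if (set_underrun_reporting(crtc, false))
                printf("pipe %c FIFO underrun\n", crtc->pipe_name);
        /* else: reporting was already disabled, stay silent */
}

int main(void)
{
        struct fake_crtc crtc = { .pipe_name = 'A' };

        underrun_irq_handler(&crtc);          /* logs once */
        underrun_irq_handler(&crtc);          /* silent: already disarmed */
        set_underrun_reporting(&crtc, true);  /* re-armed, e.g. at modeset */
        underrun_irq_handler(&crtc);          /* logs again */
        return 0;
}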
// SPDX-License-Identifier: MIT /* * Copyright © 2022-2023 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_vblank.h" #include "intel_vrr.h" /* * This timing diagram depicts the video signal in and * around the vertical blanking period. * * Assumptions about the fictitious mode used in this example: * vblank_start >= 3 * vsync_start = vblank_start + 1 * vsync_end = vblank_start + 2 * vtotal = vblank_start + 3 * * start of vblank: * latch double buffered registers * increment frame counter (ctg+) * generate start of vblank interrupt (gen4+) * | * | frame start: * | generate frame start interrupt (aka. vblank interrupt) (gmch) * | may be shifted forward 1-3 extra lines via TRANSCONF * | | * | | start of vsync: * | | generate vsync interrupt * | | | * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx * . \hs/ . \hs/ \hs/ \hs/ . \hs/ * ----va---> <-----------------vb--------------------> <--------va------------- * | | <----vs-----> | * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) * | | | * last visible pixel first visible pixel * | increment frame counter (gen3/4) * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) * * x = horizontal active * _ = horizontal blanking * hs = horizontal sync * va = vertical active * vb = vertical blanking * vs = vertical sync * vbs = vblank_start (number) * * Summary: * - most events happen at the start of horizontal sync * - frame start happens at the start of horizontal blank, 1-4 lines * (depending on TRANSCONF settings) after the start of vblank * - gen3/4 pixel and frame counter are synchronized with the start * of horizontal active on the first line of vertical active */ /* * Called from drm generic code, passed a 'crtc', which we use as a pipe index. */ u32 i915_get_vblank_counter(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; const struct drm_display_mode *mode = &vblank->hwmode; enum pipe pipe = to_intel_crtc(crtc)->pipe; u32 pixel, vbl_start, hsync_start, htotal; u64 frame; /* * On i965gm TV output the frame counter only works up to * the point when we enable the TV encoder. After that the * frame counter ceases to work and reads zero. We need a * vblank wait before enabling the TV encoder and so we * have to enable vblank interrupts while the frame counter * is still in a working state. However the core vblank code * does not like us returning non-zero frame counter values * when we've told it that we don't have a working frame * counter. Thus we must stop non-zero values leaking out. */ if (!vblank->max_vblank_count) return 0; htotal = mode->crtc_htotal; hsync_start = mode->crtc_hsync_start; vbl_start = mode->crtc_vblank_start; if (mode->flags & DRM_MODE_FLAG_INTERLACE) vbl_start = DIV_ROUND_UP(vbl_start, 2); /* Convert to pixel count */ vbl_start *= htotal; /* Start of vblank event occurs at start of hsync */ vbl_start -= htotal - hsync_start; /* * High & low register fields aren't synchronized, so make sure * we get a low value that's stable across two reads of the high * register. 
*/ frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(pipe), PIPEFRAME(pipe)); pixel = frame & PIPE_PIXEL_MASK; frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff; /* * The frame counter increments at beginning of active. * Cook up a vblank counter by also checking the pixel * counter against vblank start. */ return (frame + (pixel >= vbl_start)) & 0xffffff; } u32 g4x_get_vblank_counter(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; enum pipe pipe = to_intel_crtc(crtc)->pipe; if (!vblank->max_vblank_count) return 0; return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(pipe)); } static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_vblank_crtc *vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; const struct drm_display_mode *mode = &vblank->hwmode; u32 htotal = mode->crtc_htotal; u32 clock = mode->crtc_clock; u32 scan_prev_time, scan_curr_time, scan_post_time; /* * To avoid the race condition where we might cross into the * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR * during the same frame. */ do { /* * This field provides read back of the display * pipe frame time stamp. The time stamp value * is sampled at every start of vertical blank. */ scan_prev_time = intel_de_read_fw(dev_priv, PIPE_FRMTMSTMP(crtc->pipe)); /* * The TIMESTAMP_CTR register has the current * time stamp value. */ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR); scan_post_time = intel_de_read_fw(dev_priv, PIPE_FRMTMSTMP(crtc->pipe)); } while (scan_post_time != scan_prev_time); return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, clock), 1000 * htotal); } /* * On certain encoders on certain platforms, pipe * scanline register will not work to get the scanline, * since the timings are driven from the PORT or issues * with scanline register updates. * This function will use Framestamp and current * timestamp registers to calculate the scanline. */ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) { struct drm_vblank_crtc *vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; const struct drm_display_mode *mode = &vblank->hwmode; u32 vblank_start = mode->crtc_vblank_start; u32 vtotal = mode->crtc_vtotal; u32 scanline; scanline = intel_crtc_scanlines_since_frame_timestamp(crtc); scanline = min(scanline, vtotal - 1); scanline = (scanline + vblank_start) % vtotal; return scanline; } /* * intel_de_read_fw(), only for fast reads of display block, no need for * forcewake etc. */ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); const struct drm_display_mode *mode; struct drm_vblank_crtc *vblank; enum pipe pipe = crtc->pipe; int position, vtotal; if (!crtc->active) return 0; vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; mode = &vblank->hwmode; if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) return __intel_get_crtc_scanline_from_timestamp(crtc); vtotal = mode->crtc_vtotal; if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we * read it just before the start of vblank. 
So try it again * so we don't accidentally end up spanning a vblank frame * increment, causing the pipe_update_end() code to squak at us. * * The nature of this problem means we can't simply check the ISR * bit and return the vblank start value; nor can we use the scanline * debug register in the transcoder as it appears to have the same * problem. We may need to extend this to include other platforms, * but so far testing only shows the problem on HSW. */ if (HAS_DDI(dev_priv) && !position) { int i, temp; for (i = 0; i < 100; i++) { udelay(1); temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; if (temp != position) { position = temp; break; } } } /* * See update_scanline_offset() for the details on the * scanline_offset adjustment. */ return (position + crtc->scanline_offset) % vtotal; } static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { struct drm_device *dev = _crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(_crtc); enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 || crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) { drm_dbg(&dev_priv->drm, "trying to get scanoutpos for disabled pipe %c\n", pipe_name(pipe)); return false; } htotal = mode->crtc_htotal; hsync_start = mode->crtc_hsync_start; vtotal = mode->crtc_vtotal; vbl_start = mode->crtc_vblank_start; vbl_end = mode->crtc_vblank_end; if (mode->flags & DRM_MODE_FLAG_INTERLACE) { vbl_start = DIV_ROUND_UP(vbl_start, 2); vbl_end /= 2; vtotal /= 2; } /* * Lock uncore.lock, as we will do multiple timing critical raw * register reads, potentially with preemption disabled, so the * following code must not block on uncore.lock. */ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ /* Get optional system timestamp before query. */ if (stime) *stime = ktime_get(); if (crtc->mode_flags & I915_MODE_FLAG_VRR) { int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc); position = __intel_get_crtc_scanline(crtc); /* * Already exiting vblank? If so, shift our position * so it looks like we're already apporaching the full * vblank end. This should make the generated timestamp * more or less match when the active portion will start. */ if (position >= vbl_start && scanlines < position) position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1); } else if (use_scanline_counter) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ position = __intel_get_crtc_scanline(crtc); } else { /* * Have access to pixelcount since start of frame. * We can split this into vertical and horizontal * scanout position. */ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; /* convert to pixel counts */ vbl_start *= htotal; vbl_end *= htotal; vtotal *= htotal; /* * In interlaced modes, the pixel counter counts all pixels, * so one field will have htotal more pixels. In order to avoid * the reported position from jumping backwards when the pixel * counter is beyond the length of the shorter field, just * clamp the position the length of the shorter field. 
This * matches how the scanline counter based position works since * the scanline counter doesn't count the two half lines. */ position = min(position, vtotal - 1); /* * Start of vblank interrupt is triggered at start of hsync, * just prior to the first active line of vblank. However we * consider lines to start at the leading edge of horizontal * active. So, should we get here before we've crossed into * the horizontal active of the first line in vblank, we would * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, * always add htotal-hsync_start to the current pixel position. */ position = (position + htotal - hsync_start) % vtotal; } /* Get optional system timestamp after query. */ if (etime) *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); /* * While in vblank, position will be negative * counting up towards 0 at vbl_end. And outside * vblank, position will be positive counting * up since vbl_end. */ if (position >= vbl_start) position -= vbl_end; else position += vtotal - vbl_end; if (use_scanline_counter) { *vpos = position; *hpos = 0; } else { *vpos = position / htotal; *hpos = position - (*vpos * htotal); } return true; } bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq) { return drm_crtc_vblank_helper_get_vblank_timestamp_internal( crtc, max_error, vblank_time, in_vblank_irq, i915_get_crtc_scanoutpos); } int intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); unsigned long irqflags; int position; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); position = __intel_get_crtc_scanline(crtc); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); return position; } static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; msleep(5); line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; return line1 != line2; } static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* Wait for the display line to settle/start moving */ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) drm_err(&dev_priv->drm, "pipe %c scanline %s wait timed out\n", pipe_name(pipe), str_on_off(state)); } void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) { wait_for_pipe_scanline_moving(crtc, false); } void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) { wait_for_pipe_scanline_moving(crtc, true); } static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* * The scanline counter increments at the leading edge of hsync. * * On most platforms it starts counting from vtotal-1 on the * first active line. That means the scanline counter value is * always one less than what we would expect. Ie. just after * start of vblank, which also occurs at start of hsync (on the * last active line), the scanline counter will read vblank_start-1. 
* * On gen2 the scanline counter starts counting from 1 instead * of vtotal-1, so we have to subtract one (or rather add vtotal-1 * to keep the value positive), instead of adding one. * * On HSW+ the behaviour of the scanline counter depends on the output * type. For DP ports it behaves like most other platforms, but on HDMI * there's an extra 1 line difference. So we need to add two instead of * one to the value. * * On VLV/CHV DSI the scanline counter would appear to increment * approx. 1/3 of a scanline before start of vblank. Unfortunately * that means we can't tell whether we're in vblank or not while * we're on that particular line. We must still set scanline_offset * to 1 so that the vblank timestamps come out correct when we query * the scanline counter from within the vblank interrupt handler. * However if queried just before the start of vblank we'll get an * answer that's slightly in the future. */ if (DISPLAY_VER(i915) == 2) { int vtotal; vtotal = adjusted_mode->crtc_vtotal; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; return vtotal - 1; } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { return 2; } else { return 1; } } void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, bool vrr_enable) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u8 mode_flags = crtc_state->mode_flags; struct drm_display_mode adjusted_mode; int vmax_vblank_start = 0; unsigned long irqflags; drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); if (vrr_enable) { drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0); adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); } else { mode_flags &= ~I915_MODE_FLAG_VRR; } /* * Belts and suspenders locking to guarantee everyone sees 100% * consistent state during fastset seamless refresh rate changes. * * vblank_time_lock takes care of all drm_vblank.c stuff, and * uncore.lock takes care of __intel_get_crtc_scanline() which * may get called elsewhere as well. * * TODO maybe just protect everything (including * __intel_get_crtc_scanline()) with vblank_time_lock? * Need to audit everything to make sure it's safe. */ spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags); spin_lock(&i915->uncore.lock); drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); crtc->vmax_vblank_start = vmax_vblank_start; crtc->mode_flags = mode_flags; crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); spin_unlock(&i915->uncore.lock); spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); }
linux-master
drivers/gpu/drm/i915/display/intel_vblank.c
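/*
 * A standalone illustration, with a made-up mode (1080 active lines,
 * vtotal 1125), of the two adjustments performed in intel_vblank.c above:
 * __intel_get_crtc_scanline() adds a per-platform scanline_offset (1 on most
 * platforms, 2 for HSW+ HDMI, vtotal-1 on gen2) and wraps modulo vtotal, and
 * i915_get_crtc_scanoutpos() then re-bases the result so it is negative
 * while inside vblank (counting up to 0 at vblank_end) and positive in the
 * active region. All constants here are illustrative, not from the driver.
 */
#include <stdio.h>

#define VTOTAL     1125
#define VBL_START  1080
#define VBL_END    1125

static int corrected_scanline(int hw_scanline, int scanline_offset)
{
        return (hw_scanline + scanline_offset) % VTOTAL;
}

static int vblank_relative(int position)
{
        if (position >= VBL_START)
                return position - VBL_END;           /* negative: inside vblank */
        else
                return position + VTOTAL - VBL_END;  /* positive: after vblank end */
}

int main(void)
{
        /* Just after the start of vblank the raw counter reads vblank_start-1. */
        int pos = corrected_scanline(VBL_START - 1, 1);

        printf("scanline %d: %d lines of vblank left\n",
               pos, -vblank_relative(pos));          /* 1080: 45 lines left */

        pos = corrected_scanline(10, 1);
        printf("scanline %d: %d lines since vblank end\n",
               pos, vblank_relative(pos));           /* 11: 11 lines in */
        return 0;
}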
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /** * DOC: Frame Buffer Compression (FBC) * * FBC tries to save memory bandwidth (and so power consumption) by * compressing the amount of memory used by the display. It is total * transparent to user space and completely handled in the kernel. * * The benefits of FBC are mostly visible with solid backgrounds and * variation-less patterns. It comes from keeping the memory footprint small * and having fewer memory pages opened and accessed for refreshing the display. * * i915 is responsible to reserve stolen memory for FBC and configure its * offset on proper registers. The hardware takes care of all * compress/decompress. However there are many known cases where we have to * forcibly disable it to allow proper screen updates. */ #include <linux/string_helpers.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "i915_vma.h" #include "intel_cdclk.h" #include "intel_de.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" #define for_each_fbc_id(__dev_priv, __fbc_id) \ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ for_each_fbc_id((__dev_priv), (__fbc_id)) \ for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)]) struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); void (*deactivate)(struct intel_fbc *fbc); bool (*is_active)(struct intel_fbc *fbc); bool (*is_compressing)(struct intel_fbc *fbc); void (*nuke)(struct intel_fbc *fbc); void (*program_cfb)(struct intel_fbc *fbc); void (*set_false_color)(struct intel_fbc *fbc, bool enable); }; struct intel_fbc_state { struct intel_plane *plane; unsigned int cfb_stride; unsigned int cfb_size; unsigned int fence_y_offset; u16 override_cfb_stride; u16 interval; s8 fence_id; }; struct intel_fbc { struct drm_i915_private *i915; const struct intel_fbc_funcs *funcs; /* * This is always the inner lock when overlapping with * struct_mutex and it's the outer lock when overlapping * with stolen_lock. 
*/ struct mutex lock; unsigned int busy_bits; struct i915_stolen_fb compressed_fb, compressed_llb; enum intel_fbc_id id; u8 limit; bool false_color; bool active; bool activated; bool flip_pending; bool underrun_detected; struct work_struct underrun_work; /* * This structure contains everything that's relevant to program the * hardware registers. When we want to figure out if we need to disable * and re-enable FBC for a new configuration we just check if there's * something different in the struct. The genx_fbc_activate functions * are supposed to read from it in order to program the registers. */ struct intel_fbc_state state; const char *no_fbc_reason; }; /* plane stride in pixels */ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int stride; stride = plane_state->view.color_plane[0].mapping_stride; if (!drm_rotation_90_or_270(plane_state->hw.rotation)) stride /= fb->format->cpp[0]; return stride; } /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ return intel_fbc_plane_stride(plane_state) * cpp; } /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int limit = 4; /* 1:4 compression limit is the worst case */ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; unsigned int height = 4; /* FBC segment is 4 lines */ unsigned int stride; /* minimum segment stride we can use */ stride = width * cpp * height / limit; /* * Wa_16011863758: icl+ * Avoid some hardware segment address miscalculation. */ if (DISPLAY_VER(i915) >= 11) stride += 64; /* * At least some of the platforms require each 4 line segment to * be 512 byte aligned. Just do it always for simplicity. */ stride = ALIGN(stride, 512); /* convert back to single line equivalent with 1:1 compression limit */ return stride * limit / height; } /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int stride = _intel_fbc_cfb_stride(plane_state); /* * At least some of the platforms require each 4 line segment to * be 512 byte aligned. Aligning each line to 512 bytes guarantees * that regardless of the compression limit we choose later. 
*/ if (DISPLAY_VER(i915) >= 9) return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state)); else return stride; } static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); int lines = drm_rect_height(&plane_state->uapi.src) >> 16; if (DISPLAY_VER(i915) == 7) lines = min(lines, 2048); else if (DISPLAY_VER(i915) >= 8) lines = min(lines, 2560); return lines * intel_fbc_cfb_stride(plane_state); } static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); unsigned int stride = _intel_fbc_cfb_stride(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; /* * Override stride in 64 byte units per 4 line segment. * * Gen9 hw miscalculates cfb stride for linear as * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so * we always need to use the override there. */ if (stride != stride_aligned || (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) return stride_aligned * 4 / 64; return 0; } static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; unsigned int cfb_stride; u32 fbc_ctl; cfb_stride = fbc_state->cfb_stride / fbc->limit; /* FBC_CTL wants 32B or 64B units */ if (DISPLAY_VER(i915) == 2) cfb_stride = (cfb_stride / 32) - 1; else cfb_stride = (cfb_stride / 64) - 1; fbc_ctl = FBC_CTL_PERIODIC | FBC_CTL_INTERVAL(fbc_state->interval) | FBC_CTL_STRIDE(cfb_stride); if (IS_I945GM(i915)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ if (fbc_state->fence_id >= 0) fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id); return fbc_ctl; } static u32 i965_fbc_ctl2(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; u32 fbc_ctl2; fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_PLANE(fbc_state->plane->i9xx_plane); if (fbc_state->fence_id >= 0) fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; return fbc_ctl2; } static void i8xx_fbc_deactivate(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; u32 fbc_ctl; /* Disable compression */ fbc_ctl = intel_de_read(i915, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; intel_de_write(i915, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ if (intel_de_wait_for_clear(i915, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); return; } } static void i8xx_fbc_activate(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; int i; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) intel_de_write(i915, FBC_TAG(i), 0); if (DISPLAY_VER(i915) == 4) { intel_de_write(i915, FBC_CONTROL2, i965_fbc_ctl2(fbc)); intel_de_write(i915, FBC_FENCE_OFF, fbc_state->fence_y_offset); } intel_de_write(i915, FBC_CONTROL, FBC_CTL_EN | i8xx_fbc_ctl(fbc)); } static bool i8xx_fbc_is_active(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; } static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, FBC_STATUS) & (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); } static void i8xx_fbc_nuke(struct intel_fbc *fbc) { struct intel_fbc_state *fbc_state = &fbc->state; enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; struct drm_i915_private 
*dev_priv = fbc->i915; intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); } static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_fb), U32_MAX)); GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_llb), U32_MAX)); intel_de_write(i915, FBC_CFB_BASE, i915_gem_stolen_node_address(i915, &fbc->compressed_fb)); intel_de_write(i915, FBC_LL_BASE, i915_gem_stolen_node_address(i915, &fbc->compressed_llb)); } static const struct intel_fbc_funcs i8xx_fbc_funcs = { .activate = i8xx_fbc_activate, .deactivate = i8xx_fbc_deactivate, .is_active = i8xx_fbc_is_active, .is_compressing = i8xx_fbc_is_compressing, .nuke = i8xx_fbc_nuke, .program_cfb = i8xx_fbc_program_cfb, }; static void i965_fbc_nuke(struct intel_fbc *fbc) { struct intel_fbc_state *fbc_state = &fbc->state; enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; struct drm_i915_private *dev_priv = fbc->i915; intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); } static const struct intel_fbc_funcs i965_fbc_funcs = { .activate = i8xx_fbc_activate, .deactivate = i8xx_fbc_deactivate, .is_active = i8xx_fbc_is_active, .is_compressing = i8xx_fbc_is_compressing, .nuke = i965_fbc_nuke, .program_cfb = i8xx_fbc_program_cfb, }; static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) { switch (fbc->limit) { default: MISSING_CASE(fbc->limit); fallthrough; case 1: return DPFC_CTL_LIMIT_1X; case 2: return DPFC_CTL_LIMIT_2X; case 4: return DPFC_CTL_LIMIT_4X; } } static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane); if (IS_G4X(i915)) dpfc_ctl |= DPFC_CTL_SR_EN; if (fbc_state->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; if (DISPLAY_VER(i915) < 6) dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); } return dpfc_ctl; } static void g4x_fbc_activate(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, DPFC_FENCE_YOFF, fbc_state->fence_y_offset); intel_de_write(i915, DPFC_CONTROL, DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } static void g4x_fbc_deactivate(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; /* Disable compression */ dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); } } static bool g4x_fbc_is_active(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; } static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; } static void g4x_fbc_program_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, DPFC_CB_BASE, i915_gem_stolen_node_offset(&fbc->compressed_fb)); } static const struct intel_fbc_funcs g4x_fbc_funcs = { .activate = g4x_fbc_activate, .deactivate = g4x_fbc_deactivate, .is_active = g4x_fbc_is_active, .is_compressing = g4x_fbc_is_compressing, .nuke = i965_fbc_nuke, .program_cfb = g4x_fbc_program_cfb, }; static void ilk_fbc_activate(struct intel_fbc *fbc) { struct intel_fbc_state 
*fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id), fbc_state->fence_y_offset); intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } static void ilk_fbc_deactivate(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; /* Disable compression */ dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id)); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); } } static bool ilk_fbc_is_active(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; } static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; } static void ilk_fbc_program_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), i915_gem_stolen_node_offset(&fbc->compressed_fb)); } static const struct intel_fbc_funcs ilk_fbc_funcs = { .activate = ilk_fbc_activate, .deactivate = ilk_fbc_deactivate, .is_active = ilk_fbc_is_active, .is_compressing = ilk_fbc_is_compressing, .nuke = i965_fbc_nuke, .program_cfb = ilk_fbc_program_cfb, }; static void snb_fbc_program_fence(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 ctl = 0; if (fbc_state->fence_id >= 0) ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); } static void snb_fbc_activate(struct intel_fbc *fbc) { snb_fbc_program_fence(fbc); ilk_fbc_activate(fbc); } static void snb_fbc_nuke(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id)); } static const struct intel_fbc_funcs snb_fbc_funcs = { .activate = snb_fbc_activate, .deactivate = ilk_fbc_deactivate, .is_active = ilk_fbc_is_active, .is_compressing = ilk_fbc_is_compressing, .nuke = snb_fbc_nuke, .program_cfb = ilk_fbc_program_cfb, }; static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 val = 0; if (fbc_state->override_cfb_stride) val |= FBC_STRIDE_OVERRIDE | FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val); } static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 val = 0; /* Display WA #0529: skl, kbl, bxt. 
*/ if (fbc_state->override_cfb_stride) val |= CHICKEN_FBC_STRIDE_OVERRIDE | CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); intel_de_rmw(i915, CHICKEN_MISC_4, CHICKEN_FBC_STRIDE_OVERRIDE | CHICKEN_FBC_STRIDE_MASK, val); } static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) { const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc); if (IS_IVYBRIDGE(i915)) dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); if (fbc_state->fence_id >= 0) dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; if (fbc->false_color) dpfc_ctl |= DPFC_CTL_FALSE_COLOR; return dpfc_ctl; } static void ivb_fbc_activate(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; if (DISPLAY_VER(i915) >= 10) glk_fbc_program_cfb_stride(fbc); else if (DISPLAY_VER(i915) == 9) skl_fbc_program_cfb_stride(fbc); if (intel_gt_support_legacy_fencing(to_gt(i915))) snb_fbc_program_fence(fbc); intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); } static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) { return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; } static void ivb_fbc_set_false_color(struct intel_fbc *fbc, bool enable) { intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); } static const struct intel_fbc_funcs ivb_fbc_funcs = { .activate = ivb_fbc_activate, .deactivate = ilk_fbc_deactivate, .is_active = ilk_fbc_is_active, .is_compressing = ivb_fbc_is_compressing, .nuke = snb_fbc_nuke, .program_cfb = ilk_fbc_program_cfb, .set_false_color = ivb_fbc_set_false_color, }; static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) { return fbc->funcs->is_active(fbc); } static void intel_fbc_hw_activate(struct intel_fbc *fbc) { trace_intel_fbc_activate(fbc->state.plane); fbc->active = true; fbc->activated = true; fbc->funcs->activate(fbc); } static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) { trace_intel_fbc_deactivate(fbc->state.plane); fbc->active = false; fbc->funcs->deactivate(fbc); } static bool intel_fbc_is_compressing(struct intel_fbc *fbc) { return fbc->funcs->is_compressing(fbc); } static void intel_fbc_nuke(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; lockdep_assert_held(&fbc->lock); drm_WARN_ON(&i915->drm, fbc->flip_pending); trace_intel_fbc_nuke(fbc->state.plane); fbc->funcs->nuke(fbc); } static void intel_fbc_activate(struct intel_fbc *fbc) { lockdep_assert_held(&fbc->lock); intel_fbc_hw_activate(fbc); intel_fbc_nuke(fbc); fbc->no_fbc_reason = NULL; } static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) { lockdep_assert_held(&fbc->lock); if (fbc->active) intel_fbc_hw_deactivate(fbc); fbc->no_fbc_reason = reason; } static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) return BIT_ULL(28); else return BIT_ULL(32); } static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) { u64 end; /* The FBC hardware for BDW/SKL doesn't have access to the stolen * reserved range size, so it always assumes the maximum (8mb) is used. * If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. 
*/ if (IS_BROADWELL(i915) || (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024; else end = U64_MAX; return min(end, intel_fbc_cfb_base_max(i915)); } static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) { return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1; } static int intel_fbc_max_limit(struct drm_i915_private *i915) { /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(i915)) return 1; /* * FBC2 can only do 1:1, 1:2, 1:4, we limit * FBC1 to the same out of convenience. */ return 4; } static int find_compression_limit(struct intel_fbc *fbc, unsigned int size, int min_limit) { struct drm_i915_private *i915 = fbc->i915; u64 end = intel_fbc_stolen_end(i915); int ret, limit = min_limit; size /= limit; /* Try to over-allocate to reduce reallocations and fragmentation. */ ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size <<= 1, 4096, 0, end); if (ret == 0) return limit; for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) return limit; } return 0; } static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, unsigned int size, int min_limit) { struct drm_i915_private *i915 = fbc->i915; int ret; drm_WARN_ON(&i915->drm, i915_gem_stolen_node_allocated(&fbc->compressed_fb)); drm_WARN_ON(&i915->drm, i915_gem_stolen_node_allocated(&fbc->compressed_llb)); if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 4096, 4096); if (ret) goto err; } ret = find_compression_limit(fbc, size, min_limit); if (!ret) goto err_llb; else if (ret > min_limit) drm_info_once(&i915->drm, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); fbc->limit = ret; drm_dbg_kms(&i915->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit); return 0; err_llb: if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); err: if (i915_gem_stolen_initialized(i915)) drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } static void intel_fbc_program_cfb(struct intel_fbc *fbc) { fbc->funcs->program_cfb(fbc); } static void intel_fbc_program_workarounds(struct intel_fbc *fbc) { /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */ if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915)) intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); } static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); if (i915_gem_stolen_node_allocated(&fbc->compressed_fb)) i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); } void intel_fbc_cleanup(struct drm_i915_private *i915) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) { mutex_lock(&fbc->lock); __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); kfree(fbc); } } static bool stride_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int stride = intel_fbc_plane_stride(plane_state) * fb->format->cpp[0]; /* This should have been caught earlier. */ if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) return false; /* Below are the additional FBC restrictions. */ if (stride < 512) return false; if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) return stride == 4096 || stride == 8192; if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) return false; /* Display WA #1105: skl,bxt,kbl,cfl,glk */ if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; if (stride > 16384) return false; return true; } static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; switch (fb->format->format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ if (DISPLAY_VER(i915) == 2) return false; /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(i915)) return false; return true; default: return false; } } static bool rotation_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && rotation != DRM_MODE_ROTATE_0) return false; return true; } /* * For some reason, the hardware tracking starts looking at whatever we * programmed as the display plane base address register. It does not look at * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
*/ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int effective_w, effective_h, max_w, max_h; if (DISPLAY_VER(i915) >= 10) { max_w = 5120; max_h = 4096; } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { max_w = 4096; max_h = 4096; } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { max_w = 4096; max_h = 2048; } else { max_w = 2048; max_h = 1536; } effective_w = plane_state->view.color_plane[0].x + (drm_rect_width(&plane_state->uapi.src) >> 16); effective_h = plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16); return effective_w <= max_w && effective_h <= max_h; } static bool tiling_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: return DISPLAY_VER(i915) >= 9; case I915_FORMAT_MOD_4_TILED: case I915_FORMAT_MOD_X_TILED: return true; default: return false; } } static void intel_fbc_update_state(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = plane->fbc; struct intel_fbc_state *fbc_state = &fbc->state; WARN_ON(plane_state->no_fbc_reason); WARN_ON(fbc_state->plane && fbc_state->plane != plane); fbc_state->plane = plane; /* FBC1 compression interval: arbitrary choice of 1 second */ fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state); drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && !intel_gt_support_legacy_fencing(to_gt(i915))); if (plane_state->flags & PLANE_HAS_FENCE) fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma); else fbc_state->fence_id = -1; fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state); fbc_state->cfb_size = intel_fbc_cfb_size(plane_state); fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state); } static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); /* * The use of a CPU fence is one of two ways to detect writes by the * CPU to the scanout and trigger updates to the FBC. * * The other method is by software tracking (see * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke * the current compressed buffer and recompress it. * * Note that is possible for a tiled surface to be unmappable (and * so have no fence associated with it) due to aperture constraints * at the time of pinning. 
*/ return DISPLAY_VER(i915) >= 9 || (plane_state->flags & PLANE_HAS_FENCE && i915_vma_fence_id(plane_state->ggtt_vma) != -1); } static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct intel_fbc *fbc = plane->fbc; return intel_fbc_min_limit(plane_state) <= fbc->limit && intel_fbc_cfb_size(plane_state) <= fbc->limit * i915_gem_stolen_node_size(&fbc->compressed_fb); } static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) { return !plane_state->no_fbc_reason && intel_fbc_is_fence_ok(plane_state) && intel_fbc_is_cfb_ok(plane_state); } static int intel_fbc_check_plane(struct intel_atomic_state *state, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); const struct drm_framebuffer *fb = plane_state->hw.fb; struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); const struct intel_crtc_state *crtc_state; struct intel_fbc *fbc = plane->fbc; if (!fbc) return 0; if (!i915_gem_stolen_initialized(i915)) { plane_state->no_fbc_reason = "stolen memory not initialised"; return 0; } if (intel_vgpu_active(i915)) { plane_state->no_fbc_reason = "VGPU active"; return 0; } if (!i915->params.enable_fbc) { plane_state->no_fbc_reason = "disabled per module param or by default"; return 0; } if (!plane_state->uapi.visible) { plane_state->no_fbc_reason = "plane not visible"; return 0; } crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { plane_state->no_fbc_reason = "interlaced mode not supported"; return 0; } if (crtc_state->double_wide) { plane_state->no_fbc_reason = "double wide pipe not supported"; return 0; } /* * Display 12+ is not supporting FBC with PSR2. * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 */ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) { plane_state->no_fbc_reason = "PSR2 enabled"; return 0; } /* Wa_14016291713 */ if ((IS_DISPLAY_VER(i915, 12, 13) || IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) && crtc_state->has_psr) { plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; return 0; } if (!pixel_format_is_valid(plane_state)) { plane_state->no_fbc_reason = "pixel format not supported"; return 0; } if (!tiling_is_valid(plane_state)) { plane_state->no_fbc_reason = "tiling not supported"; return 0; } if (!rotation_is_valid(plane_state)) { plane_state->no_fbc_reason = "rotation not supported"; return 0; } if (!stride_is_valid(plane_state)) { plane_state->no_fbc_reason = "stride not supported"; return 0; } if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && fb->format->has_alpha) { plane_state->no_fbc_reason = "per-pixel alpha not supported"; return 0; } if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { plane_state->no_fbc_reason = "plane size too big"; return 0; } /* * Work around a problem on GEN9+ HW, where enabling FBC on a plane * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker. 
*/ if (DISPLAY_VER(i915) >= 9 && plane_state->view.color_plane[0].y & 3) { plane_state->no_fbc_reason = "plane start Y offset misaligned"; return 0; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ if (DISPLAY_VER(i915) >= 11 && (plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; return 0; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { const struct intel_cdclk_state *cdclk_state; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) { plane_state->no_fbc_reason = "pixel rate too high"; return 0; } } plane_state->no_fbc_reason = NULL; return 0; } static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); const struct drm_framebuffer *old_fb = old_plane_state->hw.fb; const struct drm_framebuffer *new_fb = new_plane_state->hw.fb; if (intel_crtc_needs_modeset(new_crtc_state)) return false; if (!intel_fbc_is_ok(old_plane_state) || !intel_fbc_is_ok(new_plane_state)) return false; if (old_fb->format->format != new_fb->format->format) return false; if (old_fb->modifier != new_fb->modifier) return false; if (intel_fbc_plane_stride(old_plane_state) != intel_fbc_plane_stride(new_plane_state)) return false; if (intel_fbc_cfb_stride(old_plane_state) != intel_fbc_cfb_stride(new_plane_state)) return false; if (intel_fbc_cfb_size(old_plane_state) != intel_fbc_cfb_size(new_plane_state)) return false; if (intel_fbc_override_cfb_stride(old_plane_state) != intel_fbc_override_cfb_stride(new_plane_state)) return false; return true; } static bool __intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_fbc *fbc = plane->fbc; bool need_vblank_wait = false; lockdep_assert_held(&fbc->lock); fbc->flip_pending = true; if (intel_fbc_can_flip_nuke(state, crtc, plane)) return need_vblank_wait; intel_fbc_deactivate(fbc, "update pending"); /* * Display WA #1198: glk+ * Need an extra vblank wait between FBC disable and most plane * updates. Bspec says this is only needed for plane disable, but * that is not true. Touching most plane registers will cause the * corruption to appear. Also SKL/derivatives do not seem to be * affected. * * TODO: could optimize this a bit by sampling the frame * counter when we disable FBC (if it was already done earlier) * and skipping the extra vblank wait before the plane update * if at least one frame has already passed. 
*/ if (fbc->activated && DISPLAY_VER(i915) >= 10) need_vblank_wait = true; fbc->activated = false; return need_vblank_wait; } bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_plane_state __maybe_unused *plane_state; bool need_vblank_wait = false; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_fbc *fbc = plane->fbc; if (!fbc || plane->pipe != crtc->pipe) continue; mutex_lock(&fbc->lock); if (fbc->state.plane == plane) need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane); mutex_unlock(&fbc->lock); } return need_vblank_wait; } static void __intel_fbc_disable(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; struct intel_plane *plane = fbc->state.plane; lockdep_assert_held(&fbc->lock); drm_WARN_ON(&i915->drm, fbc->active); drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); __intel_fbc_cleanup_cfb(fbc); fbc->state.plane = NULL; fbc->flip_pending = false; fbc->busy_bits = 0; } static void __intel_fbc_post_update(struct intel_fbc *fbc) { lockdep_assert_held(&fbc->lock); fbc->flip_pending = false; if (!fbc->busy_bits) intel_fbc_activate(fbc); else intel_fbc_deactivate(fbc, "frontbuffer write"); } void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_fbc *fbc = plane->fbc; if (!fbc || plane->pipe != crtc->pipe) continue; mutex_lock(&fbc->lock); if (fbc->state.plane == plane) __intel_fbc_post_update(fbc); mutex_unlock(&fbc->lock); } } static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) { if (fbc->state.plane) return fbc->state.plane->frontbuffer_bit; else return 0; } static void __intel_fbc_invalidate(struct intel_fbc *fbc, unsigned int frontbuffer_bits, enum fb_op_origin origin) { if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) return; mutex_lock(&fbc->lock); frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc); if (!frontbuffer_bits) goto out; fbc->busy_bits |= frontbuffer_bits; intel_fbc_deactivate(fbc, "frontbuffer write"); out: mutex_unlock(&fbc->lock); } void intel_fbc_invalidate(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) __intel_fbc_invalidate(fbc, frontbuffer_bits, origin); } static void __intel_fbc_flush(struct intel_fbc *fbc, unsigned int frontbuffer_bits, enum fb_op_origin origin) { mutex_lock(&fbc->lock); frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc); if (!frontbuffer_bits) goto out; fbc->busy_bits &= ~frontbuffer_bits; if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) goto out; if (fbc->busy_bits || fbc->flip_pending) goto out; if (fbc->active) intel_fbc_nuke(fbc); else intel_fbc_activate(fbc); out: mutex_unlock(&fbc->lock); } void intel_fbc_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) __intel_fbc_flush(fbc, frontbuffer_bits, origin); } int intel_fbc_atomic_check(struct intel_atomic_state *state) { struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { int ret; ret = 
intel_fbc_check_plane(state, plane); if (ret) return ret; } return 0; } static void __intel_fbc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = plane->fbc; lockdep_assert_held(&fbc->lock); if (fbc->state.plane) { if (fbc->state.plane != plane) return; if (intel_fbc_is_ok(plane_state)) { intel_fbc_update_state(state, crtc, plane); return; } __intel_fbc_disable(fbc); } drm_WARN_ON(&i915->drm, fbc->active); fbc->no_fbc_reason = plane_state->no_fbc_reason; if (fbc->no_fbc_reason) return; if (!intel_fbc_is_fence_ok(plane_state)) { fbc->no_fbc_reason = "framebuffer not fenced"; return; } if (fbc->underrun_detected) { fbc->no_fbc_reason = "FIFO underrun"; return; } if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), intel_fbc_min_limit(plane_state))) { fbc->no_fbc_reason = "not enough stolen memory"; return; } drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; intel_fbc_update_state(state, crtc, plane); intel_fbc_program_workarounds(fbc); intel_fbc_program_cfb(fbc); } /** * intel_fbc_disable - disable FBC if it's associated with crtc * @crtc: the CRTC * * This function disables FBC if it's associated with the provided CRTC. */ void intel_fbc_disable(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_plane *plane; for_each_intel_plane(&i915->drm, plane) { struct intel_fbc *fbc = plane->fbc; if (!fbc || plane->pipe != crtc->pipe) continue; mutex_lock(&fbc->lock); if (fbc->state.plane == plane) __intel_fbc_disable(fbc); mutex_unlock(&fbc->lock); } } void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_fbc *fbc = plane->fbc; if (!fbc || plane->pipe != crtc->pipe) continue; mutex_lock(&fbc->lock); if (intel_crtc_needs_fastset(crtc_state) && plane_state->no_fbc_reason) { if (fbc->state.plane == plane) __intel_fbc_disable(fbc); } else { __intel_fbc_enable(state, crtc, plane); } mutex_unlock(&fbc->lock); } } static void intel_fbc_underrun_work_fn(struct work_struct *work) { struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); struct drm_i915_private *i915 = fbc->i915; mutex_lock(&fbc->lock); /* Maybe we were scheduled twice. */ if (fbc->underrun_detected || !fbc->state.plane) goto out; drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; intel_fbc_deactivate(fbc, "FIFO underrun"); if (!fbc->flip_pending) intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); } static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; cancel_work_sync(&fbc->underrun_work); mutex_lock(&fbc->lock); if (fbc->underrun_detected) { drm_dbg_kms(&i915->drm, "Re-allowing FBC after fifo underrun\n"); fbc->no_fbc_reason = "FIFO underrun cleared"; } fbc->underrun_detected = false; mutex_unlock(&fbc->lock); } /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. 
* @i915: the i915 device * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. */ void intel_fbc_reset_underrun(struct drm_i915_private *i915) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) __intel_fbc_reset_underrun(fbc); } static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) { /* * There's no guarantee that underrun_detected won't be set to true * right after this check and before the work is scheduled, but that's * not a problem since we'll check it again under the work function * while FBC is locked. This check here is just to prevent us from * unnecessarily scheduling the work, and it relies on the fact that we * never switch underrun_detect back to false after it's true. */ if (READ_ONCE(fbc->underrun_detected)) return; queue_work(fbc->i915->unordered_wq, &fbc->underrun_work); } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun * @i915: i915 device * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can * become black screens or even worse, especially when paired with bad * watermarks. So in order for us to be on the safe side, completely disable FBC * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe * already suggests that watermarks may be bad, so try to be as safe as * possible. * * This function is called from the IRQ handler. */ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) __intel_fbc_handle_fifo_underrun_irq(fbc); } /* * The DDX driver changes its behavior depending on the value it reads from * i915.enable_fbc, so sanitize it by translating the default value into either * 0 or 1 in order to allow it to know what's going on. * * Notice that this is done at driver initialization and we still allow user * space to change the value during runtime without sanitizing it again. IGT * relies on being able to change i915.enable_fbc at runtime. 
*/ static int intel_sanitize_fbc_option(struct drm_i915_private *i915) { if (i915->params.enable_fbc >= 0) return !!i915->params.enable_fbc; if (!HAS_FBC(i915)) return 0; if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) return 1; return 0; } static bool need_fbc_vtd_wa(struct drm_i915_private *i915) { /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { drm_info(&i915->drm, "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); return true; } return false; } void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) { plane->fbc = fbc; } static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, enum intel_fbc_id fbc_id) { struct intel_fbc *fbc; fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); if (!fbc) return NULL; fbc->id = fbc_id; fbc->i915 = i915; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); if (DISPLAY_VER(i915) >= 7) fbc->funcs = &ivb_fbc_funcs; else if (DISPLAY_VER(i915) == 6) fbc->funcs = &snb_fbc_funcs; else if (DISPLAY_VER(i915) == 5) fbc->funcs = &ilk_fbc_funcs; else if (IS_G4X(i915)) fbc->funcs = &g4x_fbc_funcs; else if (DISPLAY_VER(i915) == 4) fbc->funcs = &i965_fbc_funcs; else fbc->funcs = &i8xx_fbc_funcs; return fbc; } /** * intel_fbc_init - Initialize FBC * @i915: the i915 device * * This function might be called during PM init process. */ void intel_fbc_init(struct drm_i915_private *i915) { enum intel_fbc_id fbc_id; if (need_fbc_vtd_wa(i915)) DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); for_each_fbc_id(i915, fbc_id) i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id); } /** * intel_fbc_sanitize - Sanitize FBC * @i915: the i915 device * * Make sure FBC is initially disabled since we have no * idea eg. into which parts of stolen it might be scribbling * into. */ void intel_fbc_sanitize(struct drm_i915_private *i915) { struct intel_fbc *fbc; enum intel_fbc_id fbc_id; for_each_intel_fbc(i915, fbc, fbc_id) { if (intel_fbc_hw_is_active(fbc)) intel_fbc_hw_deactivate(fbc); } } static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_fbc *fbc = m->private; struct drm_i915_private *i915 = fbc->i915; struct intel_plane *plane; intel_wakeref_t wakeref; drm_modeset_lock_all(&i915->drm); wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_lock(&fbc->lock); if (fbc->active) { seq_puts(m, "FBC enabled\n"); seq_printf(m, "Compressing: %s\n", str_yes_no(intel_fbc_is_compressing(fbc))); } else { seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); } for_each_intel_plane(&i915->drm, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); if (plane->fbc != fbc) continue; seq_printf(m, "%c [PLANE:%d:%s]: %s\n", fbc->state.plane == plane ? 
'*' : ' ', plane->base.base.id, plane->base.name, plane_state->no_fbc_reason ?: "FBC possible"); } mutex_unlock(&fbc->lock); intel_runtime_pm_put(&i915->runtime_pm, wakeref); drm_modeset_unlock_all(&i915->drm); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status); static int intel_fbc_debugfs_false_color_get(void *data, u64 *val) { struct intel_fbc *fbc = data; *val = fbc->false_color; return 0; } static int intel_fbc_debugfs_false_color_set(void *data, u64 val) { struct intel_fbc *fbc = data; mutex_lock(&fbc->lock); fbc->false_color = val; if (fbc->active) fbc->funcs->set_false_color(fbc, fbc->false_color); mutex_unlock(&fbc->lock); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, intel_fbc_debugfs_false_color_get, intel_fbc_debugfs_false_color_set, "%llu\n"); static void intel_fbc_debugfs_add(struct intel_fbc *fbc, struct dentry *parent) { debugfs_create_file("i915_fbc_status", 0444, parent, fbc, &intel_fbc_debugfs_status_fops); if (fbc->funcs->set_false_color) debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent, fbc, &intel_fbc_debugfs_false_color_fops); } void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); if (plane->fbc) intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry); } /* FIXME: remove this once igt is on board with per-crtc stuff */ void intel_fbc_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; struct intel_fbc *fbc; fbc = i915->display.fbc[INTEL_FBC_A]; if (fbc) intel_fbc_debugfs_add(fbc, minor->debugfs_root); }
linux-master
drivers/gpu/drm/i915/display/intel_fbc.c
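Editorial note: the intel_fbc.c listing above sizes the compressed framebuffer by searching for a workable compression limit (1:1, then 1:2, then 1:4), first trying to over-allocate and then shrinking the buffer as the limit grows. The following is only a hedged, self-contained sketch of that search order; find_limit(), try_alloc() and the "budget" parameter are hypothetical stand-ins for illustration, not the driver's stolen-memory API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool try_alloc(size_t bytes, size_t budget)
{
	/* Pretend the allocation succeeds whenever it fits in the budget. */
	return bytes <= budget;
}

static int find_limit(size_t cfb_size, int min_limit, int max_limit,
		      size_t budget)
{
	size_t size = cfb_size / min_limit;
	int limit = min_limit;

	/* Over-allocate first to reduce reallocations and fragmentation. */
	if (try_alloc(size << 1, budget))
		return limit;

	/* Otherwise shrink the buffer by raising the compression limit. */
	for (; limit <= max_limit; limit <<= 1, size >>= 1) {
		if (try_alloc(size, budget))
			return limit;
	}

	return 0; /* nothing fits */
}

int main(void)
{
	/* 8 MiB worth of CFB against a 3 MiB budget: expect limit 4. */
	printf("limit = %d\n", find_limit(8u << 20, 1, 4, 3u << 20));
	return 0;
}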
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <linux/bitops.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_de.h" #include "intel_display_trace.h" #include "intel_pmdemand.h" #include "skl_watermark.h" static struct intel_global_state * intel_pmdemand_duplicate_state(struct intel_global_obj *obj) { struct intel_pmdemand_state *pmdemand_state; pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL); if (!pmdemand_state) return NULL; return &pmdemand_state->base; } static void intel_pmdemand_destroy_state(struct intel_global_obj *obj, struct intel_global_state *state) { kfree(state); } static const struct intel_global_state_funcs intel_pmdemand_funcs = { .atomic_duplicate_state = intel_pmdemand_duplicate_state, .atomic_destroy_state = intel_pmdemand_destroy_state, }; static struct intel_pmdemand_state * intel_atomic_get_pmdemand_state(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_global_state *pmdemand_state = intel_atomic_get_global_obj_state(state, &i915->display.pmdemand.obj); if (IS_ERR(pmdemand_state)) return ERR_CAST(pmdemand_state); return to_intel_pmdemand_state(pmdemand_state); } static struct intel_pmdemand_state * intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_global_state *pmdemand_state = intel_atomic_get_old_global_obj_state(state, &i915->display.pmdemand.obj); if (!pmdemand_state) return NULL; return to_intel_pmdemand_state(pmdemand_state); } static struct intel_pmdemand_state * intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_global_state *pmdemand_state = intel_atomic_get_new_global_obj_state(state, &i915->display.pmdemand.obj); if (!pmdemand_state) return NULL; return to_intel_pmdemand_state(pmdemand_state); } int intel_pmdemand_init(struct drm_i915_private *i915) { struct intel_pmdemand_state *pmdemand_state; pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL); if (!pmdemand_state) return -ENOMEM; intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj, &pmdemand_state->base, &intel_pmdemand_funcs); if (IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) /* Wa_14016740474 */ intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE); return 0; } void intel_pmdemand_init_early(struct drm_i915_private *i915) { mutex_init(&i915->display.pmdemand.lock); init_waitqueue_head(&i915->display.pmdemand.waitqueue); } void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915, struct intel_encoder *encoder, struct intel_pmdemand_state *pmdemand_state, bool set_bit) { enum phy phy; if (DISPLAY_VER(i915) < 14) return; if (!encoder) return; phy = intel_port_to_phy(i915, encoder->port); if (intel_phy_is_tc(i915, phy)) return; if (set_bit) pmdemand_state->active_combo_phys_mask |= BIT(phy); else pmdemand_state->active_combo_phys_mask &= ~BIT(phy); } void intel_pmdemand_update_port_clock(struct drm_i915_private *i915, struct intel_pmdemand_state *pmdemand_state, enum pipe pipe, int port_clock) { if (DISPLAY_VER(i915) < 14) return; pmdemand_state->ddi_clocks[pipe] = port_clock; } static void intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915, struct intel_atomic_state *state, struct intel_pmdemand_state *pmdemand_state) { int max_ddiclk = 0; const struct 
intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) intel_pmdemand_update_port_clock(i915, pmdemand_state, crtc->pipe, new_crtc_state->port_clock); for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++) max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk); pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000); } static void intel_pmdemand_update_connector_phys(struct drm_i915_private *i915, struct intel_atomic_state *state, struct drm_connector_state *conn_state, bool set_bit, struct intel_pmdemand_state *pmdemand_state) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); struct intel_crtc_state *crtc_state; if (!crtc) return; if (set_bit) crtc_state = intel_atomic_get_new_crtc_state(state, crtc); else crtc_state = intel_atomic_get_old_crtc_state(state, crtc); if (!crtc_state->hw.active) return; intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, set_bit); } static void intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915, struct intel_atomic_state *state, struct intel_pmdemand_state *pmdemand_state) { struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_connector *connector; int i; for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { if (!intel_connector_needs_modeset(state, connector)) continue; /* First clear the active phys in the old connector state */ intel_pmdemand_update_connector_phys(i915, state, old_conn_state, false, pmdemand_state); /* Then set the active phys in new connector state */ intel_pmdemand_update_connector_phys(i915, state, new_conn_state, true, pmdemand_state); } pmdemand_state->params.active_phys = min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask), 7); } static bool intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915, struct intel_encoder *encoder) { enum phy phy; if (!encoder) return false; phy = intel_port_to_phy(i915, encoder->port); return intel_phy_is_tc(i915, phy); } static bool intel_pmdemand_connector_needs_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct drm_connector_state *old_conn_state; struct drm_connector_state *new_conn_state; struct drm_connector *connector; int i; for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { struct intel_encoder *old_encoder = to_intel_encoder(old_conn_state->best_encoder); struct intel_encoder *new_encoder = to_intel_encoder(new_conn_state->best_encoder); if (!intel_connector_needs_modeset(state, connector)) continue; if (old_encoder == new_encoder || (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) && intel_pmdemand_encoder_has_tc_phy(i915, new_encoder))) continue; return true; } return false; } static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) { const struct intel_bw_state *new_bw_state, *old_bw_state; const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state; const struct intel_crtc_state *new_crtc_state, *old_crtc_state; const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; struct intel_crtc *crtc; int i; new_bw_state = intel_atomic_get_new_bw_state(state); old_bw_state = intel_atomic_get_old_bw_state(state); if (new_bw_state && new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw) return true; new_dbuf_state = 
intel_atomic_get_new_dbuf_state(state); old_dbuf_state = intel_atomic_get_old_dbuf_state(state); if (new_dbuf_state && (new_dbuf_state->active_pipes != old_dbuf_state->active_pipes || new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)) return true; new_cdclk_state = intel_atomic_get_new_cdclk_state(state); old_cdclk_state = intel_atomic_get_old_cdclk_state(state); if (new_cdclk_state && (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk || new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level)) return true; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) if (new_crtc_state->port_clock != old_crtc_state->port_clock) return true; return intel_pmdemand_connector_needs_update(state); } int intel_pmdemand_atomic_check(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_bw_state *new_bw_state; const struct intel_cdclk_state *new_cdclk_state; const struct intel_dbuf_state *new_dbuf_state; struct intel_pmdemand_state *new_pmdemand_state; if (DISPLAY_VER(i915) < 14) return 0; if (!intel_pmdemand_needs_update(state)) return 0; new_pmdemand_state = intel_atomic_get_pmdemand_state(state); if (IS_ERR(new_pmdemand_state)) return PTR_ERR(new_pmdemand_state); new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); /* firmware will calculate the qclk_gv_index, requirement is set to 0 */ new_pmdemand_state->params.qclk_gv_index = 0; new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw; new_dbuf_state = intel_atomic_get_dbuf_state(state); if (IS_ERR(new_dbuf_state)) return PTR_ERR(new_dbuf_state); new_pmdemand_state->params.active_pipes = min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); new_pmdemand_state->params.active_dbufs = min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); new_cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(new_cdclk_state)) return PTR_ERR(new_cdclk_state); new_pmdemand_state->params.voltage_index = new_cdclk_state->actual.voltage_level; new_pmdemand_state->params.cdclk_freq_mhz = DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000); intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state); intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state); /* * Active_PLLs starts with 1 because of CDCLK PLL. * TODO: Missing to account genlock filter when it gets used. */ new_pmdemand_state->params.plls = min_t(u16, new_pmdemand_state->params.active_phys + 1, 7); /* * Setting scalers to max as it can not be calculated during flips and * fastsets without taking global states locks. 
*/ new_pmdemand_state->params.scalers = 7; if (state->base.allow_modeset) return intel_atomic_serialize_global_state(&new_pmdemand_state->base); else return intel_atomic_lock_global_state(&new_pmdemand_state->base); } static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915) { return !(intel_de_wait_for_clear(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), XELPDP_PMDEMAND_REQ_ENABLE, 10) || intel_de_wait_for_clear(i915, GEN12_DCPR_STATUS_1, XELPDP_PMDEMAND_INFLIGHT_STATUS, 10)); } void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915, struct intel_pmdemand_state *pmdemand_state) { u32 reg1, reg2; if (DISPLAY_VER(i915) < 14) return; mutex_lock(&i915->display.pmdemand.lock); if (drm_WARN_ON(&i915->drm, !intel_pmdemand_check_prev_transaction(i915))) { memset(&pmdemand_state->params, 0, sizeof(pmdemand_state->params)); goto unlock; } reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0)); reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); /* Set 1*/ pmdemand_state->params.qclk_gv_bw = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1); pmdemand_state->params.voltage_index = REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1); pmdemand_state->params.qclk_gv_index = REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1); pmdemand_state->params.active_pipes = REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1); pmdemand_state->params.active_dbufs = REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1); pmdemand_state->params.active_phys = REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1); /* Set 2*/ pmdemand_state->params.cdclk_freq_mhz = REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2); pmdemand_state->params.ddiclk_max = REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2); pmdemand_state->params.scalers = REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2); unlock: mutex_unlock(&i915->display.pmdemand.lock); } static bool intel_pmdemand_req_complete(struct drm_i915_private *i915) { return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) & XELPDP_PMDEMAND_REQ_ENABLE); } static void intel_pmdemand_wait(struct drm_i915_private *i915) { if (!wait_event_timeout(i915->display.pmdemand.waitqueue, intel_pmdemand_req_complete(i915), msecs_to_jiffies_timeout(10))) drm_err(&i915->drm, "timed out waiting for Punit PM Demand Response\n"); } /* Required to be programmed during Display Init Sequences. */ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915, u8 dbuf_slices) { u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3); mutex_lock(&i915->display.pmdemand.lock); if (drm_WARN_ON(&i915->drm, !intel_pmdemand_check_prev_transaction(i915))) goto unlock; intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0), XELPDP_PMDEMAND_DBUFS_MASK, REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs)); intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0, XELPDP_PMDEMAND_REQ_ENABLE); intel_pmdemand_wait(i915); unlock: mutex_unlock(&i915->display.pmdemand.lock); } static void intel_pmdemand_update_params(const struct intel_pmdemand_state *new, const struct intel_pmdemand_state *old, u32 *reg1, u32 *reg2, bool serialized) { /* * The pmdemand parameter updates happens in two steps. Pre plane and * post plane updates. During the pre plane, as DE might still be * handling with some old operations, to avoid unexpected performance * issues, program the pmdemand parameters with higher of old and new * values. And then after once settled, use the new parameter values * as part of the post plane update. 
* * If the pmdemand params update happens without modeset allowed, this * means we can't serialize the updates. So that implies possibility of * some parallel atomic commits affecting the pmdemand parameters. In * that case, we need to consider the current values from the register * as well. So in pre-plane case, we need to check the max of old, new * and current register value if not serialized. In post plane update * we need to consider max of new and current register value if not * serialized */ #define update_reg(reg, field, mask) do { \ u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \ u32 old_val = old ? old->params.field : 0; \ u32 new_val = new->params.field; \ \ *(reg) &= ~(mask); \ *(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \ } while (0) /* Set 1*/ update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK); update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK); update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK); update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK); update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK); update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK); /* Set 2*/ update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK); update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK); update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK); update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK); #undef update_reg } static void intel_pmdemand_program_params(struct drm_i915_private *i915, const struct intel_pmdemand_state *new, const struct intel_pmdemand_state *old, bool serialized) { bool changed = false; u32 reg1, mod_reg1; u32 reg2, mod_reg2; mutex_lock(&i915->display.pmdemand.lock); if (drm_WARN_ON(&i915->drm, !intel_pmdemand_check_prev_transaction(i915))) goto unlock; reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0)); mod_reg1 = reg1; reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)); mod_reg2 = reg2; intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2, serialized); if (reg1 != mod_reg1) { intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0), mod_reg1); changed = true; } if (reg2 != mod_reg2) { intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), mod_reg2); changed = true; } /* Initiate pm demand request only if register values are changed */ if (!changed) goto unlock; drm_dbg_kms(&i915->drm, "initate pmdemand request values: (0x%x 0x%x)\n", mod_reg1, mod_reg2); intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0, XELPDP_PMDEMAND_REQ_ENABLE); intel_pmdemand_wait(i915); unlock: mutex_unlock(&i915->display.pmdemand.lock); } static bool intel_pmdemand_state_changed(const struct intel_pmdemand_state *new, const struct intel_pmdemand_state *old) { return memcmp(&new->params, &old->params, sizeof(new->params)) != 0; } void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_pmdemand_state *new_pmdemand_state = intel_atomic_get_new_pmdemand_state(state); const struct intel_pmdemand_state *old_pmdemand_state = intel_atomic_get_old_pmdemand_state(state); if (DISPLAY_VER(i915) < 14) return; if (!new_pmdemand_state || !intel_pmdemand_state_changed(new_pmdemand_state, old_pmdemand_state)) return; WARN_ON(!new_pmdemand_state->base.changed); intel_pmdemand_program_params(i915, new_pmdemand_state, old_pmdemand_state, intel_atomic_global_state_is_serialized(state)); } void intel_pmdemand_post_plane_update(struct intel_atomic_state 
*state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_pmdemand_state *new_pmdemand_state = intel_atomic_get_new_pmdemand_state(state); const struct intel_pmdemand_state *old_pmdemand_state = intel_atomic_get_old_pmdemand_state(state); if (DISPLAY_VER(i915) < 14) return; if (!new_pmdemand_state || !intel_pmdemand_state_changed(new_pmdemand_state, old_pmdemand_state)) return; WARN_ON(!new_pmdemand_state->base.changed); intel_pmdemand_program_params(i915, new_pmdemand_state, NULL, intel_atomic_global_state_is_serialized(state)); }
linux-master
drivers/gpu/drm/i915/display/intel_pmdemand.c
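Editorial note: the intel_pmdemand.c listing above merges pmdemand parameters during the pre-plane update by programming each register field with the maximum of the old state, the new state and, when the commit is not serialized against other commits, the live register value. Below is a minimal sketch of that merge rule only, using plain integers in place of REG_FIELD_GET()/REG_FIELD_PREP(); merge_field() and the local max3() are illustrative helpers assumed for this example, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

static unsigned int max3(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a > b ? a : b;

	return m > c ? m : c;
}

static unsigned int merge_field(unsigned int old_val, unsigned int new_val,
				unsigned int reg_val, bool serialized)
{
	/*
	 * A serialized commit knows no other commit can race with it, so
	 * the value currently in the register can be ignored; otherwise
	 * it has to be folded into the maximum as well.
	 */
	if (serialized)
		reg_val = 0;

	return max3(old_val, new_val, reg_val);
}

int main(void)
{
	/* e.g. an active_pipes-style field: old = 2, new = 1, register = 3 */
	printf("serialized   -> %u\n", merge_field(2, 1, 3, true));
	printf("unserialized -> %u\n", merge_field(2, 1, 3, false));
	return 0;
}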
// SPDX-License-Identifier: MIT /* * Copyright © 2018 Intel Corporation */ #include "i915_reg.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_de.h" #include "intel_display_types.h" #define for_each_combo_phy(__dev_priv, __phy) \ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ for_each_if(intel_phy_is_combo(__dev_priv, __phy)) #define for_each_combo_phy_reverse(__dev_priv, __phy) \ for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \ for_each_if(intel_phy_is_combo(__dev_priv, __phy)) enum { PROCMON_0_85V_DOT_0, PROCMON_0_95V_DOT_0, PROCMON_0_95V_DOT_1, PROCMON_1_05V_DOT_0, PROCMON_1_05V_DOT_1, }; static const struct icl_procmon { const char *name; u32 dw1, dw9, dw10; } icl_procmon_values[] = { [PROCMON_0_85V_DOT_0] = { .name = "0.85V dot0 (low-voltage)", .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, [PROCMON_0_95V_DOT_0] = { .name = "0.95V dot0", .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, [PROCMON_0_95V_DOT_1] = { .name = "0.95V dot1", .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, [PROCMON_1_05V_DOT_0] = { .name = "1.05V dot0", .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, [PROCMON_1_05V_DOT_1] = { .name = "1.05V dot1", .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, }; static const struct icl_procmon * icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { u32 val; val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy)); switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); fallthrough; case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: return &icl_procmon_values[PROCMON_0_85V_DOT_0]; case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: return &icl_procmon_values[PROCMON_0_95V_DOT_0]; case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: return &icl_procmon_values[PROCMON_0_95V_DOT_1]; case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: return &icl_procmon_values[PROCMON_1_05V_DOT_0]; case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: return &icl_procmon_values[PROCMON_1_05V_DOT_1]; } } static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct icl_procmon *procmon; procmon = icl_get_procmon_ref_values(dev_priv, phy); intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy), (0xff << 16) | 0xff, procmon->dw1); intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9); intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10); } static bool check_phy_reg(struct drm_i915_private *dev_priv, enum phy phy, i915_reg_t reg, u32 mask, u32 expected_val) { u32 val = intel_de_read(dev_priv, reg); if ((val & mask) != expected_val) { drm_dbg(&dev_priv->drm, "Combo PHY %c reg %08x state mismatch: " "current %08x mask %08x expected %08x\n", phy_name(phy), reg.reg, val, mask, expected_val); return false; } return true; } static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct icl_procmon *procmon; bool ret; procmon = icl_get_procmon_ref_values(dev_priv, phy); drm_dbg_kms(&dev_priv->drm, "Combo PHY %c Voltage/Process Info : %s\n", phy_name(phy), procmon->name); ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy), (0xff << 16) | 0xff, procmon->dw1); ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy), -1U, procmon->dw9); ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy), -1U, procmon->dw10); return ret; } static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy) { /* * Some platforms only expect PHY_MISC to be programmed for PHY-A and * 
PHY-B and may not even have instances of the register for the * other combo PHY's. * * ADL-S technically has three instances of PHY_MISC, but only requires * that we program it for PHY A. */ if (IS_ALDERLAKE_S(i915)) return phy == PHY_A; else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) || IS_ROCKETLAKE(i915) || IS_DG1(i915)) return phy < PHY_C; return true; } static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, enum phy phy) { /* The PHY C added by EHL has no PHY_MISC register */ if (!has_phy_misc(dev_priv, phy)) return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT; else return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) & ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT); } static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915) { bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A); bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D); bool dsi_present = intel_bios_is_dsi_present(i915, NULL); /* * VBT's 'dvo port' field for child devices references the DDI, not * the PHY. So if combo PHY A is wired up to drive an external * display, we should see a child device present on PORT_D and * nothing on PORT_A and no DSI. */ if (ddi_d_present && !ddi_a_present && !dsi_present) return true; /* * If we encounter a VBT that claims to have an external display on * DDI-D _and_ an internal display on DDI-A/DSI leave an error message * in the log and let the internal display win. */ if (ddi_d_present) drm_err(&i915->drm, "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n"); return false; } static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy) { /* * Certain PHYs are connected to compensation resistors and act * as masters to other PHYs. * * ICL,TGL: * A(master) -> B(slave), C(slave) * RKL,DG1: * A(master) -> B(slave) * C(master) -> D(slave) * ADL-S: * A(master) -> B(slave), C(slave) * D(master) -> E(slave) * * We must set the IREFGEN bit for any PHY acting as a master * to another PHY. 
*/ if (phy == PHY_A) return true; else if (IS_ALDERLAKE_S(dev_priv)) return phy == PHY_D; else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy == PHY_C; return false; } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, enum phy phy) { bool ret = true; u32 expected_val = 0; if (!icl_combo_phy_enabled(dev_priv, phy)) return false; if (DISPLAY_VER(dev_priv) >= 12) { ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy), ICL_PORT_TX_DW8_ODCC_CLK_SEL | ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, ICL_PORT_TX_DW8_ODCC_CLK_SEL | ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy), DCC_MODE_SELECT_MASK, RUN_DCC_ONCE); } ret &= icl_verify_procmon_ref_values(dev_priv, phy); if (phy_is_master(dev_priv, phy)) { ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { if (ehl_vbt_ddi_d_present(dev_priv)) expected_val = ICL_PHY_MISC_MUX_DDID; ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy), ICL_PHY_MISC_MUX_DDID, expected_val); } } ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy), CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); return ret; } void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, enum phy phy, bool is_dsi, int lane_count, bool lane_reversal) { u8 lane_mask; if (is_dsi) { drm_WARN_ON(&dev_priv->drm, lane_reversal); switch (lane_count) { case 1: lane_mask = PWR_DOWN_LN_3_1_0; break; case 2: lane_mask = PWR_DOWN_LN_3_1; break; case 3: lane_mask = PWR_DOWN_LN_3; break; default: MISSING_CASE(lane_count); fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; } } else { switch (lane_count) { case 1: lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 : PWR_DOWN_LN_3_2_1; break; case 2: lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 : PWR_DOWN_LN_3_2; break; default: MISSING_CASE(lane_count); fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; } } intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), PWR_DOWN_LN_MASK, lane_mask); } static void icl_combo_phys_init(struct drm_i915_private *dev_priv) { enum phy phy; for_each_combo_phy(dev_priv, phy) { u32 val; if (icl_combo_phy_verify_state(dev_priv, phy)) { drm_dbg(&dev_priv->drm, "Combo PHY %c already enabled, won't reprogram it.\n", phy_name(phy)); continue; } if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; /* * EHL's combo PHY A can be hooked up to either an external * display (via DDI-D) or an internal display (via DDI-A or * the DSI DPHY). This is a motherboard design decision that * can't be changed on the fly, so initialize the PHY's mux * based on whether our VBT indicates the presence of any * "internal" child devices. 
*/ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && phy == PHY_A) { val &= ~ICL_PHY_MISC_MUX_DDID; if (ehl_vbt_ddi_d_present(dev_priv)) val |= ICL_PHY_MISC_MUX_DDID; } val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); skip_phy_misc: if (DISPLAY_VER(dev_priv) >= 12) { val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy)); val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK; val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL; val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2; intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val); val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); val &= ~DCC_MODE_SELECT_MASK; val |= RUN_DCC_ONCE; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); } icl_set_procmon_ref_values(dev_priv, phy); if (phy_is_master(dev_priv, phy)) intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy), 0, IREFGEN); intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT); intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, CL_POWER_DOWN_ENABLE); } } static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) { enum phy phy; for_each_combo_phy_reverse(dev_priv, phy) { if (phy == PHY_A && !icl_combo_phy_verify_state(dev_priv, phy)) { if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { /* * A known problem with old ifwi: * https://gitlab.freedesktop.org/drm/intel/-/issues/2411 * Suppress the warning for CI. Remove ASAP! */ drm_dbg_kms(&dev_priv->drm, "Combo PHY %c HW state changed unexpectedly\n", phy_name(phy)); } else { drm_warn(&dev_priv->drm, "Combo PHY %c HW state changed unexpectedly\n", phy_name(phy)); } } if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0, ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN); skip_phy_misc: intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0); } } void intel_combo_phy_init(struct drm_i915_private *i915) { icl_combo_phys_init(i915); } void intel_combo_phy_uninit(struct drm_i915_private *i915) { icl_combo_phys_uninit(i915); }
linux-master
drivers/gpu/drm/i915/display/intel_combo_phy.c
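A minimal standalone sketch of the lane power-up mask selection performed by intel_combo_phy_power_up_lanes() in the file above, for readers skimming the flattened source. The PWR_DOWN_LN_* numeric values and the inputs in main() are placeholders chosen for illustration, not the real ICL_PORT_CL_DW10 field encodings.

#include <stdio.h>

/* Placeholder encodings: bit n set means lane n is powered down. */
enum lane_mask {
        PWR_UP_ALL_LANES  = 0x0,
        PWR_DOWN_LN_3     = 0x8,
        PWR_DOWN_LN_3_1   = 0xa,
        PWR_DOWN_LN_3_1_0 = 0xb,
        PWR_DOWN_LN_3_2   = 0xc,
        PWR_DOWN_LN_3_2_1 = 0xe,
        PWR_DOWN_LN_1_0   = 0x3,
        PWR_DOWN_LN_2_1_0 = 0x7,
};

/* DSI always keeps the low lanes, so only the lane count matters. */
static enum lane_mask dsi_lane_mask(int lane_count)
{
        switch (lane_count) {
        case 1: return PWR_DOWN_LN_3_1_0;
        case 2: return PWR_DOWN_LN_3_1;
        case 3: return PWR_DOWN_LN_3;
        default: return PWR_UP_ALL_LANES; /* 4 lanes */
        }
}

/* DDI additionally honours lane reversal, mirroring which lanes stay up. */
static enum lane_mask ddi_lane_mask(int lane_count, int lane_reversal)
{
        switch (lane_count) {
        case 1: return lane_reversal ? PWR_DOWN_LN_2_1_0 : PWR_DOWN_LN_3_2_1;
        case 2: return lane_reversal ? PWR_DOWN_LN_1_0 : PWR_DOWN_LN_3_2;
        default: return PWR_UP_ALL_LANES; /* 4 lanes */
        }
}

int main(void)
{
        printf("1-lane DSI mask:          0x%x\n", dsi_lane_mask(1));
        printf("2-lane reversed DDI mask: 0x%x\n", ddi_lane_mask(2, 1));
        return 0;
}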
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <drm/drm_atomic_state_helper.h> #include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_display_core.h" #include "intel_display_types.h" #include "skl_watermark.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd; }; struct intel_psf_gv_point { u8 clk; /* clock in multiples of 16.6666 MHz */ }; struct intel_qgv_info { struct intel_qgv_point points[I915_NUM_QGV_POINTS]; struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS]; u8 num_points; u8 num_psf_points; u8 t_bl; u8 max_numchannels; u8 channel_width; u8 deinterleave; }; static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { u32 dclk_ratio, dclk_reference; u32 val; val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC); dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val); if (val & DG1_QCLK_REFERENCE) dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ else dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) sp->dclk *= 2; if (sp->dclk == 0) return -EINVAL; val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR); sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val); sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val); val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH); sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val); sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val); sp->t_rc = sp->t_rp + sp->t_ras; return 0; } static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { u32 val = 0, val2 = 0; u16 dclk; int ret; ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), &val, &val2); if (ret) return ret; dclk = val & 0xffff; sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; sp->t_rdpre = val2 & 0xff; sp->t_ras = (val2 & 0xff00) >> 8; sp->t_rc = sp->t_rp + sp->t_ras; return 0; } static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, struct intel_psf_gv_point *points) { u32 val = 0; int ret; int i; ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) { points[i].clk = val & 0xff; val >>= 8; } return 0; } static u16 icl_qgv_points_mask(struct drm_i915_private *i915) { unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; u16 qgv_points = 0, psf_points = 0; /* * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects * it with failure if we try masking any unadvertised points. * So need to operate only with those returned from PCode. 
*/ if (num_qgv_points > 0) qgv_points = GENMASK(num_qgv_points - 1, 0); if (num_psf_gv_points > 0) psf_points = GENMASK(num_psf_gv_points - 1, 0); return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); } static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask) { return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) & ICL_PCODE_REQ_QGV_PT_MASK); } int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, u32 points_mask) { int ret; if (DISPLAY_VER(dev_priv) >= 14) return 0; /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, points_mask, ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK, ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE, 1); if (ret < 0) { drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask); return ret; } dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ? I915_SAGV_ENABLED : I915_SAGV_DISABLED; return 0; } static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { u32 val, val2; u16 dclk; val = intel_uncore_read(&dev_priv->uncore, MTL_MEM_SS_INFO_QGV_POINT_LOW(point)); val2 = intel_uncore_read(&dev_priv->uncore, MTL_MEM_SS_INFO_QGV_POINT_HIGH(point)); dclk = REG_FIELD_GET(MTL_DCLK_MASK, val); sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000); sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val); sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val); sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2); sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2); sp->t_rc = sp->t_rp + sp->t_ras; return 0; } static int intel_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { if (DISPLAY_VER(dev_priv) >= 14) return mtl_read_qgv_point_info(dev_priv, sp, point); else if (IS_DG1(dev_priv)) return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point); else return icl_pcode_read_qgv_point_info(dev_priv, sp, point); } static int icl_get_qgv_points(struct drm_i915_private *dev_priv, struct intel_qgv_info *qi, bool is_y_tile) { const struct dram_info *dram_info = &dev_priv->dram_info; int i, ret; qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; if (DISPLAY_VER(dev_priv) >= 14) { switch (dram_info->type) { case INTEL_DRAM_DDR4: qi->t_bl = 4; qi->max_numchannels = 2; qi->channel_width = 64; qi->deinterleave = 2; break; case INTEL_DRAM_DDR5: qi->t_bl = 8; qi->max_numchannels = 4; qi->channel_width = 32; qi->deinterleave = 2; break; case INTEL_DRAM_LPDDR4: case INTEL_DRAM_LPDDR5: qi->t_bl = 16; qi->max_numchannels = 8; qi->channel_width = 16; qi->deinterleave = 4; break; default: MISSING_CASE(dram_info->type); return -EINVAL; } } else if (DISPLAY_VER(dev_priv) >= 12) { switch (dram_info->type) { case INTEL_DRAM_DDR4: qi->t_bl = is_y_tile ? 8 : 4; qi->max_numchannels = 2; qi->channel_width = 64; qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_DDR5: qi->t_bl = is_y_tile ? 16 : 8; qi->max_numchannels = 4; qi->channel_width = 32; qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_LPDDR4: if (IS_ROCKETLAKE(dev_priv)) { qi->t_bl = 8; qi->max_numchannels = 4; qi->channel_width = 32; qi->deinterleave = 2; break; } fallthrough; case INTEL_DRAM_LPDDR5: qi->t_bl = 16; qi->max_numchannels = 8; qi->channel_width = 16; qi->deinterleave = is_y_tile ? 
2 : 4; break; default: qi->t_bl = 16; qi->max_numchannels = 1; break; } } else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; qi->max_numchannels = 1; } if (drm_WARN_ON(&dev_priv->drm, qi->num_points > ARRAY_SIZE(qi->points))) qi->num_points = ARRAY_SIZE(qi->points); for (i = 0; i < qi->num_points; i++) { struct intel_qgv_point *sp = &qi->points[i]; ret = intel_read_qgv_point_info(dev_priv, sp, i); if (ret) return ret; drm_dbg_kms(&dev_priv->drm, "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras, sp->t_rcd, sp->t_rc); } if (qi->num_psf_points > 0) { ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points); if (ret) { drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n"); qi->num_psf_points = 0; } for (i = 0; i < qi->num_psf_points; i++) drm_dbg_kms(&dev_priv->drm, "PSF GV %d: CLK=%d \n", i, qi->psf_points[i].clk); } return 0; } static int adl_calc_psf_bw(int clk) { /* * clk is multiples of 16.666MHz (100/6) * According to BSpec PSF GV bandwidth is * calculated as BW = 64 * clk * 16.666Mhz */ return DIV_ROUND_CLOSEST(64 * clk * 100, 6); } static int icl_sagv_max_dclk(const struct intel_qgv_info *qi) { u16 dclk = 0; int i; for (i = 0; i < qi->num_points; i++) dclk = max(dclk, qi->points[i].dclk); return dclk; } struct intel_sa_info { u16 displayrtids; u8 deburst, deprogbwlimit, derating; }; static const struct intel_sa_info icl_sa_info = { .deburst = 8, .deprogbwlimit = 25, /* GB/s */ .displayrtids = 128, .derating = 10, }; static const struct intel_sa_info tgl_sa_info = { .deburst = 16, .deprogbwlimit = 34, /* GB/s */ .displayrtids = 256, .derating = 10, }; static const struct intel_sa_info rkl_sa_info = { .deburst = 8, .deprogbwlimit = 20, /* GB/s */ .displayrtids = 128, .derating = 10, }; static const struct intel_sa_info adls_sa_info = { .deburst = 16, .deprogbwlimit = 38, /* GB/s */ .displayrtids = 256, .derating = 10, }; static const struct intel_sa_info adlp_sa_info = { .deburst = 16, .deprogbwlimit = 38, /* GB/s */ .displayrtids = 256, .derating = 20, }; static const struct intel_sa_info mtl_sa_info = { .deburst = 32, .deprogbwlimit = 38, /* GB/s */ .displayrtids = 256, .derating = 10, }; static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } dclk_max = icl_sagv_max_dclk(&qi); maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10); ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 
4 : 2); for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; int clpchgroup; int j; clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; /* * Max row cycle time * * FIXME what is the logic behind the * assumed burst length? */ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100); drm_dbg_kms(&dev_priv->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } } /* * In case if SAGV is disabled in BIOS, we always get 1 * SAGV point, but we can't send PCode commands to restrict it * as it will fail and pointless anyway. */ if (qi.num_points == 1) dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; const struct dram_info *dram_info = &dev_priv->dram_info; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw, peakbw; int clperchgroup; int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } if (DISPLAY_VER(dev_priv) < 14 && (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)) num_channels *= 2; qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); if (qi.max_numchannels != 0) num_channels = min_t(u8, num_channels, qi.max_numchannels); dclk_max = icl_sagv_max_dclk(&qi); peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max; maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); /* * clperchgroup = 4kpagespermempage * clperchperblock, * clperchperblock = 8 / num_channels * interleave */ clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; struct intel_bw_info *bi_next; int clpchgroup; int j; clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; if (i < num_groups - 1) { bi_next = &dev_priv->display.bw.max[i + 1]; if (clpchgroup < clperchgroup) bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; else bi_next->num_planes = 0; } bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; /* * Max row cycle time * * FIXME what is the logic behind the * assumed burst length? 
*/ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100); bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk * num_channels * qi.channel_width, 8); drm_dbg_kms(&dev_priv->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n", i, j, bi->num_planes, bi->deratedbw[j], bi->peakbw[j]); } for (j = 0; j < qi.num_psf_points; j++) { const struct intel_psf_gv_point *sp = &qi.psf_points[j]; bi->psf_bw[j] = adl_calc_psf_bw(sp->clk); drm_dbg_kms(&dev_priv->drm, "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } } /* * In case if SAGV is disabled in BIOS, we always get 1 * SAGV point, but we can't send PCode commands to restrict it * as it will fail and pointless anyway. */ if (qi.num_points == 1) dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } static void dg2_get_bw_info(struct drm_i915_private *i915) { unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000; int num_groups = ARRAY_SIZE(i915->display.bw.max); int i; /* * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth * that doesn't depend on the number of planes enabled. So fill all the * plane group with constant bw information for uniformity with other * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth, * whereas DG2-G11 platforms have 38 GB/s. */ for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &i915->display.bw.max[i]; bi->num_planes = 1; /* Need only one dummy QGV point per group */ bi->num_qgv_points = 1; bi->deratedbw[0] = deratedbw; } i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; } static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, int num_planes, int qgv_point) { int i; /* * Let's return max bw for 0 planes */ num_planes = max(1, num_planes); for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) { const struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when * SAGV is forced to off/min/med/max. */ if (qgv_point >= bi->num_qgv_points) return UINT_MAX; if (num_planes >= bi->num_planes) return i; } return UINT_MAX; } static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, int num_planes, int qgv_point) { int i; /* * Let's return max bw for 0 planes */ num_planes = max(1, num_planes); for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) { const struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when * SAGV is forced to off/min/med/max. 
*/ if (qgv_point >= bi->num_qgv_points) return UINT_MAX; if (num_planes <= bi->num_planes) return i; } return 0; } static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { const struct intel_bw_info *bi = &dev_priv->display.bw.max[0]; return bi->psf_bw[psf_gv_point]; } void intel_bw_init_hw(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) return; if (DISPLAY_VER(dev_priv) >= 14) tgl_get_bw_info(dev_priv, &mtl_sa_info); else if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); else if (IS_ALDERLAKE_P(dev_priv)) tgl_get_bw_info(dev_priv, &adlp_sa_info); else if (IS_ALDERLAKE_S(dev_priv)) tgl_get_bw_info(dev_priv, &adls_sa_info); else if (IS_ROCKETLAKE(dev_priv)) tgl_get_bw_info(dev_priv, &rkl_sa_info); else if (DISPLAY_VER(dev_priv) == 12) tgl_get_bw_info(dev_priv, &tgl_sa_info); else if (DISPLAY_VER(dev_priv) == 11) icl_get_bw_info(dev_priv, &icl_sa_info); } static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) { /* * We assume cursors are small enough * to not not cause bandwidth problems. */ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); } static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); unsigned int data_rate = 0; enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { /* * We assume cursors are small enough * to not not cause bandwidth problems. */ if (plane_id == PLANE_CURSOR) continue; data_rate += crtc_state->data_rate[plane_id]; if (DISPLAY_VER(i915) < 11) data_rate += crtc_state->data_rate_y[plane_id]; } return data_rate; } /* "Maximum Pipe Read Bandwidth" */ static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (DISPLAY_VER(i915) < 12) return 0; return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512); } void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); bw_state->data_rate[crtc->pipe] = intel_bw_crtc_data_rate(crtc_state); bw_state->num_active_planes[crtc->pipe] = intel_bw_crtc_num_active_planes(crtc_state); drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), bw_state->data_rate[crtc->pipe], bw_state->num_active_planes[crtc->pipe]); } static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state) { unsigned int num_active_planes = 0; enum pipe pipe; for_each_pipe(dev_priv, pipe) num_active_planes += bw_state->num_active_planes[pipe]; return num_active_planes; } static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state) { unsigned int data_rate = 0; enum pipe pipe; for_each_pipe(dev_priv, pipe) data_rate += bw_state->data_rate[pipe]; if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv)) data_rate = DIV_ROUND_UP(data_rate * 105, 100); return data_rate; } struct intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; bw_state = intel_atomic_get_old_global_obj_state(state, 
&dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } struct intel_bw_state * intel_atomic_get_new_bw_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); return to_intel_bw_state(bw_state); } static int mtl_find_qgv_points(struct drm_i915_private *i915, unsigned int data_rate, unsigned int num_active_planes, struct intel_bw_state *new_bw_state) { unsigned int best_rate = UINT_MAX; unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; unsigned int qgv_peak_bw = 0; int i; int ret; ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; /* * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is * not enabled. PM Demand code will clamp the value for the register */ if (!intel_can_enable_sagv(i915, new_bw_state)) { new_bw_state->qgv_point_peakbw = U16_MAX; drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw."); return 0; } /* * Find the best QGV point by comparing the data_rate with max data rate * offered per plane group */ for (i = 0; i < num_qgv_points; i++) { unsigned int bw_index = tgl_max_bw_index(i915, num_active_planes, i); unsigned int max_data_rate; if (bw_index >= ARRAY_SIZE(i915->display.bw.max)) continue; max_data_rate = i915->display.bw.max[bw_index].deratedbw[i]; if (max_data_rate < data_rate) continue; if (max_data_rate - data_rate < best_rate) { best_rate = max_data_rate - data_rate; qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i]; } drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n", i, max_data_rate, data_rate, qgv_peak_bw); } drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n", qgv_peak_bw, data_rate); /* * The display configuration cannot be supported if no QGV point * satisfying the required data rate is found */ if (qgv_peak_bw == 0) { drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } /* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */ new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100); return 0; } static int icl_find_qgv_points(struct drm_i915_private *i915, unsigned int data_rate, unsigned int num_active_planes, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { unsigned int max_bw_point = 0; unsigned int max_bw = 0; unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; u16 psf_points = 0; u16 qgv_points = 0; int i; int ret; ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; for (i = 0; i < num_qgv_points; i++) { unsigned int idx; unsigned int max_data_rate; if (DISPLAY_VER(i915) > 11) idx = tgl_max_bw_index(i915, num_active_planes, i); else idx = icl_max_bw_index(i915, num_active_planes, i); if (idx >= ARRAY_SIZE(i915->display.bw.max)) continue; max_data_rate = 
i915->display.bw.max[idx].deratedbw[i]; /* * We need to know which qgv point gives us * maximum bandwidth in order to disable SAGV * if we find that we exceed SAGV block time * with watermarks. By that moment we already * have those, as it is calculated earlier in * intel_atomic_check, */ if (max_data_rate > max_bw) { max_bw_point = i; max_bw = max_data_rate; } if (max_data_rate >= data_rate) qgv_points |= BIT(i); drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n", i, max_data_rate, data_rate); } for (i = 0; i < num_psf_gv_points; i++) { unsigned int max_data_rate = adl_psf_bw(i915, i); if (max_data_rate >= data_rate) psf_points |= BIT(i); drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d" " required %d\n", i, max_data_rate, data_rate); } /* * BSpec states that we always should have at least one allowed point * left, so if we couldn't - simply reject the configuration for obvious * reasons. */ if (qgv_points == 0) { drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } if (num_psf_gv_points > 0 && psf_points == 0) { drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } /* * Leave only single point with highest bandwidth, if * we can't enable SAGV due to the increased memory latency it may * cause. */ if (!intel_can_enable_sagv(i915, new_bw_state)) { qgv_points = BIT(max_bw_point); drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n", max_bw_point); } /* * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter. */ new_bw_state->qgv_points_mask = ~(ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) */ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) { ret = intel_atomic_serialize_global_state(&new_bw_state->base); if (ret) return ret; } return 0; } static int intel_bw_check_qgv_points(struct drm_i915_private *i915, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state); unsigned int num_active_planes = intel_bw_num_active_planes(i915, new_bw_state); data_rate = DIV_ROUND_UP(data_rate, 1000); if (DISPLAY_VER(i915) >= 14) return mtl_find_qgv_points(i915, data_rate, num_active_planes, new_bw_state); else return icl_find_qgv_points(i915, data_rate, num_active_planes, old_bw_state, new_bw_state); } static bool intel_bw_state_changed(struct drm_i915_private *i915, const struct intel_bw_state *old_bw_state, const struct intel_bw_state *new_bw_state) { enum pipe pipe; for_each_pipe(i915, pipe) { const struct intel_dbuf_bw *old_crtc_bw = &old_bw_state->dbuf_bw[pipe]; const struct intel_dbuf_bw *new_crtc_bw = &new_bw_state->dbuf_bw[pipe]; enum dbuf_slice slice; for_each_dbuf_slice(i915, slice) { if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] || old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice]) return true; } if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe]) return true; } return false; } static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state, struct intel_crtc *crtc, enum plane_id plane_id, const struct skl_ddb_entry *ddb, unsigned int 
data_rate) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb); enum dbuf_slice slice; /* * The arbiter can only really guarantee an * equal share of the total bw to each plane. */ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) { crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate); crtc_bw->active_planes[slice] |= BIT(plane_id); } } static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; enum plane_id plane_id; memset(crtc_bw, 0, sizeof(*crtc_bw)); if (!crtc_state->hw.active) return; for_each_plane_id_on_crtc(crtc, plane_id) { /* * We assume cursors are small enough * to not cause bandwidth problems. */ if (plane_id == PLANE_CURSOR) continue; skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id, &crtc_state->wm.skl.plane_ddb[plane_id], crtc_state->data_rate[plane_id]); if (DISPLAY_VER(i915) < 11) skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id, &crtc_state->wm.skl.plane_ddb_y[plane_id], crtc_state->data_rate[plane_id]); } } /* "Maximum Data Buffer Bandwidth" */ static int intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, const struct intel_bw_state *bw_state) { unsigned int total_max_bw = 0; enum dbuf_slice slice; for_each_dbuf_slice(i915, slice) { int num_active_planes = 0; unsigned int max_bw = 0; enum pipe pipe; /* * The arbiter can only really guarantee an * equal share of the total bw to each plane. */ for_each_pipe(i915, pipe) { const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe]; max_bw = max(crtc_bw->max_bw[slice], max_bw); num_active_planes += hweight8(crtc_bw->active_planes[slice]); } max_bw *= num_active_planes; total_max_bw = max(total_max_bw, max_bw); } return DIV_ROUND_UP(total_max_bw, 64); } int intel_bw_min_cdclk(struct drm_i915_private *i915, const struct intel_bw_state *bw_state) { enum pipe pipe; int min_cdclk; min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state); for_each_pipe(i915, pipe) min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk); return min_cdclk; } int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_bw_state *new_bw_state = NULL; const struct intel_bw_state *old_bw_state = NULL; const struct intel_cdclk_state *cdclk_state; const struct intel_crtc_state *crtc_state; int old_min_cdclk, new_min_cdclk; struct intel_crtc *crtc; int i; if (DISPLAY_VER(dev_priv) < 9) return 0; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); old_bw_state = intel_atomic_get_old_bw_state(state); skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state); new_bw_state->min_cdclk[crtc->pipe] = intel_bw_crtc_min_cdclk(crtc_state); } if (!old_bw_state) return 0; if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) { int ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; } old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state); new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state); /* * No need to check against the cdclk state if * the min cdclk doesn't increase. * * Ie. 
we only ever increase the cdclk due to bandwidth * requirements. This can reduce back and forth * display blinking due to constant cdclk changes. */ if (new_min_cdclk <= old_min_cdclk) return 0; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); /* * No need to recalculate the cdclk state if * the min cdclk doesn't increase. * * Ie. we only ever increase the cdclk due to bandwidth * requirements. This can reduce back and forth * display blinking due to constant cdclk changes. */ if (new_min_cdclk <= cdclk_state->bw_min_cdclk) return 0; drm_dbg_kms(&dev_priv->drm, "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n", new_min_cdclk, cdclk_state->bw_min_cdclk); *need_cdclk_calc = true; return 0; } static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { unsigned int old_data_rate = intel_bw_crtc_data_rate(old_crtc_state); unsigned int new_data_rate = intel_bw_crtc_data_rate(new_crtc_state); unsigned int old_active_planes = intel_bw_crtc_num_active_planes(old_crtc_state); unsigned int new_active_planes = intel_bw_crtc_num_active_planes(new_crtc_state); struct intel_bw_state *new_bw_state; /* * Avoid locking the bw state when * nothing significant has changed. */ if (old_data_rate == new_data_rate && old_active_planes == new_active_planes) continue; new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; *changed = true; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] data rate %u num active planes %u\n", crtc->base.base.id, crtc->base.name, new_bw_state->data_rate[crtc->pipe], new_bw_state->num_active_planes[crtc->pipe]); } return 0; } int intel_bw_atomic_check(struct intel_atomic_state *state) { bool changed = false; struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_bw_state *new_bw_state; const struct intel_bw_state *old_bw_state; int ret; /* FIXME earlier gens need some checks too */ if (DISPLAY_VER(i915) < 11) return 0; ret = intel_bw_check_data_rate(state, &changed); if (ret) return ret; old_bw_state = intel_atomic_get_old_bw_state(state); new_bw_state = intel_atomic_get_new_bw_state(state); if (new_bw_state && intel_can_enable_sagv(i915, old_bw_state) != intel_can_enable_sagv(i915, new_bw_state)) changed = true; /* * If none of our inputs (data rates, number of active * planes, SAGV yes/no) changed then nothing to do here. 
*/ if (!changed) return 0; ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state); if (ret) return ret; return 0; } static struct intel_global_state * intel_bw_duplicate_state(struct intel_global_obj *obj) { struct intel_bw_state *state; state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); if (!state) return NULL; return &state->base; } static void intel_bw_destroy_state(struct intel_global_obj *obj, struct intel_global_state *state) { kfree(state); } static const struct intel_global_state_funcs intel_bw_funcs = { .atomic_duplicate_state = intel_bw_duplicate_state, .atomic_destroy_state = intel_bw_destroy_state, }; int intel_bw_init(struct drm_i915_private *dev_priv) { struct intel_bw_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj, &state->base, &intel_bw_funcs); return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_bw.c
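The per-QGV-point derated-bandwidth arithmetic shared by icl_get_bw_info() and tgl_get_bw_info() above can be read in isolation; this is a small standalone sketch of that calculation. The timing numbers in main() are made-up examples (with t_rc = t_rp + t_ras, as the driver computes it), and the result is in the same raw units the driver stores in deratedbw.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct qgv_point { int dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd; };

static int derated_bw(const struct qgv_point *sp, int clpchgroup, int t_bl,
                      int num_channels, int derating, int maxdebw)
{
        /* max row cycle time, as computed in the kernel code above */
        int ct = sp->t_rp + sp->t_rcd + (clpchgroup - 1) * t_bl + sp->t_rdpre;

        if (sp->t_rc > ct)
                ct = sp->t_rc;

        int bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

        /* apply the derating percentage, then clamp to the DE bandwidth limit */
        bw = bw * (100 - derating) / 100;

        return bw < maxdebw ? bw : maxdebw;
}

int main(void)
{
        /* example timings only */
        struct qgv_point sp = { .dclk = 2133, .t_rp = 16, .t_rdpre = 8,
                                .t_rc = 50, .t_ras = 34, .t_rcd = 16 };

        printf("derated bw = %d (driver units)\n",
               derated_bw(&sp, 8, 4, 2, 10, 25000));
        return 0;
}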
/* * Copyright 2006 Dave Airlie <[email protected]> * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> */ #include <linux/i2c.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dvo.h" #include "intel_dvo_dev.h" #include "intel_dvo_regs.h" #include "intel_gmbus.h" #include "intel_panel.h" #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 #define INTEL_DVO_CHIP_LVDS_NO_FIXED 5 #define SIL164_ADDR 0x38 #define CH7xxx_ADDR 0x76 #define TFP410_ADDR 0x38 #define NS2501_ADDR 0x38 static const struct intel_dvo_device intel_dvo_devices[] = { { .type = INTEL_DVO_CHIP_TMDS, .name = "sil164", .port = PORT_C, .slave_addr = SIL164_ADDR, .dev_ops = &sil164_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .port = PORT_C, .slave_addr = CH7xxx_ADDR, .dev_ops = &ch7xxx_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "ch7xxx", .port = PORT_C, .slave_addr = 0x75, /* For some ch7010 */ .dev_ops = &ch7xxx_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ivch", .port = PORT_A, .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ .dev_ops = &ivch_ops, }, { .type = INTEL_DVO_CHIP_TMDS, .name = "tfp410", .port = PORT_C, .slave_addr = TFP410_ADDR, .dev_ops = &tfp410_ops, }, { .type = INTEL_DVO_CHIP_LVDS, .name = "ch7017", .port = PORT_C, .slave_addr = 0x75, .gpio = GMBUS_PIN_DPB, .dev_ops = &ch7017_ops, }, { .type = INTEL_DVO_CHIP_LVDS_NO_FIXED, .name = "ns2501", .port = PORT_B, .slave_addr = NS2501_ADDR, .dev_ops = &ns2501_ops, }, }; struct intel_dvo { struct intel_encoder base; struct intel_dvo_device dev; struct intel_connector *attached_connector; }; static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) { return container_of(encoder, struct intel_dvo, base); } static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector) { return enc_to_dvo(intel_attached_encoder(connector)); } static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; u32 tmp; tmp = intel_de_read(i915, DVO(port)); if 
(!(tmp & DVO_ENABLE)) return false; return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); } static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; u32 tmp; tmp = intel_de_read(i915, DVO(port)); *pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp); return tmp & DVO_ENABLE; } static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; u32 tmp, flags = 0; pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO); tmp = intel_de_read(i915, DVO(port)); if (tmp & DVO_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (tmp & DVO_VSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->hw.adjusted_mode.flags |= flags; pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void intel_disable_dvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0); intel_de_posting_read(i915, DVO(port)); } static void intel_enable_dvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, &pipe_config->hw.mode, &pipe_config->hw.adjusted_mode); intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE); intel_de_posting_read(i915, DVO(port)); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } static enum drm_mode_status intel_dvo_mode_valid(struct drm_connector *_connector, struct drm_display_mode *mode) { struct intel_connector *connector = to_intel_connector(_connector); struct intel_dvo *intel_dvo = intel_attached_dvo(connector); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, mode); int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq; int target_clock = mode->clock; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* XXX: Validate clock range */ if (fixed_mode) { enum drm_mode_status status; status = intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; target_clock = fixed_mode->clock; } if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } static int intel_dvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(intel_dvo->attached_connector, adjusted_mode); /* * If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ if (fixed_mode) { int ret; ret = intel_panel_compute_config(connector, adjusted_mode); if (ret) return ret; } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return 0; } static void intel_dvo_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port = encoder->port; enum pipe pipe = crtc->pipe; u32 dvo_val; /* Save the active data order, since I don't know what it should be set to. */ dvo_val = intel_de_read(i915, DVO(port)) & (DVO_DEDICATED_INT_ENABLE | DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK); dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | DVO_BLANK_ACTIVE_HIGH; dvo_val |= DVO_PIPE_SEL(pipe); dvo_val |= DVO_PIPE_STALL; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) dvo_val |= DVO_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; intel_de_write(i915, DVO_SRCDIM(port), DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) | DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay)); intel_de_write(i915, DVO(port), dvo_val); } static enum drm_connector_status intel_dvo_detect(struct drm_connector *_connector, bool force) { struct intel_connector *connector = to_intel_connector(_connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dvo *intel_dvo = intel_attached_dvo(connector); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id, connector->base.name); if (!INTEL_DISPLAY_ENABLED(i915)) return connector_status_disconnected; return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } static int intel_dvo_get_modes(struct drm_connector *_connector) { struct intel_connector *connector = to_intel_connector(_connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); int num_modes; /* * We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. 
*/ num_modes = intel_ddc_get_modes(&connector->base, intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC)); if (num_modes) return num_modes; return intel_panel_get_modes(connector); } static const struct drm_connector_funcs intel_dvo_connector_funcs = { .detect = intel_dvo_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); if (intel_dvo->dev.dev_ops->destroy) intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_dvo_enc_funcs = { .destroy = intel_dvo_enc_destroy, }; static int intel_dvo_encoder_type(const struct intel_dvo_device *dvo) { switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: return DRM_MODE_ENCODER_TMDS; case INTEL_DVO_CHIP_LVDS_NO_FIXED: case INTEL_DVO_CHIP_LVDS: return DRM_MODE_ENCODER_LVDS; default: MISSING_CASE(dvo->type); return DRM_MODE_ENCODER_NONE; } } static int intel_dvo_connector_type(const struct intel_dvo_device *dvo) { switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: return DRM_MODE_CONNECTOR_DVII; case INTEL_DVO_CHIP_LVDS_NO_FIXED: case INTEL_DVO_CHIP_LVDS: return DRM_MODE_CONNECTOR_LVDS; default: MISSING_CASE(dvo->type); return DRM_MODE_CONNECTOR_Unknown; } } static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, struct intel_dvo *intel_dvo, const struct intel_dvo_device *dvo) { struct i2c_adapter *i2c; u32 dpll[I915_MAX_PIPES]; enum pipe pipe; int gpio; bool ret; /* * Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined * in the spec. */ if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio)) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) gpio = GMBUS_PIN_SSC; else gpio = GMBUS_PIN_DPB; /* * Set up the I2C bus necessary for the chip we're probing. * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ i2c = intel_gmbus_get_adapter(dev_priv, gpio); intel_dvo->dev = *dvo; /* * GMBUS NAK handling seems to be unstable, hence let the * transmitter detection run in bit banging mode for now. */ intel_gmbus_force_bit(i2c, true); /* * ns2501 requires the DVO 2x clock before it will * respond to i2c accesses, so make sure we have * the clock enabled before we attempt to initialize * the device. 
*/ for_each_pipe(dev_priv, pipe) dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE); ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); /* restore the DVO 2x clock state to original */ for_each_pipe(dev_priv, pipe) { intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]); } intel_gmbus_force_bit(i2c, false); return ret; } static bool intel_dvo_probe(struct drm_i915_private *i915, struct intel_dvo *intel_dvo) { int i; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { if (intel_dvo_init_dev(i915, intel_dvo, &intel_dvo_devices[i])) return true; } return false; } void intel_dvo_init(struct drm_i915_private *i915) { struct intel_connector *connector; struct intel_encoder *encoder; struct intel_dvo *intel_dvo; intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL); if (!intel_dvo) return; connector = intel_connector_alloc(); if (!connector) { kfree(intel_dvo); return; } intel_dvo->attached_connector = connector; encoder = &intel_dvo->base; encoder->disable = intel_disable_dvo; encoder->enable = intel_enable_dvo; encoder->get_hw_state = intel_dvo_get_hw_state; encoder->get_config = intel_dvo_get_config; encoder->compute_config = intel_dvo_compute_config; encoder->pre_enable = intel_dvo_pre_enable; connector->get_hw_state = intel_dvo_connector_get_hw_state; if (!intel_dvo_probe(i915, intel_dvo)) { kfree(intel_dvo); intel_connector_free(connector); return; } assert_port_valid(i915, intel_dvo->dev.port); encoder->type = INTEL_OUTPUT_DVO; encoder->power_domain = POWER_DOMAIN_PORT_OTHER; encoder->port = intel_dvo->dev.port; encoder->pipe_mask = ~0; if (intel_dvo->dev.type != INTEL_DVO_CHIP_LVDS) encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) | BIT(INTEL_OUTPUT_DVO); drm_encoder_init(&i915->drm, &encoder->base, &intel_dvo_enc_funcs, intel_dvo_encoder_type(&intel_dvo->dev), "DVO %c", port_name(encoder->port)); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n", encoder->base.base.id, encoder->base.name, intel_dvo->dev.name); if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; drm_connector_init(&i915->drm, &connector->base, &intel_dvo_connector_funcs, intel_dvo_connector_type(&intel_dvo->dev)); drm_connector_helper_add(&connector->base, &intel_dvo_connector_helper_funcs); connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; intel_connector_attach_encoder(connector, encoder); if (intel_dvo->dev.type == INTEL_DVO_CHIP_LVDS) { /* * For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. * However, it's in a different format from the BIOS * data on chipsets with integrated LVDS (stored in AIM * headers, likely), so for now, just get the current * mode being output through DVO. */ intel_panel_add_encoder_fixed_mode(connector, encoder); intel_panel_init(connector, NULL); } }
linux-master
drivers/gpu/drm/i915/display/intel_dvo.c
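A small standalone sketch of the dotclock gating done by intel_dvo_mode_valid() in the file above: when a fixed panel mode exists, the panel's own clock replaces the requested mode's clock before comparing against the platform maximum. The clock values in main() are examples only.

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

static enum mode_status dvo_clock_check(int mode_clock, int fixed_mode_clock,
                                        int max_dotclk)
{
        /* with a fixed panel mode, the panel's own clock is what gets driven */
        int target_clock = fixed_mode_clock ? fixed_mode_clock : mode_clock;

        return target_clock > max_dotclk ? MODE_CLOCK_HIGH : MODE_OK;
}

int main(void)
{
        printf("%d\n", dvo_clock_check(165000, 0, 120000));      /* 1: MODE_CLOCK_HIGH */
        printf("%d\n", dvo_clock_check(165000, 108000, 120000)); /* 0: MODE_OK */
        return 0;
}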
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "intel_atomic_plane.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_plane_initial.h" static bool intel_reuse_initial_plane_obj(struct drm_i915_private *i915, const struct intel_initial_plane_config *plane_config, struct drm_framebuffer **fb, struct i915_vma **vma) { struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); if (!crtc_state->uapi.active) continue; if (!plane_state->ggtt_vma) continue; if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { *fb = plane_state->hw.fb; *vma = plane_state->ggtt_vma; return true; } } return false; } static struct i915_vma * initial_plane_vma(struct drm_i915_private *i915, struct intel_initial_plane_config *plane_config) { struct intel_memory_region *mem; struct drm_i915_gem_object *obj; struct i915_vma *vma; resource_size_t phys_base; u32 base, size; u64 pinctl; if (plane_config->size == 0) return NULL; base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); if (IS_DGFX(i915)) { gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm; gen8_pte_t pte; gte += base / I915_GTT_PAGE_SIZE; pte = ioread64(gte); if (!(pte & GEN12_GGTT_PTE_LM)) { drm_err(&i915->drm, "Initial plane programming missing PTE_LM bit\n"); return NULL; } phys_base = pte & I915_GTT_PAGE_MASK; mem = i915->mm.regions[INTEL_REGION_LMEM_0]; /* * We don't currently expect this to ever be placed in the * stolen portion. */ if (phys_base >= resource_size(&mem->region)) { drm_err(&i915->drm, "Initial plane programming using invalid range, phys_base=%pa\n", &phys_base); return NULL; } drm_dbg(&i915->drm, "Using phys_base=%pa, based on initial plane programming\n", &phys_base); } else { phys_base = base; mem = i915->mm.stolen_region; } if (!mem) return NULL; size = round_up(plane_config->base + plane_config->size, mem->min_page_size); size -= base; /* * If the FB is too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. */ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) && mem == i915->mm.stolen_region && size * 2 > i915->dsm.usable_size) return NULL; obj = i915_gem_object_create_region_at(mem, phys_base, size, I915_BO_ALLOC_USER | I915_BO_PREALLOC); if (IS_ERR(obj)) return NULL; /* * Mark it WT ahead of time to avoid changing the * cache_level during fbdev initialization. The * unbind there would get stuck waiting for rcu. */ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ? 
I915_CACHE_WT : I915_CACHE_NONE); switch (plane_config->tiling) { case I915_TILING_NONE: break; case I915_TILING_X: case I915_TILING_Y: obj->tiling_and_stride = plane_config->fb->base.pitches[0] | plane_config->tiling; break; default: MISSING_CASE(plane_config->tiling); goto err_obj; } vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL); if (IS_ERR(vma)) goto err_obj; pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base; if (HAS_GMCH(i915)) pinctl |= PIN_MAPPABLE; if (i915_vma_pin(vma, 0, 0, pinctl)) goto err_obj; if (i915_gem_object_is_tiled(obj) && !i915_vma_is_map_and_fenceable(vma)) goto err_obj; return vma; err_obj: i915_gem_object_put(obj); return NULL; } static bool intel_alloc_initial_plane_obj(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_framebuffer *fb = &plane_config->fb->base; struct i915_vma *vma; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_4_TILED: break; default: drm_dbg(&dev_priv->drm, "Unsupported modifier for initial FB: 0x%llx\n", fb->modifier); return false; } vma = initial_plane_vma(dev_priv, plane_config); if (!vma) return false; mode_cmd.pixel_format = fb->format->format; mode_cmd.width = fb->width; mode_cmd.height = fb->height; mode_cmd.pitches[0] = fb->pitches[0]; mode_cmd.modifier[0] = fb->modifier; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(to_intel_framebuffer(fb), vma->obj, &mode_cmd)) { drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); goto err_vma; } plane_config->vma = vma; return true; err_vma: i915_vma_put(vma); return false; } static void intel_find_initial_plane_obj(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; /* * TODO: * Disable planes if get_initial_plane_config() failed. * Make sure things work if the surface base is not page aligned. */ if (!plane_config->fb) return; if (intel_alloc_initial_plane_obj(crtc, plane_config)) { fb = &plane_config->fb->base; vma = plane_config->vma; goto valid_fb; } /* * Failed to alloc the obj, check to see if we should share * an fb with another CRTC instead */ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma)) goto valid_fb; /* * We've failed to reconstruct the BIOS FB. Current display state * indicates that the primary plane is visible, but has a NULL FB, * which will lead to problems later if we don't fix it up. The * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
*/ intel_plane_disable_noatomic(crtc, plane); return; valid_fb: plane_state->uapi.rotation = plane_config->rotation; intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->uapi.rotation, &plane_state->view); __i915_vma_pin(vma); plane_state->ggtt_vma = i915_vma_get(vma); if (intel_plane_uses_fence(plane_state) && i915_vma_pin_fence(vma) == 0 && vma->fence) plane_state->flags |= PLANE_HAS_FENCE; plane_state->uapi.src_x = 0; plane_state->uapi.src_y = 0; plane_state->uapi.src_w = fb->width << 16; plane_state->uapi.src_h = fb->height << 16; plane_state->uapi.crtc_x = 0; plane_state->uapi.crtc_y = 0; plane_state->uapi.crtc_w = fb->width; plane_state->uapi.crtc_h = fb->height; if (plane_config->tiling) dev_priv->preserve_bios_swizzle = true; plane_state->uapi.fb = fb; drm_framebuffer_get(fb); plane_state->uapi.crtc = &crtc->base; intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc); atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); } static void plane_config_fini(struct intel_initial_plane_config *plane_config) { if (plane_config->fb) { struct drm_framebuffer *fb = &plane_config->fb->base; /* We may only have the stub and not a full framebuffer */ if (drm_framebuffer_read_refcount(fb)) drm_framebuffer_put(fb); else kfree(fb); } if (plane_config->vma) i915_vma_put(plane_config->vma); } void intel_crtc_initial_plane_config(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_initial_plane_config plane_config = {}; /* * Note that reserving the BIOS fb up front prevents us * from stuffing other stolen allocations like the ring * on top. This prevents some ugliness at boot time, and * can even allow for smooth boot transitions if the BIOS * fb is large enough for the active pipe configuration. */ dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config); /* * If the fb is shared between multiple heads, we'll * just get the first one. */ intel_find_initial_plane_obj(crtc, &plane_config); plane_config_fini(&plane_config); }
linux-master
drivers/gpu/drm/i915/display/intel_plane_initial.c
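initial_plane_vma() in intel_plane_initial.c above takes over the BIOS framebuffer by rounding its GGTT offset down to the GTT minimum alignment and rounding the end of the buffer up to the memory region's minimum page size, then creating an object at exactly that range. A small standalone sketch of just that arithmetic; the alignment constants and the sample base/size values are made up for illustration and stand in for I915_GTT_MIN_ALIGNMENT and mem->min_page_size.

#include <stdint.h>
#include <stdio.h>

#define GTT_MIN_ALIGNMENT 4096u   /* stand-in for I915_GTT_MIN_ALIGNMENT */
#define MIN_PAGE_SIZE     65536u  /* stand-in for mem->min_page_size */

static uint32_t round_down_u32(uint32_t x, uint32_t align)
{
	return x & ~(align - 1);      /* align must be a power of two */
}

static uint32_t round_up_u32(uint32_t x, uint32_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* hypothetical values read back from the BIOS plane programming */
	uint32_t plane_base = 0x21100;               /* GGTT offset of the fb */
	uint32_t plane_size = 8 * 1024 * 1024 + 512; /* not page aligned */

	uint32_t base = round_down_u32(plane_base, GTT_MIN_ALIGNMENT);
	uint32_t size = round_up_u32(plane_base + plane_size, MIN_PAGE_SIZE) - base;

	printf("base=0x%x size=0x%x\n", (unsigned int)base, (unsigned int)size);
	return 0;
}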
/* * Copyright © 2006-2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/math.h> #include <linux/string_helpers.h> #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_hti.h" #include "intel_mg_phy_regs.h" #include "intel_pch_refclk.h" #include "intel_tc.h" /** * DOC: Display PLLs * * Display PLLs used for driving outputs vary by platform. While some have * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL * from a pool. In the latter scenario, it is possible that multiple pipes * share a PLL if their configurations match. * * This file provides an abstraction over display PLLs. The function * intel_shared_dpll_init() initializes the PLLs for the given platform. The * users of a PLL are tracked and that tracking is integrated with the atomic * modset interface. During an atomic operation, required PLLs can be reserved * for a given CRTC and encoder configuration by calling * intel_reserve_shared_dplls() and previously reserved PLLs can be released * with intel_release_shared_dplls(). * Changes to the users are first staged in the atomic state, and then made * effective by calling intel_shared_dpll_swap_state() during the atomic * commit phase. */ /* platform specific hooks for managing DPLLs */ struct intel_shared_dpll_funcs { /* * Hook for enabling the pll, called from intel_enable_shared_dpll() if * the pll is not already enabled. */ void (*enable)(struct drm_i915_private *i915, struct intel_shared_dpll *pll); /* * Hook for disabling the pll, called from intel_disable_shared_dpll() * only when it is safe to disable the pll, i.e., there are no more * tracked users for it. */ void (*disable)(struct drm_i915_private *i915, struct intel_shared_dpll *pll); /* * Hook for reading the values currently programmed to the DPLL * registers. This is used for initial hw state readout and state * verification after a mode set. */ bool (*get_hw_state)(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state); /* * Hook for calculating the pll's output frequency based on its passed * in state. 
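	 * The frequency returned is the resulting port clock in kHz.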
*/ int (*get_freq)(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state); }; struct intel_dpll_mgr { const struct dpll_info *dpll_info; int (*compute_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); int (*get_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); void (*put_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*update_active_dpll)(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); void (*update_ref_clks)(struct drm_i915_private *i915); void (*dump_hw_state)(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); }; static void intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll_state *shared_dpll) { enum intel_dpll_id i; /* Copy shared dpll state */ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; shared_dpll[i] = pll->state; } } static struct intel_shared_dpll_state * intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) { struct intel_atomic_state *state = to_intel_atomic_state(s); drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); if (!state->dpll_set) { state->dpll_set = true; intel_atomic_duplicate_dpll_state(to_i915(s->dev), state->shared_dpll); } return state->shared_dpll; } /** * intel_get_shared_dpll_by_id - get a DPLL given its id * @dev_priv: i915 device instance * @id: pll id * * Returns: * A pointer to the DPLL with @id */ struct intel_shared_dpll * intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { return &dev_priv->display.dpll.shared_dplls[id]; } /* For ILK+ */ void assert_shared_dpll(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, bool state) { bool cur_state; struct intel_dpll_hw_state hw_state; if (drm_WARN(&dev_priv->drm, !pll, "asserting DPLL %s with no DPLL\n", str_on_off(state))) return; cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state); I915_STATE_WARN(dev_priv, cur_state != state, "%s assertion failure (expected %s, current %s)\n", pll->info->name, str_on_off(state), str_on_off(cur_state)); } static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) { return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1; } enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port) { return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1; } static i915_reg_t intel_combo_pll_enable_reg(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { if (IS_DG1(i915)) return DG1_DPLL_ENABLE(pll->info->id); else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && (pll->info->id == DPLL_ID_EHL_DPLL4)) return MG_PLL_ENABLE(0); return ICL_DPLL_ENABLE(pll->info->id); } static i915_reg_t intel_tc_pll_enable_reg(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; enum tc_port tc_port = icl_pll_id_to_tc_port(id); if (IS_ALDERLAKE_P(i915)) return ADLP_PORTTC_PLL_ENABLE(tc_port); return MG_PLL_ENABLE(tc_port); } /** * intel_enable_shared_dpll - enable a CRTC's shared DPLL * @crtc_state: CRTC, and its state, which has a shared DPLL * * Enable the shared DPLL used by @crtc. 
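 *
 * Note that the PLL hardware is only programmed when the first pipe in
 * the PLL's reserved pipe mask becomes active; calls for further pipes
 * sharing the PLL just update the active pipe mask under the DPLL lock.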
*/ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int pipe_mask = BIT(crtc->pipe); unsigned int old_mask; if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; mutex_lock(&dev_priv->display.dpll.lock); old_mask = pll->active_mask; if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) || drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask)) goto out; pll->active_mask |= pipe_mask; drm_dbg_kms(&dev_priv->drm, "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, crtc->base.base.id, crtc->base.name); if (old_mask) { drm_WARN_ON(&dev_priv->drm, !pll->on); assert_shared_dpll_enabled(dev_priv, pll); goto out; } drm_WARN_ON(&dev_priv->drm, pll->on); drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name); pll->info->funcs->enable(dev_priv, pll); pll->on = true; out: mutex_unlock(&dev_priv->display.dpll.lock); } /** * intel_disable_shared_dpll - disable a CRTC's shared DPLL * @crtc_state: CRTC, and its state, which has a shared DPLL * * Disable the shared DPLL used by @crtc. */ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int pipe_mask = BIT(crtc->pipe); /* PCH only available on ILK+ */ if (DISPLAY_VER(dev_priv) < 5) return; if (pll == NULL) return; mutex_lock(&dev_priv->display.dpll.lock); if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask), "%s not used by [CRTC:%d:%s]\n", pll->info->name, crtc->base.base.id, crtc->base.name)) goto out; drm_dbg_kms(&dev_priv->drm, "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, crtc->base.base.id, crtc->base.name); assert_shared_dpll_enabled(dev_priv, pll); drm_WARN_ON(&dev_priv->drm, !pll->on); pll->active_mask &= ~pipe_mask; if (pll->active_mask) goto out; drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name); pll->info->funcs->disable(dev_priv, pll); pll->on = false; out: mutex_unlock(&dev_priv->display.dpll.lock); } static struct intel_shared_dpll * intel_find_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_dpll_hw_state *pll_state, unsigned long dpll_mask) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll, *unused_pll = NULL; struct intel_shared_dpll_state *shared_dpll; enum intel_dpll_id i; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { pll = &dev_priv->display.dpll.shared_dplls[i]; /* Only want to check enabled timings first */ if (shared_dpll[i].pipe_mask == 0) { if (!unused_pll) unused_pll = pll; continue; } if (memcmp(pll_state, &shared_dpll[i].hw_state, sizeof(*pll_state)) == 0) { drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n", crtc->base.base.id, crtc->base.name, pll->info->name, shared_dpll[i].pipe_mask, pll->active_mask); return pll; } } /* Ok no matching timings, maybe there's a free one? 
*/ if (unused_pll) { drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n", crtc->base.base.id, crtc->base.name, unused_pll->info->name); return unused_pll; } return NULL; } /** * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC * @crtc: CRTC on which behalf the reference is taken * @pll: DPLL for which the reference is taken * @shared_dpll_state: the DPLL atomic state in which the reference is tracked * * Take a reference for @pll tracking the use of it by @crtc. */ static void intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, struct intel_shared_dpll_state *shared_dpll_state) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0); shared_dpll_state->pipe_mask |= BIT(crtc->pipe); drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); } static void intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); if (shared_dpll[id].pipe_mask == 0) shared_dpll[id].hw_state = *pll_state; intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); } /** * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC * @crtc: CRTC on which behalf the reference is dropped * @pll: DPLL for which the reference is dropped * @shared_dpll_state: the DPLL atomic state in which the reference is tracked * * Drop a reference for @pll tracking the end of use of it by @crtc. */ void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, struct intel_shared_dpll_state *shared_dpll_state) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0); shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe); drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); } static void intel_unreference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll) { struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); } static void intel_put_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); new_crtc_state->shared_dpll = NULL; if (!old_crtc_state->shared_dpll) return; intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll); } /** * intel_shared_dpll_swap_state - make atomic DPLL configuration effective * @state: atomic state * * This is the dpll version of drm_atomic_helper_swap_state() since the * helper does not handle driver-specific global state. * * For consistency with atomic helpers this function does a complete swap, * i.e. it also puts the current state into @state, even though there is no * need for that at this moment. 
*/ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll = state->shared_dpll; enum intel_dpll_id i; if (!state->dpll_set) return; for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; swap(pll->state, shared_dpll[i]); } } static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, PCH_DPLL(id)); hw_state->dpll = val; hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id)); hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id)); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & DPLL_VCO_ENABLE; } static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) { u32 val; bool enabled; val = intel_de_read(dev_priv, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); I915_STATE_WARN(dev_priv, !enabled, "PCH refclk assertion failure, should be active but is disabled\n"); } static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; /* PCH refclock must be enabled first */ ibx_assert_pch_refclk_enabled(dev_priv); intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0); intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1); intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); /* Wait for the clocks to stabilize. */ intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(150); /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. */ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(200); } static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; intel_de_write(dev_priv, PCH_DPLL(id), 0); intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(200); } static int ibx_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { return 0; } static int ibx_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id i; if (HAS_PCH_IBX(dev_priv)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
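		 * (pipe A uses PCH DPLL A, pipe B uses PCH DPLL B), so the DPLL
		 * id is simply the pipe index.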
*/ i = (enum intel_dpll_id) crtc->pipe; pll = &dev_priv->display.dpll.shared_dplls[i]; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); } else { pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, BIT(DPLL_ID_PCH_PLL_B) | BIT(DPLL_ID_PCH_PLL_A)); } if (!pll) return -EINVAL; /* reference the pll */ intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; return 0; } static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", hw_state->dpll, hw_state->dpll_md, hw_state->fp0, hw_state->fp1); } static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { .enable = ibx_pch_dpll_enable, .disable = ibx_pch_dpll_disable, .get_hw_state = ibx_pch_dpll_get_hw_state, }; static const struct dpll_info pch_plls[] = { { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, { }, }; static const struct intel_dpll_mgr pch_pll_mgr = { .dpll_info = pch_plls, .compute_dplls = ibx_compute_dpll, .get_dplls = ibx_get_dpll, .put_dplls = intel_put_dpll, .dump_hw_state = ibx_dump_hw_state, }; static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll); intel_de_posting_read(dev_priv, WRPLL_CTL(id)); udelay(20); } static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll); intel_de_posting_read(dev_priv, SPLL_CTL); udelay(20); } static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, WRPLL_CTL(id)); /* * Try to set up the PCH reference clock once all DPLLs * that depend on it have been shut down. */ if (dev_priv->display.dpll.pch_ssc_use & BIT(id)) intel_init_pch_refclk(dev_priv); } static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum intel_dpll_id id = pll->info->id; intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, SPLL_CTL); /* * Try to set up the PCH reference clock once all DPLLs * that depend on it have been shut down. 
*/ if (dev_priv->display.dpll.pch_ssc_use & BIT(id)) intel_init_pch_refclk(dev_priv); } static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, WRPLL_CTL(id)); hw_state->wrpll = val; intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & WRPLL_PLL_ENABLE; } static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { intel_wakeref_t wakeref; u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, SPLL_CTL); hw_state->spll = val; intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & SPLL_PLL_ENABLE; } #define LC_FREQ 2700 #define LC_FREQ_2K U64_C(LC_FREQ * 2000) #define P_MIN 2 #define P_MAX 64 #define P_INC 2 /* Constraints for PLL good behavior */ #define REF_MIN 48 #define REF_MAX 400 #define VCO_MIN 2400 #define VCO_MAX 4800 struct hsw_wrpll_rnp { unsigned p, n2, r2; }; static unsigned hsw_wrpll_get_budget_for_freq(int clock) { switch (clock) { case 25175000: case 25200000: case 27000000: case 27027000: case 37762500: case 37800000: case 40500000: case 40541000: case 54000000: case 54054000: case 59341000: case 59400000: case 72000000: case 74176000: case 74250000: case 81000000: case 81081000: case 89012000: case 89100000: case 108000000: case 108108000: case 111264000: case 111375000: case 148352000: case 148500000: case 162000000: case 162162000: case 222525000: case 222750000: case 296703000: case 297000000: return 0; case 233500000: case 245250000: case 247750000: case 253250000: case 298000000: return 1500; case 169128000: case 169500000: case 179500000: case 202000000: return 2000; case 256250000: case 262500000: case 270000000: case 272500000: case 273750000: case 280750000: case 281250000: case 286000000: case 291750000: return 4000; case 267250000: case 268500000: return 5000; default: return 1000; } } static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget, unsigned int r2, unsigned int n2, unsigned int p, struct hsw_wrpll_rnp *best) { u64 a, b, c, d, diff, diff_best; /* No best (r,n,p) yet */ if (best->p == 0) { best->p = p; best->n2 = n2; best->r2 = r2; return; } /* * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to * freq2k. * * delta = 1e6 * * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) / * freq2k; * * and we would like delta <= budget. * * If the discrepancy is above the PPM-based budget, always prefer to * improve upon the previous solution. However, if you're within the * budget, try to maximize Ref * VCO, that is N / (P * R^2). */ a = freq2k * budget * p * r2; b = freq2k * budget * best->p * best->r2; diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2); diff_best = abs_diff(freq2k * best->p * best->r2, LC_FREQ_2K * best->n2); c = 1000000 * diff; d = 1000000 * diff_best; if (a < c && b < d) { /* If both are above the budget, pick the closer */ if (best->p * best->r2 * diff < p * r2 * diff_best) { best->p = p; best->n2 = n2; best->r2 = r2; } } else if (a >= c && b < d) { /* If A is below the threshold but B is above it? Update. 
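		 * Here a >= c means the candidate (r2, n2, p) is within the PPM
		 * budget while b < d means the current best is not, so the
		 * in-budget candidate wins outright.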
*/ best->p = p; best->n2 = n2; best->r2 = r2; } else if (a >= c && b >= d) { /* Both are below the limit, so pick the higher n2/(r2*r2) */ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) { best->p = p; best->n2 = n2; best->r2 = r2; } } /* Otherwise a < c && b >= d, do nothing */ } static void hsw_ddi_calculate_wrpll(int clock /* in Hz */, unsigned *r2_out, unsigned *n2_out, unsigned *p_out) { u64 freq2k; unsigned p, n2, r2; struct hsw_wrpll_rnp best = {}; unsigned budget; freq2k = clock / 100; budget = hsw_wrpll_get_budget_for_freq(clock); /* Special case handling for 540 pixel clock: bypass WR PLL entirely * and directly pass the LC PLL to it. */ if (freq2k == 5400000) { *n2_out = 2; *p_out = 1; *r2_out = 2; return; } /* * Ref = LC_FREQ / R, where Ref is the actual reference input seen by * the WR PLL. * * We want R so that REF_MIN <= Ref <= REF_MAX. * Injecting R2 = 2 * R gives: * REF_MAX * r2 > LC_FREQ * 2 and * REF_MIN * r2 < LC_FREQ * 2 * * Which means the desired boundaries for r2 are: * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN * */ for (r2 = LC_FREQ * 2 / REF_MAX + 1; r2 <= LC_FREQ * 2 / REF_MIN; r2++) { /* * VCO = N * Ref, that is: VCO = N * LC_FREQ / R * * Once again we want VCO_MIN <= VCO <= VCO_MAX. * Injecting R2 = 2 * R and N2 = 2 * N, we get: * VCO_MAX * r2 > n2 * LC_FREQ and * VCO_MIN * r2 < n2 * LC_FREQ) * * Which means the desired boundaries for n2 are: * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ */ for (n2 = VCO_MIN * r2 / LC_FREQ + 1; n2 <= VCO_MAX * r2 / LC_FREQ; n2++) { for (p = P_MIN; p <= P_MAX; p += P_INC) hsw_wrpll_update_rnp(freq2k, budget, r2, n2, p, &best); } } *n2_out = best.n2; *p_out = best.p; *r2_out = best.r2; } static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int refclk; int n, p, r; u32 wrpll = pll_state->wrpll; switch (wrpll & WRPLL_REF_MASK) { case WRPLL_REF_SPECIAL_HSW: /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) { refclk = dev_priv->display.dpll.ref_clks.nssc; break; } fallthrough; case WRPLL_REF_PCH_SSC: /* * We could calculate spread here, but our checking * code only cares about 5% accuracy, and spread is a max of * 0.5% downspread. 
*/ refclk = dev_priv->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; break; default: MISSING_CASE(wrpll); return 0; } r = wrpll & WRPLL_DIVIDER_REF_MASK; p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; /* Convert to KHz, p & r have a fixed point portion */ return (refclk * n / 10) / (p * r) * 2; } static int hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); unsigned int p, n2, r2; hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); crtc_state->dpll_hw_state.wrpll = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL, &crtc_state->dpll_hw_state); return 0; } static struct intel_shared_dpll * hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, BIT(DPLL_ID_WRPLL2) | BIT(DPLL_ID_WRPLL1)); } static int hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int clock = crtc_state->port_clock; switch (clock / 2) { case 81000: case 135000: case 270000: return 0; default: drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n", clock); return -EINVAL; } } static struct intel_shared_dpll * hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; int clock = crtc_state->port_clock; switch (clock / 2) { case 81000: pll_id = DPLL_ID_LCPLL_810; break; case 135000: pll_id = DPLL_ID_LCPLL_1350; break; case 270000: pll_id = DPLL_ID_LCPLL_2700; break; default: MISSING_CASE(clock / 2); return NULL; } pll = intel_get_shared_dpll_by_id(dev_priv, pll_id); if (!pll) return NULL; return pll; } static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; switch (pll->info->id) { case DPLL_ID_LCPLL_810: link_clock = 81000; break; case DPLL_ID_LCPLL_1350: link_clock = 135000; break; case DPLL_ID_LCPLL_2700: link_clock = 270000; break; default: drm_WARN(&i915->drm, 1, "bad port clock sel\n"); break; } return link_clock * 2; } static int hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000)) return -EINVAL; crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; return 0; } static struct intel_shared_dpll * hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, BIT(DPLL_ID_SPLL)); } static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; switch (pll_state->spll & SPLL_FREQ_MASK) { 
case SPLL_FREQ_810MHz: link_clock = 81000; break; case SPLL_FREQ_1350MHz: link_clock = 135000; break; case SPLL_FREQ_2700MHz: link_clock = 270000; break; default: drm_WARN(&i915->drm, 1, "bad spll freq\n"); break; } return link_clock * 2; } static int hsw_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return hsw_ddi_wrpll_compute_dpll(state, crtc); else if (intel_crtc_has_dp_encoder(crtc_state)) return hsw_ddi_lcpll_compute_dpll(crtc_state); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) return hsw_ddi_spll_compute_dpll(state, crtc); else return -EINVAL; } static int hsw_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll = NULL; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) pll = hsw_ddi_wrpll_get_dpll(state, crtc); else if (intel_crtc_has_dp_encoder(crtc_state)) pll = hsw_ddi_lcpll_get_dpll(crtc_state); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) pll = hsw_ddi_spll_get_dpll(state, crtc); if (!pll) return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; return 0; } static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) { i915->display.dpll.ref_clks.ssc = 135000; /* Non-SSC is only used on non-ULT HSW. */ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT) i915->display.dpll.ref_clks.nssc = 24000; else i915->display.dpll.ref_clks.nssc = 135000; } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", hw_state->wrpll, hw_state->spll); } static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { .enable = hsw_ddi_wrpll_enable, .disable = hsw_ddi_wrpll_disable, .get_hw_state = hsw_ddi_wrpll_get_hw_state, .get_freq = hsw_ddi_wrpll_get_freq, }; static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { .enable = hsw_ddi_spll_enable, .disable = hsw_ddi_spll_disable, .get_hw_state = hsw_ddi_spll_get_hw_state, .get_freq = hsw_ddi_spll_get_freq, }; static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { } static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { } static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { return true; } static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { .enable = hsw_ddi_lcpll_enable, .disable = hsw_ddi_lcpll_disable, .get_hw_state = hsw_ddi_lcpll_get_hw_state, .get_freq = hsw_ddi_lcpll_get_freq, }; static const struct dpll_info hsw_plls[] = { { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, { }, }; static const struct intel_dpll_mgr hsw_pll_mgr = { .dpll_info = hsw_plls, .compute_dplls = 
hsw_compute_dpll, .get_dplls = hsw_get_dpll, .put_dplls = intel_put_dpll, .update_ref_clks = hsw_update_dpll_ref_clks, .dump_hw_state = hsw_dump_hw_state, }; struct skl_dpll_regs { i915_reg_t ctl, cfgcr1, cfgcr2; }; /* this array is indexed by the *shared* pll id */ static const struct skl_dpll_regs skl_dpll_regs[4] = { { /* DPLL 0 */ .ctl = LCPLL1_CTL, /* DPLL 0 doesn't support HDMI mode */ }, { /* DPLL 1 */ .ctl = LCPLL2_CTL, .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), }, { /* DPLL 2 */ .ctl = WRPLL_CTL(0), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), }, { /* DPLL 3 */ .ctl = WRPLL_CTL(1), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), }, }; static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; intel_de_rmw(dev_priv, DPLL_CTRL1, DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), pll->state.hw_state.ctrl1 << (id * 6)); intel_de_posting_read(dev_priv, DPLL_CTRL1); } static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; skl_ddi_pll_write_ctrl1(dev_priv, pll); intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1); intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2); intel_de_posting_read(dev_priv, regs[id].cfgcr1); intel_de_posting_read(dev_priv, regs[id].cfgcr2); /* the enable bit is always bit 31 */ intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); } static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { skl_ddi_pll_write_ctrl1(dev_priv, pll); } static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, regs[id].ctl); } static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { } static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { u32 val; const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; ret = false; val = intel_de_read(dev_priv, regs[id].ctl); if (!(val & LCPLL_PLL_ENABLE)) goto out; val = intel_de_read(dev_priv, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; /* avoid reading back stale values if HDMI mode is not enabled */ if (val & DPLL_CTRL1_HDMI_MODE(id)) { hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1); hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2); } ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; u32 val; bool ret; wakeref 
= intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; ret = false; /* DPLL0 is always enabled since it drives CDCLK */ val = intel_de_read(dev_priv, regs[id].ctl); if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE))) goto out; val = intel_de_read(dev_priv, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } struct skl_wrpll_context { u64 min_deviation; /* current minimal deviation */ u64 central_freq; /* chosen central freq */ u64 dco_freq; /* chosen dco freq */ unsigned int p; /* chosen divider */ }; /* DCO freq must be within +1%/-6% of the DCO central freq */ #define SKL_DCO_MAX_PDEVIATION 100 #define SKL_DCO_MAX_NDEVIATION 600 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, u64 central_freq, u64 dco_freq, unsigned int divider) { u64 deviation; deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), central_freq); /* positive deviation */ if (dco_freq >= central_freq) { if (deviation < SKL_DCO_MAX_PDEVIATION && deviation < ctx->min_deviation) { ctx->min_deviation = deviation; ctx->central_freq = central_freq; ctx->dco_freq = dco_freq; ctx->p = divider; } /* negative deviation */ } else if (deviation < SKL_DCO_MAX_NDEVIATION && deviation < ctx->min_deviation) { ctx->min_deviation = deviation; ctx->central_freq = central_freq; ctx->dco_freq = dco_freq; ctx->p = divider; } } static void skl_wrpll_get_multipliers(unsigned int p, unsigned int *p0 /* out */, unsigned int *p1 /* out */, unsigned int *p2 /* out */) { /* even dividers */ if (p % 2 == 0) { unsigned int half = p / 2; if (half == 1 || half == 2 || half == 3 || half == 5) { *p0 = 2; *p1 = 1; *p2 = half; } else if (half % 2 == 0) { *p0 = 2; *p1 = half / 2; *p2 = 2; } else if (half % 3 == 0) { *p0 = 3; *p1 = half / 3; *p2 = 2; } else if (half % 7 == 0) { *p0 = 7; *p1 = half / 7; *p2 = 2; } } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */ *p0 = 3; *p1 = 1; *p2 = p / 3; } else if (p == 5 || p == 7) { *p0 = p; *p1 = 1; *p2 = 1; } else if (p == 15) { *p0 = 3; *p1 = 1; *p2 = 5; } else if (p == 21) { *p0 = 7; *p1 = 1; *p2 = 3; } else if (p == 35) { *p0 = 7; *p1 = 1; *p2 = 5; } } struct skl_wrpll_params { u32 dco_fraction; u32 dco_integer; u32 qdiv_ratio; u32 qdiv_mode; u32 kdiv; u32 pdiv; u32 central_freq; }; static void skl_wrpll_params_populate(struct skl_wrpll_params *params, u64 afe_clock, int ref_clock, u64 central_freq, u32 p0, u32 p1, u32 p2) { u64 dco_freq; switch (central_freq) { case 9600000000ULL: params->central_freq = 0; break; case 9000000000ULL: params->central_freq = 1; break; case 8400000000ULL: params->central_freq = 3; } switch (p0) { case 1: params->pdiv = 0; break; case 2: params->pdiv = 1; break; case 3: params->pdiv = 2; break; case 7: params->pdiv = 4; break; default: WARN(1, "Incorrect PDiv\n"); } switch (p2) { case 5: params->kdiv = 0; break; case 2: params->kdiv = 1; break; case 3: params->kdiv = 2; break; case 1: params->kdiv = 3; break; default: WARN(1, "Incorrect KDiv\n"); } params->qdiv_ratio = p1; params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1; dco_freq = p0 * p1 * p2 * afe_clock; /* * Intermediate values are in Hz. 
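	 *
	 * Worked example: a 594 MHz pixel clock gives afe_clock = 2.97 GHz;
	 * with p0 * p1 * p2 = 3 the DCO runs at 8.91 GHz, which against a
	 * 24 MHz reference is 371.25 * ref, i.e. dco_integer = 371 and
	 * dco_fraction = 0.25 * 2^15 = 0x2000.
	 *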
* Divide by MHz to match bsepc */ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1)); params->dco_fraction = div_u64((div_u64(dco_freq, ref_clock / KHz(1)) - params->dco_integer * MHz(1)) * 0x8000, MHz(1)); } static int skl_ddi_calculate_wrpll(int clock /* in Hz */, int ref_clock, struct skl_wrpll_params *wrpll_params) { static const u64 dco_central_freq[3] = { 8400000000ULL, 9000000000ULL, 9600000000ULL }; static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30, 32, 36, 40, 42, 44, 48, 52, 54, 56, 60, 64, 66, 68, 70, 72, 76, 78, 80, 84, 88, 90, 92, 96, 98 }; static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 }; static const struct { const u8 *list; int n_dividers; } dividers[] = { { even_dividers, ARRAY_SIZE(even_dividers) }, { odd_dividers, ARRAY_SIZE(odd_dividers) }, }; struct skl_wrpll_context ctx = { .min_deviation = U64_MAX, }; unsigned int dco, d, i; unsigned int p0, p1, p2; u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ for (d = 0; d < ARRAY_SIZE(dividers); d++) { for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { for (i = 0; i < dividers[d].n_dividers; i++) { unsigned int p = dividers[d].list[i]; u64 dco_freq = p * afe_clock; skl_wrpll_try_divider(&ctx, dco_central_freq[dco], dco_freq, p); /* * Skip the remaining dividers if we're sure to * have found the definitive divider, we can't * improve a 0 deviation. */ if (ctx.min_deviation == 0) goto skip_remaining_dividers; } } skip_remaining_dividers: /* * If a solution is found with an even divider, prefer * this one. */ if (d == 0 && ctx.p) break; } if (!ctx.p) return -EINVAL; /* * gcc incorrectly analyses that these can be used without being * initialized. To be fair, it's hard to guess. */ p0 = p1 = p2 = 0; skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock, ctx.central_freq, p0, p1, p2); return 0; } static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int ref_clock = i915->display.dpll.ref_clks.nssc; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; else p1 = 1; switch (p0) { case DPLL_CFGCR2_PDIV_1: p0 = 1; break; case DPLL_CFGCR2_PDIV_2: p0 = 2; break; case DPLL_CFGCR2_PDIV_3: p0 = 3; break; case DPLL_CFGCR2_PDIV_7_INVALID: /* * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0, * handling it the same way as PDIV_7. 
*/ drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n"); fallthrough; case DPLL_CFGCR2_PDIV_7: p0 = 7; break; default: MISSING_CASE(p0); return 0; } switch (p2) { case DPLL_CFGCR2_KDIV_5: p2 = 5; break; case DPLL_CFGCR2_KDIV_2: p2 = 2; break; case DPLL_CFGCR2_KDIV_3: p2 = 3; break; case DPLL_CFGCR2_KDIV_1: p2 = 1; break; default: MISSING_CASE(p2); return 0; } dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * ref_clock; dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * ref_clock / 0x8000; if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0)) return 0; return dco_freq / (p0 * p1 * p2 * 5); } static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct skl_wrpll_params wrpll_params = {}; u32 ctrl1, cfgcr1, cfgcr2; int ret; /* * See comment in intel_dpll_hw_state to understand why we always use 0 * as the DPLL id in this function. */ ctrl1 = DPLL_CTRL1_OVERRIDE(0); ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, i915->display.dpll.ref_clks.nssc, &wrpll_params); if (ret) return ret; cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | wrpll_params.dco_integer; cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL, &crtc_state->dpll_hw_state); return 0; } static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { u32 ctrl1; /* * See comment in intel_dpll_hw_state to understand why we always use 0 * as the DPLL id in this function. 
*/ ctrl1 = DPLL_CTRL1_OVERRIDE(0); switch (crtc_state->port_clock / 2) { case 81000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); break; case 135000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); break; case 270000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); break; /* eDP 1.4 rates */ case 162000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); break; case 108000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); break; case 216000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0); break; } crtc_state->dpll_hw_state.ctrl1 = ctrl1; return 0; } static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >> DPLL_CTRL1_LINK_RATE_SHIFT(0)) { case DPLL_CTRL1_LINK_RATE_810: link_clock = 81000; break; case DPLL_CTRL1_LINK_RATE_1080: link_clock = 108000; break; case DPLL_CTRL1_LINK_RATE_1350: link_clock = 135000; break; case DPLL_CTRL1_LINK_RATE_1620: link_clock = 162000; break; case DPLL_CTRL1_LINK_RATE_2160: link_clock = 216000; break; case DPLL_CTRL1_LINK_RATE_2700: link_clock = 270000; break; default: drm_WARN(&i915->drm, 1, "Unsupported link rate\n"); break; } return link_clock * 2; } static int skl_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return skl_ddi_hdmi_pll_dividers(crtc_state); else if (intel_crtc_has_dp_encoder(crtc_state)) return skl_ddi_dp_set_dpll_hw_state(crtc_state); else return -EINVAL; } static int skl_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, BIT(DPLL_ID_SKL_DPLL0)); else pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, BIT(DPLL_ID_SKL_DPLL3) | BIT(DPLL_ID_SKL_DPLL2) | BIT(DPLL_ID_SKL_DPLL1)); if (!pll) return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; return 0; } static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { /* * ctrl1 register is already shifted for each pll, just use 0 to get * the internal shift for each field */ if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) return skl_ddi_wrpll_get_freq(i915, pll, pll_state); else return skl_ddi_lcpll_get_freq(i915, pll, pll_state); } static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2); } static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { .enable = skl_ddi_pll_enable, .disable = skl_ddi_pll_disable, .get_hw_state = skl_ddi_pll_get_hw_state, .get_freq = skl_ddi_pll_get_freq, }; static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { 
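	/*
	 * DPLL0 also drives CDCLK, so it is never disabled from here; the
	 * enable hook only programs this PLL's fields in DPLL_CTRL1.
	 */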
.enable = skl_ddi_dpll0_enable, .disable = skl_ddi_dpll0_disable, .get_hw_state = skl_ddi_dpll0_get_hw_state, .get_freq = skl_ddi_pll_get_freq, }; static const struct dpll_info skl_plls[] = { { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, { }, }; static const struct intel_dpll_mgr skl_pll_mgr = { .dpll_info = skl_plls, .compute_dplls = skl_compute_dpll, .get_dplls = skl_get_dpll, .put_dplls = intel_put_dpll, .update_ref_clks = skl_update_dpll_ref_clks, .dump_hw_state = skl_dump_hw_state, }; static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { u32 temp; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* Non-SSC reference */ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); if (IS_GEMINILAKE(dev_priv)) { intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_POWER_ENABLE); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) drm_err(&dev_priv->drm, "Power state not set for PLL:%d\n", port); } /* Disable 10 bit clock */ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), PORT_PLL_10BIT_CLK_ENABLE, 0); /* Write P1 & P2 */ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); /* Write M2 integer */ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0), PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); /* Write N */ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1), PORT_PLL_N_MASK, pll->state.hw_state.pll1); /* Write M2 fraction */ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2), PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); /* Write M2 fraction enable */ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3), PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); /* Write coeff */ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); temp &= ~PORT_PLL_PROP_COEFF_MASK; temp &= ~PORT_PLL_INT_COEFF_MASK; temp &= ~PORT_PLL_GAIN_CTL_MASK; temp |= pll->state.hw_state.pll6; intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8), PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9), PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; temp &= ~PORT_PLL_DCO_AMP_MASK; temp |= pll->state.hw_state.pll10; intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp); /* Recalibrate with new settings */ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); temp |= PORT_PLL_RECALIBRATE; intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); temp &= ~PORT_PLL_10BIT_CLK_ENABLE; temp |= pll->state.hw_state.ebb4; intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), 200)) drm_err(&dev_priv->drm, "PLL %d not locked\n", port); if (IS_GEMINILAKE(dev_priv)) { temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch)); temp |= DCC_DELAY_RANGE_2; intel_de_write(dev_priv, 
BXT_PORT_TX_DW5_GRP(phy, ch), temp); } /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. */ temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch)); temp &= ~LANE_STAGGER_MASK; temp &= ~LANESTAGGER_STRAP_OVRD; temp |= pll->state.hw_state.pcsdw12; intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp); } static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (IS_GEMINILAKE(dev_priv)) { intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_POWER_ENABLE, 0); if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) drm_err(&dev_priv->drm, "Power state not reset for PLL:%d\n", port); } } static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_wakeref_t wakeref; enum dpio_phy phy; enum dpio_channel ch; u32 val; bool ret; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; ret = false; val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (!(val & PORT_PLL_ENABLE)) goto out; hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); hw_state->pll0 &= PORT_PLL_M2_INT_MASK; hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); hw_state->pll1 &= PORT_PLL_N_MASK; hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK | PORT_PLL_INT_COEFF_MASK | PORT_PLL_GAIN_CTL_MASK; hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | PORT_PLL_DCO_AMP_MASK; /* * While we write to the group register to program all lanes at once we * can read only lane registers. We configure all lanes the same way, so * here just read out lanes 0/1 and output a note if lanes 2/3 differ. 
*/ hw_state->pcsdw12 = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch)); if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12) drm_dbg(&dev_priv->drm, "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", hw_state->pcsdw12, intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch))); hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } /* pre-calculated values for DP linkrates */ static const struct dpll bxt_dp_clk_val[] = { /* m2 is .22 binary fixed point */ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ }, { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ }, { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ }, { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, }; static int bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* Calculate HDMI div */ /* * FIXME: tie the following calculation into * i9xx_crtc_compute_clock */ if (!bxt_find_best_dpll(crtc_state, clk_div)) return -EINVAL; drm_WARN_ON(&i915->drm, clk_div->m1 != 2); return 0; } static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int i; *clk_div = bxt_dp_clk_val[0]; for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) { if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) { *clk_div = bxt_dp_clk_val[i]; break; } } chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div); drm_WARN_ON(&i915->drm, clk_div->vco == 0 || clk_div->dot != crtc_state->port_clock); } static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, const struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; int clock = crtc_state->port_clock; int vco = clk_div->vco; u32 prop_coef, int_coef, gain_ctl, targ_cnt; u32 lanestagger; if (vco >= 6200000 && vco <= 6700000) { prop_coef = 4; int_coef = 9; gain_ctl = 3; targ_cnt = 8; } else if ((vco > 5400000 && vco < 6200000) || (vco >= 4800000 && vco < 5400000)) { prop_coef = 5; int_coef = 11; gain_ctl = 3; targ_cnt = 9; } else if (vco == 5400000) { prop_coef = 3; int_coef = 8; gain_ctl = 1; targ_cnt = 9; } else { drm_err(&i915->drm, "Invalid VCO\n"); return -EINVAL; } if (clock > 270000) lanestagger = 0x18; else if (clock > 135000) lanestagger = 0x0d; else if (clock > 67000) lanestagger = 0x07; else if (clock > 33000) lanestagger = 0x04; else lanestagger = 0x02; dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2); dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22); dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n); dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff); if (clk_div->m2 & 0x3fffff) dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE; dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) | PORT_PLL_INT_COEFF(int_coef) | PORT_PLL_GAIN_CTL(gain_ctl); dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt); dpll_hw_state->pll9 = 
PORT_PLL_LOCK_THRESHOLD(5); dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) | PORT_PLL_DCO_AMP_OVR_EN_H; dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE; dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; return 0; } static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { struct dpll clock; clock.m1 = 2; clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock); } static int bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct dpll clk_div = {}; bxt_ddi_dp_pll_dividers(crtc_state, &clk_div); return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } static int bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct dpll clk_div = {}; int ret; bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div); ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); if (ret) return ret; crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL, &crtc_state->dpll_hw_state); return 0; } static int bxt_compute_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state); else if (intel_crtc_has_dp_encoder(crtc_state)) return bxt_ddi_dp_set_dpll_hw_state(crtc_state); else return -EINVAL; } static int bxt_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; pll = intel_get_shared_dpll_by_id(dev_priv, id); drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; return 0; } static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) { i915->display.dpll.ref_clks.ssc = 100000; i915->display.dpll.ref_clks.nssc = 100000; /* DSI non-SSC ref 19.2MHz */ } static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", hw_state->ebb0, hw_state->ebb4, hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3, hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10, hw_state->pcsdw12); } static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { .enable = bxt_ddi_pll_enable, .disable = bxt_ddi_pll_disable, .get_hw_state = bxt_ddi_pll_get_hw_state, .get_freq = bxt_ddi_pll_get_freq, }; static const struct dpll_info bxt_plls[] = { { "PORT PLL A", &bxt_ddi_pll_funcs, 
DPLL_ID_SKL_DPLL0, 0 }, { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, { }, }; static const struct intel_dpll_mgr bxt_pll_mgr = { .dpll_info = bxt_plls, .compute_dplls = bxt_compute_dpll, .get_dplls = bxt_get_dpll, .put_dplls = intel_put_dpll, .update_ref_clks = bxt_update_dpll_ref_clks, .dump_hw_state = bxt_dump_hw_state, }; static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv, int *qdiv, int *kdiv) { /* even dividers */ if (bestdiv % 2 == 0) { if (bestdiv == 2) { *pdiv = 2; *qdiv = 1; *kdiv = 1; } else if (bestdiv % 4 == 0) { *pdiv = 2; *qdiv = bestdiv / 4; *kdiv = 2; } else if (bestdiv % 6 == 0) { *pdiv = 3; *qdiv = bestdiv / 6; *kdiv = 2; } else if (bestdiv % 5 == 0) { *pdiv = 5; *qdiv = bestdiv / 10; *kdiv = 2; } else if (bestdiv % 14 == 0) { *pdiv = 7; *qdiv = bestdiv / 14; *kdiv = 2; } } else { if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) { *pdiv = bestdiv; *qdiv = 1; *kdiv = 1; } else { /* 9, 15, 21 */ *pdiv = bestdiv / 3; *qdiv = 1; *kdiv = 3; } } } static void icl_wrpll_params_populate(struct skl_wrpll_params *params, u32 dco_freq, u32 ref_freq, int pdiv, int qdiv, int kdiv) { u32 dco; switch (kdiv) { case 1: params->kdiv = 1; break; case 2: params->kdiv = 2; break; case 3: params->kdiv = 4; break; default: WARN(1, "Incorrect KDiv\n"); } switch (pdiv) { case 2: params->pdiv = 1; break; case 3: params->pdiv = 2; break; case 5: params->pdiv = 4; break; case 7: params->pdiv = 8; break; default: WARN(1, "Incorrect PDiv\n"); } WARN_ON(kdiv != 2 && qdiv != 1); params->qdiv_ratio = qdiv; params->qdiv_mode = (qdiv == 1) ? 0 : 1; dco = div_u64((u64)dco_freq << 15, ref_freq); params->dco_integer = dco >> 15; params->dco_fraction = dco & 0x7fff; } /* * Display WA #22010492432: ehl, tgl, adl-s, adl-p * Program half of the nominal DCO divider fraction value. */ static bool ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) { return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) && IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && i915->display.dpll.ref_clks.nssc == 38400; } struct icl_combo_pll_params { int clock; struct skl_wrpll_params wrpll; }; /* * These values are already adjusted: they're the bits we write to the * registers, not the logical values.
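 *
 * Illustrative cross-check (derived from icl_wrpll_params_populate() and
 * icl_ddi_combo_pll_get_freq() elsewhere in this file, not from an extra
 * spec table): in the 540000 entry below, pdiv = 0x2 encodes a logical P
 * divider of 3 and kdiv = 1 encodes K = 1. With a 24 MHz refclk the DCO is
 * (0x151 + 0x4000 / 0x8000) * 24000 = 8100000 kHz, and
 * 8100000 / (3 * 1 * 1 * 5) = 540000 kHz, i.e. the 5.4 GHz DP link rate.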
*/ static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = { { 540000, { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 270000, { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 162000, { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 324000, { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 216000, { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, { 432000, { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 648000, { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 810000, { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; /* Also used for 38.4 MHz values. */ static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = { { 540000, { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 270000, { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 162000, { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 324000, { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 216000, { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, { 432000, { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 648000, { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, { 810000, { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = { .dco_integer = 0x151, .dco_fraction = 0x4000, .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }; static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { .dco_integer = 0x1A5, .dco_fraction = 0x7000, .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }; static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = { .dco_integer = 0x54, .dco_fraction = 0x3000, /* the following params are unused */ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, }; static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { .dco_integer = 0x43, .dco_fraction = 0x4000, /* the following params are unused */ }; static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = dev_priv->display.dpll.ref_clks.nssc == 24000 ? 
icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; int i; for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) { if (clock == params[i].clock) { *pll_params = params[i].wrpll; return 0; } } MISSING_CASE(clock); return -EINVAL; } static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (DISPLAY_VER(dev_priv) >= 12) { switch (dev_priv->display.dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: *pll_params = tgl_tbt_pll_19_2MHz_values; break; case 24000: *pll_params = tgl_tbt_pll_24MHz_values; break; } } else { switch (dev_priv->display.dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: *pll_params = icl_tbt_pll_19_2MHz_values; break; case 24000: *pll_params = icl_tbt_pll_24MHz_values; break; } } return 0; } static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { /* * The PLL outputs multiple frequencies at the same time, selection is * made at DDI clock mux level. */ drm_WARN_ON(&i915->drm, 1); return 0; } static int icl_wrpll_ref_clock(struct drm_i915_private *i915) { int ref_clock = i915->display.dpll.ref_clks.nssc; /* * For ICL+, the spec states: if reference frequency is 38.4, * use 19.2 because the DPLL automatically divides that by 2. */ if (ref_clock == 38400) ref_clock = 19200; return ref_clock; } static int icl_calc_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int ref_clock = icl_wrpll_ref_clock(i915); u32 afe_clock = crtc_state->port_clock * 5; u32 dco_min = 7998000; u32 dco_max = 10000000; u32 dco_mid = (dco_min + dco_max) / 2; static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 30, 32, 36, 40, 42, 44, 48, 50, 52, 54, 56, 60, 64, 66, 68, 70, 72, 76, 78, 80, 84, 88, 90, 92, 96, 98, 100, 102, 3, 5, 7, 9, 15, 21 }; u32 dco, best_dco = 0, dco_centrality = 0; u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */ int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0; for (d = 0; d < ARRAY_SIZE(dividers); d++) { dco = afe_clock * dividers[d]; if (dco <= dco_max && dco >= dco_min) { dco_centrality = abs(dco - dco_mid); if (dco_centrality < best_dco_centrality) { best_dco_centrality = dco_centrality; best_div = dividers[d]; best_dco = dco; } } } if (best_div == 0) return -EINVAL; icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); return 0; } static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { int ref_clock = icl_wrpll_ref_clock(i915); u32 dco_fraction; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> DPLL_CFGCR1_QDIV_RATIO_SHIFT; else p1 = 1; switch (p0) { case DPLL_CFGCR1_PDIV_2: p0 = 2; break; case DPLL_CFGCR1_PDIV_3: p0 = 3; break; case DPLL_CFGCR1_PDIV_5: p0 = 5; break; case DPLL_CFGCR1_PDIV_7: p0 = 7; break; } switch (p2) { case DPLL_CFGCR1_KDIV_1: p2 = 1; break; case 
DPLL_CFGCR1_KDIV_2: p2 = 2; break; case DPLL_CFGCR1_KDIV_3: p2 = 3; break; } dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> DPLL_CFGCR0_DCO_FRACTION_SHIFT; if (ehl_combo_pll_div_frac_wa_needed(i915)) dco_fraction *= 2; dco_freq += (dco_fraction * ref_clock) / 0x8000; if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0)) return 0; return dco_freq / (p0 * p1 * p2 * 5); } static void icl_calc_dpll_state(struct drm_i915_private *i915, const struct skl_wrpll_params *pll_params, struct intel_dpll_hw_state *pll_state) { u32 dco_fraction = pll_params->dco_fraction; if (ehl_combo_pll_div_frac_wa_needed(i915)) dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | pll_params->dco_integer; pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) | DPLL_CFGCR1_KDIV(pll_params->kdiv) | DPLL_CFGCR1_PDIV(pll_params->pdiv); if (DISPLAY_VER(i915) >= 12) pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; else pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; if (i915->display.vbt.override_afc_startup) pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val); } static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, u32 *target_dco_khz, struct intel_dpll_hw_state *state, bool is_dkl) { static const u8 div1_vals[] = { 7, 5, 3, 2 }; u32 dco_min_freq, dco_max_freq; unsigned int i; int div2; dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000; dco_max_freq = is_dp ? 8100000 : 10000000; for (i = 0; i < ARRAY_SIZE(div1_vals); i++) { int div1 = div1_vals[i]; for (div2 = 10; div2 > 0; div2--) { int dco = div1 * div2 * clock_khz * 5; int a_divratio, tlinedrv, inputsel; u32 hsdiv; if (dco < dco_min_freq || dco > dco_max_freq) continue; if (div2 >= 2) { /* * Note: a_divratio not matching TGL BSpec * algorithm but matching hardcoded values and * working on HW for DP alt-mode at least */ a_divratio = is_dp ? 10 : 5; tlinedrv = is_dkl ? 1 : 2; } else { a_divratio = 5; tlinedrv = 0; } inputsel = is_dp ? 0 : 1; switch (div1) { default: MISSING_CASE(div1); fallthrough; case 2: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2; break; case 3: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3; break; case 5: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5; break; case 7: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7; break; } *target_dco_khz = dco; state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1); state->mg_clktop2_coreclkctl1 = MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio); state->mg_clktop2_hsclkctl = MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) | hsdiv | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2); return 0; } } return -EINVAL; } /* * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. 
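 *
 * Illustrative example of that rearrangement (numbers only, no new
 * behaviour): with a 38400 kHz refclk, the integer expression used for
 * tdc_targetcnt below, (2 * 1000 * 100000 * 10 / (132 * 38400) + 5) / 10,
 * evaluates to 39, matching int(2 / (0.000003 * 8 * 50 * 1.1) / 38.4 + 0.5)
 * from the real-number spec formula.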
*/ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int refclk_khz = dev_priv->display.dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; u32 prop_coeff, int_coeff; u32 tdc_targetcnt, feedfwgain; u64 ssc_stepsize, ssc_steplen, ssc_steplog; u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); bool is_dkl = DISPLAY_VER(dev_priv) >= 12; int ret; ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, pll_state, is_dkl); if (ret) return ret; m1div = 2; m2div_int = dco_khz / (refclk_khz * m1div); if (m2div_int > 255) { if (!is_dkl) { m1div = 4; m2div_int = dco_khz / (refclk_khz * m1div); } if (m2div_int > 255) return -EINVAL; } m2div_rem = dco_khz % (refclk_khz * m1div); tmp = (u64)m2div_rem * (1 << 22); do_div(tmp, refclk_khz * m1div); m2div_frac = tmp; switch (refclk_khz) { case 19200: iref_ndiv = 1; iref_trim = 28; iref_pulse_w = 1; break; case 24000: iref_ndiv = 1; iref_trim = 25; iref_pulse_w = 2; break; case 38400: iref_ndiv = 2; iref_trim = 28; iref_pulse_w = 1; break; default: MISSING_CASE(refclk_khz); return -EINVAL; } /* * tdc_res = 0.000003 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5) * * The multiplication by 1000 is due to refclk MHz to KHz conversion. It * was supposed to be a division, but we rearranged the operations of * the formula to avoid early divisions so we don't multiply the * rounding errors. * * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which * we also rearrange to work with integers. * * The 0.5 transformed to 5 results in a multiplication by 10 and the * last division by 10. */ tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10; /* * Here we divide dco_khz by 10 in order to allow the dividend to fit in * 32 bits. That's not a problem since we round the division down * anyway. */ feedfwgain = (use_ssc || m2div_rem > 0) ? m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0; if (dco_khz >= 9000000) { prop_coeff = 5; int_coeff = 10; } else { prop_coeff = 4; int_coeff = 8; } if (use_ssc) { tmp = mul_u32_u32(dco_khz, 47 * 32); do_div(tmp, refclk_khz * m1div * 10000); ssc_stepsize = tmp; tmp = mul_u32_u32(dco_khz, 1000); ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32); } else { ssc_stepsize = 0; ssc_steplen = 0; } ssc_steplog = 4; /* write pll_state calculations */ if (is_dkl) { pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) | DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); if (dev_priv->display.vbt.override_afc_startup) { u8 val = dev_priv->display.vbt.override_afc_startup_val; pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); } pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt); pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) | DKL_PLL_SSC_STEP_LEN(ssc_steplen) | DKL_PLL_SSC_STEP_NUM(ssc_steplog) | (use_ssc ? DKL_PLL_SSC_EN : 0); pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) | DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac); pll_state->mg_pll_tdc_coldst_bias = DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) | DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain); } else { pll_state->mg_pll_div0 = (m2div_rem > 0 ? 
MG_PLL_DIV0_FRACNEN_H : 0) | MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | MG_PLL_DIV0_FBDIV_INT(m2div_int); pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | MG_PLL_DIV1_DITHER_DIV_2 | MG_PLL_DIV1_NDIVRATIO(1) | MG_PLL_DIV1_FBPREDIV(m1div); pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | MG_PLL_LF_AFCCNTSEL_512 | MG_PLL_LF_GAINCTRL(1) | MG_PLL_LF_INT_COEFF(int_coeff) | MG_PLL_LF_PROP_COEFF(prop_coeff); pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | MG_PLL_FRAC_LOCK_DCODITHEREN | MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); if (use_ssc || m2div_rem > 0) pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) | MG_PLL_SSC_TYPE(2) | MG_PLL_SSC_STEPLENGTH(ssc_steplen) | MG_PLL_SSC_STEPNUM(ssc_steplog) | MG_PLL_SSC_FLLEN | MG_PLL_SSC_STEPSIZE(ssc_stepsize); pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | MG_PLL_TDC_COLDST_IREFINT_EN | MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | MG_PLL_TDC_TDCOVCCORR_EN | MG_PLL_TDC_TDCSEL(3); pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | MG_PLL_BIAS_INIT_DCOAMP(0x3F) | MG_PLL_BIAS_BIAS_BONUS(10) | MG_PLL_BIAS_BIASCAL_EN | MG_PLL_BIAS_CTRIM(12) | MG_PLL_BIAS_VREF_RDAC(4) | MG_PLL_BIAS_IREFTRIM(iref_trim); if (refclk_khz == 38400) { pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; pll_state->mg_pll_bias_mask = 0; } else { pll_state->mg_pll_tdc_coldst_bias_mask = -1U; pll_state->mg_pll_bias_mask = -1U; } pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; } return 0; } static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; ref_clock = dev_priv->display.dpll.ref_clks.nssc; if (DISPLAY_VER(dev_priv) >= 12) { m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { m2_frac = pll_state->mg_pll_bias & DKL_PLL_BIAS_FBDIV_FRAC_MASK; m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; } else { m2_frac = 0; } } else { m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { m2_frac = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK; m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; } else { m2_frac = 0; } } switch (pll_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: div1 = 2; break; case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: div1 = 3; break; case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: div1 = 5; break; case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: div1 = 7; break; default: MISSING_CASE(pll_state->mg_clktop2_hsclkctl); return 0; } div2 = (pll_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; /* div2 value of 0 is same as 1 means no div */ if (div2 == 0) div2 = 1; /* * Adjust the original formula to delay the division by 2^22 in order to * minimize possible rounding errors. 
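 *
 * That is, instead of computing
 *   ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)
 * directly, the fractional contribution is shifted down separately:
 *   (m1 * m2_int * ref + ((m1 * m2_frac * ref) >> 22)) / (5 * div1 * div2)
 * which is what the expression below implements.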
*/ tmp = (u64)m1 * m2_int * ref_clock + (((u64)m1 * m2_frac * ref_clock) >> 22); tmp = div_u64(tmp, 5 * div1 * div2); return tmp; } /** * icl_set_active_port_dpll - select the active port DPLL for a given CRTC * @crtc_state: state for the CRTC to select the DPLL for * @port_dpll_id: the active @port_dpll_id to select * * Select the given @port_dpll_id instance from the DPLLs reserved for the * CRTC. */ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, enum icl_port_dpll_id port_dpll_id) { struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id]; crtc_state->shared_dpll = port_dpll->pll; crtc_state->dpll_hw_state = port_dpll->hw_state; } static void icl_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_digital_port *primary_port; enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? enc_to_mst(encoder)->primary : enc_to_dig_port(encoder); if (primary_port && (intel_tc_port_in_dp_alt_mode(primary_port) || intel_tc_port_in_legacy_mode(primary_port))) port_dpll_id = ICL_PORT_DPLL_MG_PHY; icl_set_active_port_dpll(crtc_state, port_dpll_id); } static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; struct skl_wrpll_params pll_params = {}; int ret; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) ret = icl_calc_wrpll(crtc_state, &pll_params); else ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); if (ret) return ret; icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); /* this is mainly for the fastset check */ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT); crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL, &port_dpll->hw_state); return 0; } static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; enum port port = encoder->port; unsigned long dpll_mask; if (IS_ALDERLAKE_S(dev_priv)) { dpll_mask = BIT(DPLL_ID_DG1_DPLL3) | BIT(DPLL_ID_DG1_DPLL2) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); } else if (IS_DG1(dev_priv)) { if (port == PORT_D || port == PORT_E) { dpll_mask = BIT(DPLL_ID_DG1_DPLL2) | BIT(DPLL_ID_DG1_DPLL3); } else { dpll_mask = BIT(DPLL_ID_DG1_DPLL0) | BIT(DPLL_ID_DG1_DPLL1); } } else if (IS_ROCKETLAKE(dev_priv)) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); } else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && port != PORT_A) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); } else { dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); } /* Eliminate DPLLs from consideration if reserved by HTI */ dpll_mask &= ~intel_hti_dpll_mask(dev_priv); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, dpll_mask); if (!port_dpll->pll) return -EINVAL; 
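	/*
	 * Combo PHY ports only ever use the default port DPLL slot; record
	 * the CRTC's reference on it and let icl_update_active_dpll() keep
	 * that selection (the MG/DKL slot is only picked for TC ports).
	 */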
intel_reference_shared_dpll(state, crtc, port_dpll->pll, &port_dpll->hw_state); icl_update_active_dpll(state, crtc, encoder); return 0; } static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; struct skl_wrpll_params pll_params = {}; int ret; port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; ret = icl_calc_tbt_pll(crtc_state, &pll_params); if (ret) return ret; icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state); if (ret) return ret; /* this is mainly for the fastset check */ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL, &port_dpll->hw_state); return 0; } static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; enum intel_dpll_id dpll_id; int ret; port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, BIT(DPLL_ID_ICL_TBTPLL)); if (!port_dpll->pll) return -EINVAL; intel_reference_shared_dpll(state, crtc, port_dpll->pll, &port_dpll->hw_state); port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, encoder->port)); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, BIT(dpll_id)); if (!port_dpll->pll) { ret = -EINVAL; goto err_unreference_tbt_pll; } intel_reference_shared_dpll(state, crtc, port_dpll->pll, &port_dpll->hw_state); icl_update_active_dpll(state, crtc, encoder); return 0; err_unreference_tbt_pll: port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; intel_unreference_shared_dpll(state, crtc, port_dpll->pll); return ret; } static int icl_compute_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) return icl_compute_combo_phy_dpll(state, crtc); else if (intel_phy_is_tc(dev_priv, phy)) return icl_compute_tc_phy_dplls(state, crtc); MISSING_CASE(phy); return 0; } static int icl_get_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) return icl_get_combo_phy_dpll(state, crtc, encoder); else if (intel_phy_is_tc(dev_priv, phy)) return icl_get_tc_phy_dplls(state, crtc, encoder); MISSING_CASE(phy); return -EINVAL; } static void icl_put_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum icl_port_dpll_id id; 
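	/*
	 * Drop the references taken by icl_get_combo_phy_dpll() /
	 * icl_get_tc_phy_dplls(): clear the active selection, then
	 * unreference every port DPLL slot that was populated.
	 */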
new_crtc_state->shared_dpll = NULL; for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) { const struct icl_port_dpll *old_port_dpll = &old_crtc_state->icl_port_dplls[id]; struct icl_port_dpll *new_port_dpll = &new_crtc_state->icl_port_dplls[id]; new_port_dpll->pll = NULL; if (!old_port_dpll->pll) continue; intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll); } } static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; enum tc_port tc_port = icl_pll_id_to_tc_port(id); intel_wakeref_t wakeref; bool ret = false; u32 val; i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, enable_reg); if (!(val & PLL_ENABLE)) goto out; hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_coreclkctl1 = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; hw_state->mg_clktop2_hsclkctl = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port)); hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port)); hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port)); hw_state->mg_pll_frac_lock = intel_de_read(dev_priv, MG_PLL_FRAC_LOCK(tc_port)); hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port)); hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); if (dev_priv->display.dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { hw_state->mg_pll_tdc_coldst_bias_mask = -1U; hw_state->mg_pll_bias_mask = -1U; } hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { const enum intel_dpll_id id = pll->info->id; enum tc_port tc_port = icl_pll_id_to_tc_port(id); intel_wakeref_t wakeref; bool ret = false; u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll)); if (!(val & PLL_ENABLE)) goto out; /* * All registers read here have the same HIP_INDEX_REG even though * they are on different building blocks */ hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_hsclkctl = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_clktop2_coreclkctl1 = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port)); val = DKL_PLL_DIV0_MASK; if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port)); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port)); hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port)); hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); hw_state->mg_pll_tdc_coldst_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state, i915_reg_t enable_reg) { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; bool ret = false; u32 val; wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; val = intel_de_read(dev_priv, enable_reg); if (!(val & PLL_ENABLE)) goto out; if (IS_ALDERLAKE_S(dev_priv)) { hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id)); } else if (IS_DG1(dev_priv)) { hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id)); } else if (IS_ROCKETLAKE(dev_priv)) { hw_state->cfgcr0 = intel_de_read(dev_priv, RKL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, RKL_DPLL_CFGCR1(id)); } else if (DISPLAY_VER(dev_priv) >= 12) { hw_state->cfgcr0 = intel_de_read(dev_priv, TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, TGL_DPLL_CFGCR1(id)); if (dev_priv->display.vbt.override_afc_startup) { hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id)); hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK; } } else { if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && id == DPLL_ID_EHL_DPLL4) { hw_state->cfgcr0 = intel_de_read(dev_priv, ICL_DPLL_CFGCR0(4)); hw_state->cfgcr1 = intel_de_read(dev_priv, ICL_DPLL_CFGCR1(4)); } else { hw_state->cfgcr0 = intel_de_read(dev_priv, ICL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, ICL_DPLL_CFGCR1(id)); } } ret = true; out: intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); } static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE); } static void icl_dpll_write(struct 
drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG; if (IS_ALDERLAKE_S(dev_priv)) { cfgcr0_reg = ADLS_DPLL_CFGCR0(id); cfgcr1_reg = ADLS_DPLL_CFGCR1(id); } else if (IS_DG1(dev_priv)) { cfgcr0_reg = DG1_DPLL_CFGCR0(id); cfgcr1_reg = DG1_DPLL_CFGCR1(id); } else if (IS_ROCKETLAKE(dev_priv)) { cfgcr0_reg = RKL_DPLL_CFGCR0(id); cfgcr1_reg = RKL_DPLL_CFGCR1(id); } else if (DISPLAY_VER(dev_priv) >= 12) { cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); div0_reg = TGL_DPLL0_DIV0(id); } else { if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && id == DPLL_ID_EHL_DPLL4) { cfgcr0_reg = ICL_DPLL_CFGCR0(4); cfgcr1_reg = ICL_DPLL_CFGCR1(4); } else { cfgcr0_reg = ICL_DPLL_CFGCR0(id); cfgcr1_reg = ICL_DPLL_CFGCR1(id); } } intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup && !i915_mmio_reg_valid(div0_reg)); if (dev_priv->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); intel_de_posting_read(dev_priv, cfgcr1_reg); } static void icl_mg_pll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); /* * Some of the following registers have reserved fields, so program * these with RMW based on a mask. The mask can be fixed or generated * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). 
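 * For example, icl_calc_mg_pll_state() sets mg_pll_bias_mask to 0 for a
 * 38.4 MHz refclk, so the MG_PLL_BIAS RMW below effectively leaves that
 * register at its existing value in that case.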
*/ intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port), MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl); intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK, hw_state->mg_clktop2_coreclkctl1); intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK, hw_state->mg_clktop2_hsclkctl); intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf); intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock); intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port), hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias); intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), hw_state->mg_pll_tdc_coldst_bias_mask, hw_state->mg_pll_tdc_coldst_bias); intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); } static void dkl_pll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); u32 val; /* * All registers programmed here have the same HIP_INDEX_REG even * though on different building block */ /* All the registers are RMW */ val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); val = DKL_PLL_DIV0_MASK; if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, hw_state->mg_pll_div0); val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port)); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); val |= hw_state->mg_pll_div1; intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val); val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port)); val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); val |= hw_state->mg_pll_ssc; intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val); val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port)); val &= ~(DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); val |= hw_state->mg_pll_bias; intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val); val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); val |= hw_state->mg_pll_tdc_coldst_bias; intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); } static void icl_pll_power_enable(struct drm_i915_private 
*dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE); /* * The spec says we need to "wait" but it also says it should be * immediate. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1)) drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", pll->info->id); } static void icl_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); /* Timeout is actually 600us. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id); } static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { u32 val; if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) || pll->info->id != DPLL_ID_ICL_DPLL0) return; /* * Wa_16011069516:adl-p[a0] * * All CMTG regs are unreliable until CMTG clock gating is disabled, * so we can only assume the default TRANS_CMTG_CHICKEN reg value and * sanity check this assumption with a double read, which presumably * returns the correct value even with clock gating on. * * Instead of the usual place for workarounds we apply this one here, * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled. */ val = intel_de_read(i915, TRANS_CMTG_CHICKEN); val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING); if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING)) drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val); } static void combo_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && pll->info->id == DPLL_ID_EHL_DPLL4) { /* * We need to disable DC states when this DPLL is enabled. * This can be done by taking a reference on DPLL4 power * domain. */ pll->wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); } icl_pll_power_enable(dev_priv, pll, enable_reg); icl_dpll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do * nothing here. */ icl_pll_enable(dev_priv, pll, enable_reg); adlp_cmtg_clock_gating_wa(dev_priv, pll); /* DVFS post sequence would be here. See the comment above. */ } static void tbt_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE); icl_dpll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do * nothing here. */ icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE); /* DVFS post sequence would be here. See the comment above. */ } static void mg_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); icl_pll_power_enable(dev_priv, pll, enable_reg); if (DISPLAY_VER(dev_priv) >= 12) dkl_pll_write(dev_priv, pll); else icl_mg_pll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do * nothing here. */ icl_pll_enable(dev_priv, pll, enable_reg); /* DVFS post sequence would be here. See the comment above. 
*/ } static void icl_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { /* The first steps are done by intel_ddi_post_disable(). */ /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do * nothing here. */ intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0); /* Timeout is actually 1us. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0); /* * The spec says we need to "wait" but it also says it should be * immediate. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1)) drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", pll->info->id); } static void combo_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); icl_pll_disable(dev_priv, pll, enable_reg); if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && pll->info->id == DPLL_ID_EHL_DPLL4) intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, pll->wakeref); } static void tbt_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE); } static void mg_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); icl_pll_disable(dev_priv, pll, enable_reg); } static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, " "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, " "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, " "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0, hw_state->mg_refclkin_ctl, hw_state->mg_clktop2_coreclkctl1, hw_state->mg_clktop2_hsclkctl, hw_state->mg_pll_div0, hw_state->mg_pll_div1, hw_state->mg_pll_lf, hw_state->mg_pll_frac_lock, hw_state->mg_pll_ssc, hw_state->mg_pll_bias, hw_state->mg_pll_tdc_coldst_bias); } static const struct intel_shared_dpll_funcs combo_pll_funcs = { .enable = combo_pll_enable, .disable = combo_pll_disable, .get_hw_state = combo_pll_get_hw_state, .get_freq = icl_ddi_combo_pll_get_freq, }; static const struct intel_shared_dpll_funcs tbt_pll_funcs = { .enable = tbt_pll_enable, .disable = tbt_pll_disable, .get_hw_state = tbt_pll_get_hw_state, .get_freq = icl_ddi_tbt_pll_get_freq, }; static const struct intel_shared_dpll_funcs mg_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = mg_pll_get_hw_state, .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info icl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, {
}, }; static const struct intel_dpll_mgr icl_pll_mgr = { .dpll_info = icl_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info ehl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, { }, }; static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct intel_shared_dpll_funcs dkl_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = dkl_pll_get_hw_state, .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info tgl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 }, { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 }, { }, }; static const struct intel_dpll_mgr tgl_pll_mgr = { .dpll_info = tgl_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info rkl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, { }, }; static const struct intel_dpll_mgr rkl_pll_mgr = { .dpll_info = rkl_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info dg1_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 }, { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, { }, }; static const struct intel_dpll_mgr dg1_pll_mgr = { .dpll_info = dg1_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info adls_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, { }, }; static const struct intel_dpll_mgr adls_pll_mgr = { .dpll_info = adls_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; static const struct dpll_info adlp_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, { "TC PLL 1", &dkl_pll_funcs, 
DPLL_ID_ICL_MGPLL1, 0 }, { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, { }, }; static const struct intel_dpll_mgr adlp_pll_mgr = { .dpll_info = adlp_plls, .compute_dplls = icl_compute_dplls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev_priv: i915 device * * Initialize shared DPLLs for @dev_priv. */ void intel_shared_dpll_init(struct drm_i915_private *dev_priv) { const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; mutex_init(&dev_priv->display.dpll.lock); if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) /* No shared DPLLs on DG2; port PLLs are part of the PHY */ dpll_mgr = NULL; else if (IS_ALDERLAKE_P(dev_priv)) dpll_mgr = &adlp_pll_mgr; else if (IS_ALDERLAKE_S(dev_priv)) dpll_mgr = &adls_pll_mgr; else if (IS_DG1(dev_priv)) dpll_mgr = &dg1_pll_mgr; else if (IS_ROCKETLAKE(dev_priv)) dpll_mgr = &rkl_pll_mgr; else if (DISPLAY_VER(dev_priv) >= 12) dpll_mgr = &tgl_pll_mgr; else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) dpll_mgr = &ehl_pll_mgr; else if (DISPLAY_VER(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) dpll_mgr = &bxt_pll_mgr; else if (DISPLAY_VER(dev_priv) == 9) dpll_mgr = &skl_pll_mgr; else if (HAS_DDI(dev_priv)) dpll_mgr = &hsw_pll_mgr; else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) dpll_mgr = &pch_pll_mgr; if (!dpll_mgr) { dev_priv->display.dpll.num_shared_dpll = 0; return; } dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { if (drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls))) break; drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i]; } dev_priv->display.dpll.mgr = dpll_mgr; dev_priv->display.dpll.num_shared_dpll = i; } /** * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination * @state: atomic state * @crtc: CRTC to compute DPLLs for * @encoder: encoder * * This function computes the DPLL state for the given CRTC and encoder. * * The new configuration in the atomic commit @state is made effective by * calling intel_shared_dpll_swap_state(). * * Returns: * 0 on success, negative error code on failure. */ int intel_compute_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; return dpll_mgr->compute_dplls(state, crtc, encoder); } /** * intel_reserve_shared_dplls - reserve DPLLs for a CRTC and encoder combination * @state: atomic state * @crtc: CRTC to reserve DPLLs for * @encoder: encoder * * This function reserves all required DPLLs for the given CRTC and encoder * combination in the current atomic commit @state and the new @crtc atomic * state. * * The new configuration in the atomic commit @state is made effective by * calling intel_shared_dpll_swap_state(). * * The reserved DPLLs should be released by calling * intel_release_shared_dplls(). * * Returns: * 0 if all required DPLLs were successfully reserved, * negative error code otherwise.
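 *
 * A typical ordering implied by the documentation in this file:
 * intel_compute_shared_dplls(), then intel_reserve_shared_dplls(); the new
 * configuration takes effect via intel_shared_dpll_swap_state(), and the
 * PLLs are eventually dropped again with intel_release_shared_dplls().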
*/ int intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; return dpll_mgr->get_dplls(state, crtc, encoder); } /** * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state * @state: atomic state * @crtc: crtc from which the DPLLs are to be released * * This function releases all DPLLs reserved by intel_reserve_shared_dplls() * from the current atomic commit @state and the old @crtc atomic state. * * The new configuration in the atomic commit @state is made effective by * calling intel_shared_dpll_swap_state(). */ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; /* * FIXME: this function is called for every platform having a * compute_clock hook, even though the platform doesn't yet support * the shared DPLL framework and intel_reserve_shared_dplls() is not * called on those. */ if (!dpll_mgr) return; dpll_mgr->put_dplls(state, crtc); } /** * intel_update_active_dpll - update the active DPLL for a CRTC/encoder * @state: atomic state * @crtc: the CRTC for which to update the active DPLL * @encoder: encoder determining the type of port DPLL * * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state, * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The * DPLL selected will be based on the current mode of the encoder's port. */ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return; dpll_mgr->update_active_dpll(state, crtc, encoder); } /** * intel_dpll_get_freq - calculate the DPLL's output frequency * @i915: i915 device * @pll: DPLL for which to calculate the output frequency * @pll_state: DPLL state from which to calculate the output frequency * * Return the output frequency corresponding to @pll's passed in @pll_state. */ int intel_dpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq)) return 0; return pll->info->funcs->get_freq(i915, pll, pll_state); } /** * intel_dpll_get_hw_state - readout the DPLL's hardware state * @i915: i915 device * @pll: DPLL for which to calculate the output frequency * @hw_state: DPLL's hardware state * * Read out @pll's hardware state into @hw_state. 
*/ bool intel_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { return pll->info->funcs->get_hw_state(i915, pll, hw_state); } static void readout_dpll_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { struct intel_crtc *crtc; pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state); if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && pll->on && pll->info->id == DPLL_ID_EHL_DPLL4) { pll->wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF); } pll->state.pipe_mask = 0; for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); if (crtc_state->hw.active && crtc_state->shared_dpll == pll) intel_reference_shared_dpll_crtc(crtc, pll, &pll->state); } pll->active_mask = pll->state.pipe_mask; drm_dbg_kms(&i915->drm, "%s hw state readout: pipe_mask 0x%x, on %i\n", pll->info->name, pll->state.pipe_mask, pll->on); } void intel_dpll_update_ref_clks(struct drm_i915_private *i915) { if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks) i915->display.dpll.mgr->update_ref_clks(i915); } void intel_dpll_readout_hw_state(struct drm_i915_private *i915) { int i; for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]); } static void sanitize_dpll_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { if (!pll->on) return; adlp_cmtg_clock_gating_wa(i915, pll); if (pll->active_mask) return; drm_dbg_kms(&i915->drm, "%s enabled but not in use, disabling\n", pll->info->name); pll->info->funcs->disable(i915, pll); pll->on = false; } void intel_dpll_sanitize_state(struct drm_i915_private *i915) { int i; for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]); } /** * intel_dpll_dump_hw_state - write hw_state to dmesg * @dev_priv: i915 drm device * @hw_state: hw state to be written to the log * * Write the relevant values in @hw_state to dmesg using drm_dbg_kms. 
*/ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { if (dev_priv->display.dpll.mgr) { dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure */ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", hw_state->dpll, hw_state->dpll_md, hw_state->fp0, hw_state->fp1); } } static void verify_single_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state) { struct intel_dpll_hw_state dpll_hw_state; u8 pipe_mask; bool active; memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state); if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask, "pll is on but not used by any active pipe\n"); I915_STATE_WARN(dev_priv, pll->on != active, "pll on state mismatch (expected %i, found %i)\n", pll->on, active); } if (!crtc) { I915_STATE_WARN(dev_priv, pll->active_mask & ~pll->state.pipe_mask, "more active pll users than references: 0x%x vs 0x%x\n", pll->active_mask, pll->state.pipe_mask); return; } pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask), "pll active mismatch (expected pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); else I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", pipe_mask, pll->state.pipe_mask); I915_STATE_WARN(dev_priv, pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, sizeof(dpll_hw_state)), "pll hw state mismatch\n"); } void intel_shared_dpll_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (new_crtc_state->shared_dpll) verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); if (old_crtc_state->shared_dpll && old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", pipe_name(crtc->pipe), pll->active_mask); I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask, "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n", pipe_name(crtc->pipe), pll->state.pipe_mask); } } void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915) { int i; for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i], NULL, NULL); }
linux-master
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
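A minimal stand-alone sketch of the pipe_mask/active_mask bookkeeping that readout_dpll_hw_state() and sanitize_dpll_state() in the file above rely on: a PLL found enabled in hardware but referenced by no active pipe gets switched off. The toy_* struct and helpers are hypothetical stand-ins for illustration, not the driver's own types.

/* Illustrative model only; the real state lives in struct intel_shared_dpll. */
#include <stdbool.h>
#include <stdio.h>

struct toy_dpll {
	const char *name;
	bool on;                  /* enable bit as read back from hardware */
	unsigned int pipe_mask;   /* pipes referencing this PLL in sw state */
	unsigned int active_mask; /* pipes actively driving a display with it */
};

static void toy_reference(struct toy_dpll *pll, unsigned int pipe)
{
	pll->pipe_mask |= 1u << pipe; /* mirrors the per-CRTC reference step */
}

static void toy_sanitize(struct toy_dpll *pll)
{
	/* mirrors "enabled but not in use, disabling" in sanitize_dpll_state() */
	if (pll->on && !pll->active_mask) {
		printf("%s enabled but not in use, disabling\n", pll->name);
		pll->on = false;
	}
}

int main(void)
{
	struct toy_dpll pll = { .name = "DPLL 0", .on = true };

	/* readout found the PLL on, but no active CRTC referenced it */
	toy_sanitize(&pll);

	/* a later modeset would reference it again before enabling */
	toy_reference(&pll, 0);
	pll.active_mask = pll.pipe_mask;
	return 0;
}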
/* * Copyright © 2013 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Author: Damien Lespiau <[email protected]> * */ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "i915_irq.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_pipe_crc.h" static const char * const pipe_crc_sources[] = { [INTEL_PIPE_CRC_SOURCE_NONE] = "none", [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1", [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2", [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3", [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4", [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5", [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6", [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7", [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe", [INTEL_PIPE_CRC_SOURCE_TV] = "TV", [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B", [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C", [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D", [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto", }; static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: return -EINVAL; } return 0; } static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source) { struct intel_encoder *encoder; struct intel_crtc *crtc; struct intel_digital_port *dig_port; *source = INTEL_PIPE_CRC_SOURCE_PIPE; drm_modeset_lock_all(&dev_priv->drm); for_each_intel_encoder(&dev_priv->drm, encoder) { if (!encoder->base.crtc) continue; crtc = to_intel_crtc(encoder->base.crtc); if (crtc->pipe != pipe) continue; switch (encoder->type) { case INTEL_OUTPUT_TVOUT: *source = INTEL_PIPE_CRC_SOURCE_TV; break; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: dig_port = enc_to_dig_port(encoder); switch (dig_port->base.port) { case PORT_B: *source = INTEL_PIPE_CRC_SOURCE_DP_B; break; case PORT_C: *source = INTEL_PIPE_CRC_SOURCE_DP_C; break; case PORT_D: *source = INTEL_PIPE_CRC_SOURCE_DP_D; break; default: drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n", port_name(dig_port->base.port)); break; } break; default: break; } } drm_modeset_unlock_all(&dev_priv->drm); } static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { bool 
need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) i9xx_pipe_crc_auto_source(dev_priv, pipe, source); switch (*source) { case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; break; case INTEL_PIPE_CRC_SOURCE_DP_B: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; need_stable_symbols = true; break; case INTEL_PIPE_CRC_SOURCE_DP_C: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; need_stable_symbols = true; break; case INTEL_PIPE_CRC_SOURCE_DP_D: if (!IS_CHERRYVIEW(dev_priv)) return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; need_stable_symbols = true; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: return -EINVAL; } /* * When the pipe CRC tap point is after the transcoders we need * to tweak symbol-level features to produce a deterministic series of * symbols for a given frame. We need to reset those features only once * a frame (instead of every nth symbol): * - DC-balance: used to ensure a better clock recovery from the data * link (SDVO) * - DisplayPort scrambling: used for EMI reduction */ if (need_stable_symbols) { u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X); tmp |= DC_BALANCE_RESET_VLV; switch (pipe) { case PIPE_A: tmp |= PIPE_A_SCRAMBLE_RESET; break; case PIPE_B: tmp |= PIPE_B_SCRAMBLE_RESET; break; case PIPE_C: tmp |= PIPE_C_SCRAMBLE_RESET; break; default: return -EINVAL; } intel_de_write(dev_priv, PORT_DFT2_G4X, tmp); } return 0; } static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) i9xx_pipe_crc_auto_source(dev_priv, pipe, source); switch (*source) { case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; break; case INTEL_PIPE_CRC_SOURCE_TV: if (!SUPPORTS_TV(dev_priv)) return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: /* * The DP CRC source doesn't work on g4x. * It can be made to work to some degree by selecting * the correct CRC source before the port is enabled, * and not touching the CRC source bits again until * the port is disabled. But even then the bits * eventually get stuck and a reboot is needed to get * working CRCs on the pipe again. Let's simply * refuse to use DP CRCs on g4x. 
*/ return -EINVAL; } return 0; } static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X); switch (pipe) { case PIPE_A: tmp &= ~PIPE_A_SCRAMBLE_RESET; break; case PIPE_B: tmp &= ~PIPE_B_SCRAMBLE_RESET; break; case PIPE_C: tmp &= ~PIPE_C_SCRAMBLE_RESET; break; default: return; } if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) tmp &= ~DC_BALANCE_RESET_VLV; intel_de_write(dev_priv, PORT_DFT2_G4X, tmp); } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; break; case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; break; case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: return -EINVAL; } return 0; } static void intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; int ret; drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) { ret = -ENOMEM; goto unlock; } state->acquire_ctx = &ctx; to_intel_atomic_state(state)->internal = true; retry: pipe_config = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(pipe_config)) { ret = PTR_ERR(pipe_config); goto put_state; } pipe_config->uapi.mode_changed = pipe_config->has_psr; pipe_config->crc_enabled = enable; if (IS_HASWELL(dev_priv) && pipe_config->hw.active && crtc->pipe == PIPE_A && pipe_config->cpu_transcoder == TRANSCODER_EDP) pipe_config->uapi.mode_changed = true; ret = drm_atomic_commit(state); put_state: if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } drm_atomic_state_put(state); unlock: drm_WARN(&dev_priv->drm, ret, "Toggling workaround to %i returns %i\n", enable, ret); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; break; case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: return -EINVAL; } return 0; } static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL; break; case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL; break; case INTEL_PIPE_CRC_SOURCE_PLANE3: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL; break; case INTEL_PIPE_CRC_SOURCE_PLANE4: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL; break; case INTEL_PIPE_CRC_SOURCE_PLANE5: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL; break; case 
INTEL_PIPE_CRC_SOURCE_PLANE6: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL; break; case INTEL_PIPE_CRC_SOURCE_PLANE7: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL; break; case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL; break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: return -EINVAL; } return 0; } static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { if (DISPLAY_VER(dev_priv) == 2) return i8xx_pipe_crc_ctl_reg(source, val); else if (DISPLAY_VER(dev_priv) < 5) return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv)) return ilk_pipe_crc_ctl_reg(source, val); else if (DISPLAY_VER(dev_priv) < 9) return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val); } static int display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) { int i; if (!buf) { *s = INTEL_PIPE_CRC_SOURCE_NONE; return 0; } i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf); if (i < 0) return i; *s = i; return 0; } void intel_crtc_crc_init(struct intel_crtc *crtc) { struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; spin_lock_init(&pipe_crc->lock); } static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_TV: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int vlv_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_DP_B: case INTEL_PIPE_CRC_SOURCE_DP_C: case INTEL_PIPE_CRC_SOURCE_DP_D: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int ilk_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_PLANE1: case INTEL_PIPE_CRC_SOURCE_PLANE2: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int ivb_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_PLANE1: case INTEL_PIPE_CRC_SOURCE_PLANE2: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int skl_crc_source_valid(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_PLANE1: case INTEL_PIPE_CRC_SOURCE_PLANE2: case INTEL_PIPE_CRC_SOURCE_PLANE3: case INTEL_PIPE_CRC_SOURCE_PLANE4: case INTEL_PIPE_CRC_SOURCE_PLANE5: case INTEL_PIPE_CRC_SOURCE_PLANE6: case INTEL_PIPE_CRC_SOURCE_PLANE7: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: return -EINVAL; } } static int intel_is_valid_crc_source(struct drm_i915_private *dev_priv, const enum intel_pipe_crc_source source) { if (DISPLAY_VER(dev_priv) == 2) return i8xx_crc_source_valid(dev_priv, 
source); else if (DISPLAY_VER(dev_priv) < 5) return i9xx_crc_source_valid(dev_priv, source); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_crc_source_valid(dev_priv, source); else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv)) return ilk_crc_source_valid(dev_priv, source); else if (DISPLAY_VER(dev_priv) < 9) return ivb_crc_source_valid(dev_priv, source); else return skl_crc_source_valid(dev_priv, source); } const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count) { *count = ARRAY_SIZE(pipe_crc_sources); return pipe_crc_sources; } int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_pipe_crc_source source; if (display_crc_ctl_parse_source(source_name, &source) < 0) { drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); return -EINVAL; } if (source == INTEL_PIPE_CRC_SOURCE_AUTO || intel_is_valid_crc_source(dev_priv, source) == 0) { *values_cnt = 5; return 0; } return -EINVAL; } int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name) { struct intel_crtc *crtc = to_intel_crtc(_crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; u32 val = 0; /* shut up gcc */ int ret = 0; bool enable; if (display_crc_ctl_parse_source(source_name, &source) < 0) { drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); return -EINVAL; } power_domain = POWER_DOMAIN_PIPE(pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) { drm_dbg_kms(&dev_priv->drm, "Trying to capture CRC while pipe is off\n"); return -EIO; } enable = source != INTEL_PIPE_CRC_SOURCE_NONE; if (enable) intel_crtc_crc_setup_workarounds(crtc, true); ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val); if (ret != 0) goto out; pipe_crc->source = source; intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val); intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe)); if (!source) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_undo_pipe_scramble_reset(dev_priv, pipe); } pipe_crc->skipped = 0; out: if (!enable) intel_crtc_crc_setup_workarounds(crtc, false); intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum pipe pipe = crtc->pipe; u32 val = 0; if (!crtc->base.crc.opened) return; if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0) return; /* Don't need pipe_crc->lock here, IRQs are not generated. */ pipe_crc->skipped = 0; intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val); intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe)); } void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum pipe pipe = crtc->pipe; /* Swallow crc's until we stop generating them. */ spin_lock_irq(&pipe_crc->lock); pipe_crc->skipped = INT_MIN; spin_unlock_irq(&pipe_crc->lock); intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), 0); intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe)); intel_synchronize_irq(dev_priv); }
linux-master
drivers/gpu/drm/i915/display/intel_pipe_crc.c
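A small stand-alone sketch of the two-step flow intel_crtc_verify_crc_source() in the file above uses: map the user-supplied CRC source name to an enum via a string table, then run a per-platform validity check. The toy_* table and the has_plane_crc flag are made-up illustrations, not the driver's real source list.

/* Illustrative model only; the real table is pipe_crc_sources[]. */
#include <stdio.h>
#include <string.h>

enum toy_crc_source {
	TOY_CRC_NONE,
	TOY_CRC_PLANE1,
	TOY_CRC_PIPE,
	TOY_CRC_AUTO,
	TOY_CRC_COUNT
};

static const char *const toy_sources[TOY_CRC_COUNT] = {
	[TOY_CRC_NONE]   = "none",
	[TOY_CRC_PLANE1] = "plane1",
	[TOY_CRC_PIPE]   = "pipe",
	[TOY_CRC_AUTO]   = "auto",
};

static int toy_parse_source(const char *buf, enum toy_crc_source *s)
{
	int i;

	for (i = 0; i < TOY_CRC_COUNT; i++) {
		if (!strcmp(buf, toy_sources[i])) {
			*s = i;
			return 0;
		}
	}
	return -1; /* unknown source name */
}

static int toy_source_valid(enum toy_crc_source s, int has_plane_crc)
{
	/* per-"platform" restriction, like the *_crc_source_valid() helpers */
	if (s == TOY_CRC_PLANE1 && !has_plane_crc)
		return -1;
	return 0;
}

int main(void)
{
	enum toy_crc_source s;

	if (toy_parse_source("pipe", &s) == 0 && toy_source_valid(s, 1) == 0)
		printf("CRC source \"%s\" accepted\n", toy_sources[s]);
	return 0;
}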
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include "i915_drv.h" #include "i9xx_wm.h" #include "intel_display_types.h" #include "intel_wm.h" #include "skl_watermark.h" /** * intel_update_watermarks - update FIFO watermark values based on current modes * @i915: i915 device * * Calculate watermark values for the various WM regs based on current mode * and plane configuration. * * There are several cases to deal with here: * - normal (i.e. non-self-refresh) * - self-refresh (SR) mode * - lines are large relative to FIFO size (buffer can hold up to 2) * - lines are small relative to FIFO size (buffer can hold more than 2 * lines), so need to account for TLB latency * * The normal calculation is: * watermark = dotclock * bytes per pixel * latency * where latency is platform & configuration dependent (we assume pessimal * values here). * * The SR calculation is: * watermark = (trunc(latency/line time)+1) * surface width * * bytes per pixel * where * line time = htotal / dotclock * surface width = hdisplay for normal plane and 64 for cursor * and latency is assumed to be high, as above. * * The final value programmed to the register should always be rounded up, * and include an extra 2 entries to account for clock crossings. * * We don't use the sprite, so we can ignore that. And on Crestline we have * to set the non-SR watermarks to 8. */ void intel_update_watermarks(struct drm_i915_private *i915) { if (i915->display.funcs.wm->update_wm) i915->display.funcs.wm->update_wm(i915); } int intel_compute_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (i915->display.funcs.wm->compute_pipe_wm) return i915->display.funcs.wm->compute_pipe_wm(state, crtc); return 0; } int intel_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (!i915->display.funcs.wm->compute_intermediate_wm) return 0; if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) return 0; return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); } bool intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (i915->display.funcs.wm->initial_watermarks) { i915->display.funcs.wm->initial_watermarks(state, crtc); return true; } return false; } void intel_atomic_update_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (i915->display.funcs.wm->atomic_update_watermarks) i915->display.funcs.wm->atomic_update_watermarks(state, crtc); } void intel_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (i915->display.funcs.wm->optimize_watermarks) i915->display.funcs.wm->optimize_watermarks(state, crtc); } int intel_compute_global_watermarks(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (i915->display.funcs.wm->compute_global_watermarks) return i915->display.funcs.wm->compute_global_watermarks(state); return 0; } void intel_wm_get_hw_state(struct drm_i915_private *i915) { if (i915->display.funcs.wm->get_hw_state) return i915->display.funcs.wm->get_hw_state(i915); } bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = 
to_intel_plane(plane_state->uapi.plane); /* FIXME check the 'enable' instead */ if (!crtc_state->hw.active) return false; /* * Treat cursor with fb as always visible since cursor updates * can happen faster than the vrefresh rate, and the current * watermark code doesn't handle that correctly. Cursor updates * which set/clear the fb or change the cursor size are going * to get throttled by intel_legacy_cursor_update() to work * around this problem with the watermark code. */ if (plane->id == PLANE_CURSOR) return plane_state->hw.fb != NULL; else return plane_state->uapi.visible; } void intel_print_wm_latency(struct drm_i915_private *dev_priv, const char *name, const u16 wm[]) { int level; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { unsigned int latency = wm[level]; if (latency == 0) { drm_dbg_kms(&dev_priv->drm, "%s WM%d latency not provided\n", name, level); continue; } /* * - latencies are in us on gen9. * - before then, WM1+ latency values are in 0.5us units */ if (DISPLAY_VER(dev_priv) >= 9) latency *= 10; else if (level > 0) latency *= 5; drm_dbg_kms(&dev_priv->drm, "%s WM%d latency %u (%u.%u usec)\n", name, level, wm[level], latency / 10, latency % 10); } } void intel_wm_init(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 9) skl_wm_init(i915); else i9xx_wm_init(i915); } static void wm_latency_show(struct seq_file *m, const u16 wm[8]) { struct drm_i915_private *dev_priv = m->private; int level; drm_modeset_lock_all(&dev_priv->drm); for (level = 0; level < dev_priv->display.wm.num_levels; level++) { unsigned int latency = wm[level]; /* * - WM1+ latency values in 0.5us units * - latencies are in us on gen9/vlv/chv */ if (DISPLAY_VER(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || IS_G4X(dev_priv)) latency *= 10; else if (level > 0) latency *= 5; seq_printf(m, "WM%d %u (%u.%u usec)\n", level, wm[level], latency / 10, latency % 10); } drm_modeset_unlock_all(&dev_priv->drm); } static int pri_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.pri_latency; wm_latency_show(m, latencies); return 0; } static int spr_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.spr_latency; wm_latency_show(m, latencies); return 0; } static int cur_wm_latency_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.cur_latency; wm_latency_show(m, latencies); return 0; } static int pri_wm_latency_open(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) return -ENODEV; return single_open(file, pri_wm_latency_show, dev_priv); } static int spr_wm_latency_open(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; if (HAS_GMCH(dev_priv)) return -ENODEV; return single_open(file, spr_wm_latency_show, dev_priv); } static int cur_wm_latency_open(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; if (HAS_GMCH(dev_priv)) return -ENODEV; return 
single_open(file, cur_wm_latency_show, dev_priv); } static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp, u16 wm[8]) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; u16 new[8] = { 0 }; int level; int ret; char tmp[32]; if (len >= sizeof(tmp)) return -EINVAL; if (copy_from_user(tmp, ubuf, len)) return -EFAULT; tmp[len] = '\0'; ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4], &new[5], &new[6], &new[7]); if (ret != dev_priv->display.wm.num_levels) return -EINVAL; drm_modeset_lock_all(&dev_priv->drm); for (level = 0; level < dev_priv->display.wm.num_levels; level++) wm[level] = new[level]; drm_modeset_unlock_all(&dev_priv->drm); return len; } static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.pri_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.spr_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) latencies = dev_priv->display.wm.skl_latency; else latencies = dev_priv->display.wm.cur_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } static const struct file_operations i915_pri_wm_latency_fops = { .owner = THIS_MODULE, .open = pri_wm_latency_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = pri_wm_latency_write }; static const struct file_operations i915_spr_wm_latency_fops = { .owner = THIS_MODULE, .open = spr_wm_latency_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = spr_wm_latency_write }; static const struct file_operations i915_cur_wm_latency_fops = { .owner = THIS_MODULE, .open = cur_wm_latency_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = cur_wm_latency_write }; void intel_wm_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, i915, &i915_pri_wm_latency_fops); debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, i915, &i915_spr_wm_latency_fops); debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, i915, &i915_cur_wm_latency_fops); skl_watermark_debugfs_register(i915); }
linux-master
drivers/gpu/drm/i915/display/intel_wm.c
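A worked stand-alone example of the two FIFO watermark formulas quoted in the intel_update_watermarks() comment in the file above (normal and self-refresh). The mode numbers are invented for illustration; the driver derives the real values from the current CRTC state and per-platform latency tables.

/* Illustrative arithmetic only; not the driver's watermark code. */
#include <stdio.h>

int main(void)
{
	/* hypothetical mode: 1920x1080, 148.5 MHz dotclock, 32bpp */
	double dotclock_mhz = 148.5;
	double htotal       = 2200.0;
	double hdisplay     = 1920.0;
	double cpp          = 4.0;   /* bytes per pixel */
	double latency_us   = 2.0;   /* assumed memory latency */

	/* normal: watermark = dotclock * bytes per pixel * latency */
	double normal_wm = dotclock_mhz * cpp * latency_us;

	/* SR: watermark = (trunc(latency / line time) + 1) * surface width * cpp,
	 * with line time = htotal / dotclock and surface width = hdisplay */
	double line_time_us = htotal / dotclock_mhz;
	double sr_wm = ((int)(latency_us / line_time_us) + 1) * hdisplay * cpp;

	printf("normal wm ~= %.0f bytes, sr wm ~= %.0f bytes\n",
	       normal_wm, sr_wm);
	return 0;
}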
/* * Copyright © 2011 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Jesse Barnes <[email protected]> * * New plane/sprite handling. * * The older chips had a separate interface for programming plane related * registers; newer ones are much simpler and we can use the new DRM plane * support. */ #include <linux/string_helpers.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_fourcc.h> #include <drm/drm_rect.h> #include "i915_drv.h" #include "i915_reg.h" #include "i9xx_plane.h" #include "intel_atomic_plane.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_sprite.h" static void i9xx_plane_linear_gamma(u16 gamma[8]) { /* The points are not evenly spaced. */ static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 }; int i; for (i = 0; i < 8; i++) gamma[i] = (in[i] << 8) / 32; } static void chv_sprite_update_csc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id plane_id = plane->id; /* * |r| | c0 c1 c2 | |cr| * |g| = | c3 c4 c5 | x |y | * |b| | c6 c7 c8 | |cb| * * Coefficients are s3.12. * * Cb and Cr apparently come in as signed already, and * we always get full range data in on account of CLRC0/1. 
*/ static const s16 csc_matrix[][9] = { /* BT.601 full range YCbCr -> full range RGB */ [DRM_COLOR_YCBCR_BT601] = { 5743, 4096, 0, -2925, 4096, -1410, 0, 4096, 7258, }, /* BT.709 full range YCbCr -> full range RGB */ [DRM_COLOR_YCBCR_BT709] = { 6450, 4096, 0, -1917, 4096, -767, 0, 4096, 7601, }, }; const s16 *csc = csc_matrix[plane_state->hw.color_encoding]; /* Seems RGB data bypasses the CSC always */ if (!fb->format->is_yuv) return; intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); intel_de_write_fw(dev_priv, SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0])); intel_de_write_fw(dev_priv, SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2])); intel_de_write_fw(dev_priv, SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4])); intel_de_write_fw(dev_priv, SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6])); intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8])); intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0)); intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } #define SIN_0 0 #define COS_0 1 static void vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; int contrast, brightness, sh_scale, sh_sin, sh_cos; if (fb->format->is_yuv && plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) { /* * Expand limited range to full range: * Contrast is applied first and is used to expand Y range. * Brightness is applied second and is used to remove the * offset from Y. Saturation/hue is used to expand CbCr range. */ contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16); brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16); sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128); sh_sin = SIN_0 * sh_scale; sh_cos = COS_0 * sh_scale; } else { /* Pass-through everything. */ contrast = 1 << 6; brightness = 0; sh_scale = 1 << 7; sh_sin = SIN_0 * sh_scale; sh_cos = COS_0 * sh_scale; } /* FIXME these register are single buffered :( */ intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id), SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness)); intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id), SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } static void vlv_plane_ratio(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp = fb->format->cpp[0]; /* * VLV bspec only considers cases where all three planes are * enabled, and cases where the primary and one sprite is enabled. * Let's assume the case with just two sprites enabled also * maps to the latter case. 
*/ if (hweight8(active_planes) == 3) { switch (cpp) { case 8: *num = 11; *den = 8; break; case 4: *num = 18; *den = 16; break; default: *num = 1; *den = 1; break; } } else if (hweight8(active_planes) == 2) { switch (cpp) { case 8: *num = 10; *den = 8; break; case 4: *num = 17; *den = 16; break; default: *num = 1; *den = 1; break; } } else { switch (cpp) { case 8: *num = 10; *den = 8; break; default: *num = 1; *den = 1; break; } } } int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate; unsigned int num, den; /* * Note that crtc_state->pixel_rate accounts for both * horizontal and vertical panel fitter downscaling factors. * Pre-HSW bspec tells us to only consider the horizontal * downscaling factor here. We ignore that and just consider * both for simplicity. */ pixel_rate = crtc_state->pixel_rate; vlv_plane_ratio(crtc_state, plane_state, &num, &den); return DIV_ROUND_UP(pixel_rate * num, den); } static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; if (crtc_state->gamma_enable) sprctl |= SP_PIPE_GAMMA_ENABLE; return sprctl; } static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; sprctl = SP_ENABLE; switch (fb->format->format) { case DRM_FORMAT_YUYV: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV; break; case DRM_FORMAT_YVYU: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU; break; case DRM_FORMAT_UYVY: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY; break; case DRM_FORMAT_VYUY: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY; break; case DRM_FORMAT_C8: sprctl |= SP_FORMAT_8BPP; break; case DRM_FORMAT_RGB565: sprctl |= SP_FORMAT_BGR565; break; case DRM_FORMAT_XRGB8888: sprctl |= SP_FORMAT_BGRX8888; break; case DRM_FORMAT_ARGB8888: sprctl |= SP_FORMAT_BGRA8888; break; case DRM_FORMAT_XBGR2101010: sprctl |= SP_FORMAT_RGBX1010102; break; case DRM_FORMAT_ABGR2101010: sprctl |= SP_FORMAT_RGBA1010102; break; case DRM_FORMAT_XRGB2101010: sprctl |= SP_FORMAT_BGRX1010102; break; case DRM_FORMAT_ARGB2101010: sprctl |= SP_FORMAT_BGRA1010102; break; case DRM_FORMAT_XBGR8888: sprctl |= SP_FORMAT_RGBX8888; break; case DRM_FORMAT_ABGR8888: sprctl |= SP_FORMAT_RGBA8888; break; default: MISSING_CASE(fb->format->format); return 0; } if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SP_YUV_FORMAT_BT709; if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= SP_TILED; if (rotation & DRM_MODE_ROTATE_180) sprctl |= SP_ROTATE_180; if (rotation & DRM_MODE_REFLECT_X) sprctl |= SP_MIRROR; if (key->flags & I915_SET_COLORKEY_SOURCE) sprctl |= SP_SOURCE_KEY; return sprctl; } static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; u16 gamma[8]; int i; /* Seems RGB data bypasses the gamma always */ if (!fb->format->is_yuv) return; i9xx_plane_linear_gamma(gamma); /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1), gamma[i] << 16 | 
gamma[i] << 8 | gamma[i]); } static void vlv_sprite_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), SP_POS_Y(crtc_y) | SP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1)); } static void vlv_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; u32 sprctl, linear_offset; sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) chv_sprite_update_csc(plane_state); if (key->flags) { intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), key->min_value); intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id), key->channel_mask); intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id), key->max_value); } intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), SP_OFFSET_Y(y) | SP_OFFSET_X(x)); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl); intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); vlv_sprite_update_clrc(plane_state); vlv_sprite_update_gamma(plane_state); } static void vlv_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0); intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0); } static bool vlv_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp = fb->format->cpp[0]; if (hweight8(active_planes) == 2) { switch (cpp) { case 8: *num = 10; *den = 8; break; case 4: *num = 17; *den = 16; break; default: *num = 1; *den = 1; break; } } else { switch (cpp) { case 8: *num = 9; *den = 8; break; default: *num = 1; *den = 1; break; } } } static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp = fb->format->cpp[0]; switch (cpp) { case 8: *num = 12; *den = 8; break; case 4: *num = 19; *den = 16; break; case 2: *num = 33; *den = 32; break; default: *num = 1; *den = 1; break; } } int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate; unsigned int num, den; /* * Note that crtc_state->pixel_rate accounts for both * horizontal and vertical panel fitter downscaling factors. * Pre-HSW bspec tells us to only consider the horizontal * downscaling factor here. We ignore that and just consider * both for simplicity. */ pixel_rate = crtc_state->pixel_rate; ivb_plane_ratio(crtc_state, plane_state, &num, &den); return DIV_ROUND_UP(pixel_rate * num, den); } static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int src_w, dst_w, pixel_rate; unsigned int num, den; /* * Note that crtc_state->pixel_rate accounts for both * horizontal and vertical panel fitter downscaling factors. * Pre-HSW bspec tells us to only consider the horizontal * downscaling factor here. We ignore that and just consider * both for simplicity. 
*/ pixel_rate = crtc_state->pixel_rate; src_w = drm_rect_width(&plane_state->uapi.src) >> 16; dst_w = drm_rect_width(&plane_state->uapi.dst); if (src_w != dst_w) ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den); else ivb_plane_ratio(crtc_state, plane_state, &num, &den); /* Horizontal downscaling limits the maximum pixel rate */ dst_w = min(src_w, dst_w); return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w), den * dst_w); } static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp = fb->format->cpp[0]; if (hweight8(active_planes) == 2) { switch (cpp) { case 8: *num = 10; *den = 8; break; default: *num = 1; *den = 1; break; } } else { switch (cpp) { case 8: *num = 9; *den = 8; break; default: *num = 1; *den = 1; break; } } } int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate = crtc_state->pixel_rate; unsigned int num, den; hsw_plane_ratio(crtc_state, plane_state, &num, &den); return DIV_ROUND_UP(pixel_rate * num, den); } static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; if (crtc_state->gamma_enable) sprctl |= SPRITE_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) sprctl |= SPRITE_PIPE_CSC_ENABLE; return sprctl; } static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; return fb->format->cpp[0] == 8 && (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)); } static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; sprctl = SPRITE_ENABLE; if (IS_IVYBRIDGE(dev_priv)) sprctl |= SPRITE_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; break; case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888; break; case DRM_FORMAT_XBGR2101010: sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX; break; case DRM_FORMAT_XRGB2101010: sprctl |= SPRITE_FORMAT_RGBX101010; break; case DRM_FORMAT_XBGR16161616F: sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX; break; case DRM_FORMAT_XRGB16161616F: sprctl |= SPRITE_FORMAT_RGBX161616; break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; break; case DRM_FORMAT_YVYU: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; break; case DRM_FORMAT_UYVY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; break; case DRM_FORMAT_VYUY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; break; default: MISSING_CASE(fb->format->format); return 0; } if (!ivb_need_sprite_gamma(plane_state)) sprctl |= SPRITE_PLANE_GAMMA_DISABLE; if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE; if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= 
SPRITE_TILED; if (rotation & DRM_MODE_ROTATE_180) sprctl |= SPRITE_ROTATE_180; if (key->flags & I915_SET_COLORKEY_DESTINATION) sprctl |= SPRITE_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) sprctl |= SPRITE_SOURCE_KEY; return sprctl; } static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, u16 gamma[18]) { int scale, i; /* * WaFP16GammaEnabling:ivb,hsw * "Workaround : When using the 64-bit format, the sprite output * on each color channel has one quarter amplitude. It can be * brought up to full amplitude by using sprite internal gamma * correction, pipe gamma correction, or pipe color space * conversion to multiply the sprite output by four." */ scale = 4; for (i = 0; i < 16; i++) gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1); gamma[i] = min((scale * i << 10) / 16, 1 << 10); i++; gamma[i] = 3 << 10; i++; } static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; u16 gamma[18]; int i; if (!ivb_need_sprite_gamma(plane_state)) return; ivb_sprite_linear_gamma(plane_state, gamma); /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) intel_de_write_fw(dev_priv, SPRGAMC(pipe, i), gamma[i] << 20 | gamma[i] << 10 | gamma[i]); intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]); intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]); intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]); i++; intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]); intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]); intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]); i++; } static void ivb_sprite_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u32 sprscale = 0; if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | SPRITE_SRC_WIDTH(src_w - 1) | SPRITE_SRC_HEIGHT(src_h - 1); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPRPOS(pipe), SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x)); intel_de_write_fw(dev_priv, SPRSIZE(pipe), SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); } static void ivb_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; u32 sprctl, linear_offset; sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (key->flags) { intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), 
key->channel_mask); intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value); } /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { intel_de_write_fw(dev_priv, SPROFFSET(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } else { intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset); intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl); intel_de_write_fw(dev_priv, SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); ivb_sprite_update_gamma(plane_state); } static void ivb_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; intel_de_write_fw(dev_priv, SPRCTL(pipe), 0); /* Disable the scaler */ if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0); intel_de_write_fw(dev_priv, SPRSURF(pipe), 0); } static bool ivb_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE; *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int hscale, pixel_rate; unsigned int limit, decimate; /* * Note that crtc_state->pixel_rate accounts for both * horizontal and vertical panel fitter downscaling factors. * Pre-HSW bspec tells us to only consider the horizontal * downscaling factor here. We ignore that and just consider * both for simplicity. */ pixel_rate = crtc_state->pixel_rate; /* Horizontal downscaling limits the maximum pixel rate */ hscale = drm_rect_calc_hscale(&plane_state->uapi.src, &plane_state->uapi.dst, 0, INT_MAX); hscale = max(hscale, 0x10000u); /* Decimation steps at 2x,4x,8x,16x */ decimate = ilog2(hscale >> 16); hscale >>= decimate; /* Starting limit is 90% of cdclk */ limit = 9; /* -10% per decimation step */ limit -= decimate; /* -10% for RGB */ if (!fb->format->is_yuv) limit--; /* * We should also do -10% if sprite scaling is enabled * on the other pipe, but we can't really check for that, * so we ignore it. */ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale), limit << 16); } static unsigned int g4x_sprite_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. 
*/ if (modifier == I915_FORMAT_MOD_X_TILED) return min(4096 * cpp, 16 * 1024); else return 16 * 1024; } static unsigned int hsw_sprite_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */ return min(8192 * cpp, 16 * 1024); } static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 dvscntr = 0; if (crtc_state->gamma_enable) dvscntr |= DVS_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) dvscntr |= DVS_PIPE_CSC_ENABLE; return dvscntr; } static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 dvscntr; dvscntr = DVS_ENABLE; if (IS_SANDYBRIDGE(dev_priv)) dvscntr |= DVS_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_XBGR8888: dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; break; case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; break; case DRM_FORMAT_XBGR2101010: dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR; break; case DRM_FORMAT_XRGB2101010: dvscntr |= DVS_FORMAT_RGBX101010; break; case DRM_FORMAT_XBGR16161616F: dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR; break; case DRM_FORMAT_XRGB16161616F: dvscntr |= DVS_FORMAT_RGBX161616; break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; break; case DRM_FORMAT_YVYU: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; break; case DRM_FORMAT_UYVY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; break; case DRM_FORMAT_VYUY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; break; default: MISSING_CASE(fb->format->format); return 0; } if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) dvscntr |= DVS_YUV_FORMAT_BT709; if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE; if (fb->modifier == I915_FORMAT_MOD_X_TILED) dvscntr |= DVS_TILED; if (rotation & DRM_MODE_ROTATE_180) dvscntr |= DVS_ROTATE_180; if (key->flags & I915_SET_COLORKEY_DESTINATION) dvscntr |= DVS_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) dvscntr |= DVS_SOURCE_KEY; return dvscntr; } static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; u16 gamma[8]; int i; /* Seems RGB data bypasses the gamma always */ if (!fb->format->is_yuv) return; i9xx_plane_linear_gamma(gamma); /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1), gamma[i] << 16 | gamma[i] << 8 | gamma[i]); } static void ilk_sprite_linear_gamma(u16 gamma[17]) { int i; for (i = 0; i < 17; i++) gamma[i] = (i << 10) / 16; } static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum 
pipe pipe = plane->pipe; u16 gamma[17]; int i; /* Seems RGB data bypasses the gamma always */ if (!fb->format->is_yuv) return; ilk_sprite_linear_gamma(gamma); /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i), gamma[i] << 20 | gamma[i] << 10 | gamma[i]); intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]); intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]); intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]); i++; } static void g4x_sprite_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u32 dvsscale = 0; if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | DVS_SRC_WIDTH(src_w - 1) | DVS_SRC_HEIGHT(src_h - 1); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, DVSPOS(pipe), DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x)); intel_de_write_fw(dev_priv, DVSSIZE(pipe), DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); } static void g4x_sprite_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 dvssurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; u32 dvscntr, linear_offset; dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (key->flags) { intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), key->channel_mask); intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value); } intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr); intel_de_write_fw(dev_priv, DVSSURF(pipe), intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) g4x_sprite_update_gamma(plane_state); else ilk_sprite_update_gamma(plane_state); } static void g4x_sprite_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0); /* Disable the scaler */ intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0); intel_de_write_fw(dev_priv, DVSSURF(pipe), 0); } static bool g4x_sprite_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE; *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static bool g4x_fb_scalable(const struct drm_framebuffer *fb) { if (!fb) return false; switch (fb->format->format) { case DRM_FORMAT_C8: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: return false; default: return true; } } static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_rect *src = &plane_state->uapi.src; const struct drm_rect *dst = &plane_state->uapi.dst; int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; unsigned int stride = plane_state->view.color_plane[0].mapping_stride; unsigned int cpp = fb->format->cpp[0]; unsigned int width_bytes; int min_width, min_height; crtc_w = drm_rect_width(dst); crtc_h = drm_rect_height(dst); src_x = src->x1 >> 16; src_w = drm_rect_width(src) >> 16; src_h = drm_rect_height(src) >> 16; if (src_w == crtc_w && src_h == crtc_h) return 0; min_width = 3; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { if (src_h & 1) { drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n"); return -EINVAL; } min_height = 6; } else { min_height = 3; } width_bytes = ((src_x * cpp) & 63) + src_w * cpp; if (src_w < min_width || src_h < min_height || src_w > 2048 || src_h > 2048) { drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", src_w, src_h, min_width, min_height, 2048, 2048); return -EINVAL; } if (width_bytes > 4096) { drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n", width_bytes, 4096); return -EINVAL; } if (stride > 4096) { drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n", stride, 4096); return -EINVAL; } return 0; } static int g4x_sprite_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int min_scale = DRM_PLANE_NO_SCALING; int max_scale = DRM_PLANE_NO_SCALING; int ret; if (g4x_fb_scalable(plane_state->hw.fb)) { if (DISPLAY_VER(dev_priv) < 7) { min_scale = 1; max_scale = 16 << 16; } else if 
(IS_IVYBRIDGE(dev_priv)) { min_scale = 1; max_scale = 2 << 16; } } ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, min_scale, max_scale, true); if (ret) return ret; ret = i9xx_check_plane_surface(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); if (ret) return ret; ret = g4x_sprite_check_scaling(crtc_state, plane_state); if (ret) return ret; if (DISPLAY_VER(dev_priv) >= 7) plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state); else plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state); return 0; } int chv_plane_check_rotation(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); unsigned int rotation = plane_state->hw.rotation; /* CHV ignores the mirror bit when the rotate bit is set :( */ if (IS_CHERRYVIEW(dev_priv) && rotation & DRM_MODE_ROTATE_180 && rotation & DRM_MODE_REFLECT_X) { drm_dbg_kms(&dev_priv->drm, "Cannot rotate and reflect at the same time\n"); return -EINVAL; } return 0; } static int vlv_sprite_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { int ret; ret = chv_plane_check_rotation(plane_state); if (ret) return ret; ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, true); if (ret) return ret; ret = i9xx_check_plane_surface(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); if (ret) return ret; plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state); return 0; } static const u32 g4x_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; static const u32 snb_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; static const u32 vlv_sprite_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; static const u32 chv_pipe_b_sprite_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; fallthrough; default: return false; } } static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_XBGR16161616F: case 
DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; fallthrough; default: return false; } } static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; fallthrough; default: return false; } } static const struct drm_plane_funcs g4x_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = g4x_sprite_format_mod_supported, }; static const struct drm_plane_funcs snb_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = snb_sprite_format_mod_supported, }; static const struct drm_plane_funcs vlv_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = vlv_sprite_format_mod_supported, }; struct intel_plane * intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int sprite) { struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; plane = intel_plane_alloc(); if (IS_ERR(plane)) return plane; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { plane->update_noarm = vlv_sprite_update_noarm; plane->update_arm = vlv_sprite_update_arm; plane->disable_arm = vlv_sprite_disable_arm; plane->get_hw_state = vlv_sprite_get_hw_state; plane->check_plane = vlv_sprite_check; plane->max_stride = i965_plane_max_stride; plane->min_cdclk = vlv_plane_min_cdclk; if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { formats = chv_pipe_b_sprite_formats; num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); } else { formats = vlv_sprite_formats; num_formats = ARRAY_SIZE(vlv_sprite_formats); } plane_funcs = &vlv_sprite_funcs; } else if (DISPLAY_VER(dev_priv) >= 7) { plane->update_noarm = ivb_sprite_update_noarm; plane->update_arm = ivb_sprite_update_arm; plane->disable_arm = ivb_sprite_disable_arm; plane->get_hw_state = ivb_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { plane->max_stride = hsw_sprite_max_stride; plane->min_cdclk = hsw_plane_min_cdclk; } else { plane->max_stride = g4x_sprite_max_stride; plane->min_cdclk = ivb_sprite_min_cdclk; } formats = snb_sprite_formats; num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { 
plane->update_noarm = g4x_sprite_update_noarm; plane->update_arm = g4x_sprite_update_arm; plane->disable_arm = g4x_sprite_disable_arm; plane->get_hw_state = g4x_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; plane->max_stride = g4x_sprite_max_stride; plane->min_cdclk = g4x_sprite_min_cdclk; if (IS_SANDYBRIDGE(dev_priv)) { formats = snb_sprite_formats; num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { formats = g4x_sprite_formats; num_formats = ARRAY_SIZE(g4x_sprite_formats); plane_funcs = &g4x_sprite_funcs; } } if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X; } else { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; } plane->pipe = pipe; plane->id = PLANE_SPRITE0 + sprite; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "sprite %c", sprite_name(pipe, sprite)); kfree(modifiers); if (ret) goto fail; drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, supported_rotations); drm_plane_create_color_properties(&plane->base, BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); zpos = sprite + 1; drm_plane_create_zpos_immutable_property(&plane->base, zpos); intel_plane_helper_add(plane); return plane; fail: intel_plane_free(plane); return ERR_PTR(ret); }
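
/*
 * Illustrative standalone sketch (not part of i915): it recomputes the
 * 18-entry ramp that ivb_sprite_linear_gamma() programs into
 * SPRGAMC/SPRGAMC16/SPRGAMC17 above, so the WaFP16GammaEnabling
 * "multiply the sprite output by four" correction can be checked by hand.
 * Values are in 1/1024 units: entries 0-15 ramp up 4x steeper than linear
 * and clamp just below 1.0, entry 16 is the 1.0 point and entry 17 the
 * 3.0 extended-range point.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int scale = 4;	/* x4 amplitude correction for FP16 sources */
	uint16_t gamma[18];
	int i;

	for (i = 0; i < 16; i++) {
		unsigned int v = (scale * i << 10) / 16;

		gamma[i] = v < (1 << 10) - 1 ? v : (1 << 10) - 1;
	}
	gamma[16] = 1 << 10;	/* min(scale * 16 / 16, 1.0) == 1.0 */
	gamma[17] = 3 << 10;	/* extended range point, 3.0 */

	for (i = 0; i < 18; i++)
		printf("gamma[%2d] = 0x%03x (%.3f)\n",
		       i, gamma[i], gamma[i] / 1024.0);

	return 0;
}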
linux-master
drivers/gpu/drm/i915/display/intel_sprite.c
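The g4x_sprite_min_cdclk() helper in the intel_sprite.c record above derives the minimum CD clock from the pipe pixel rate, the horizontal downscaling factor and the plane format: the budget starts at 90% of cdclk, drops 10% per 2x decimation step, and drops another 10% for RGB. Below is a minimal standalone sketch of that arithmetic; the function names, the plain-C ilog2 helper and the example numbers are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Simple ilog2 for the decimation-step count; assumes v >= 1. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/*
 * pixel_rate in kHz, hscale as a 16.16 fixed-point horizontal downscaling
 * factor (source width / destination width), is_yuv for the plane format.
 * Returns the minimum cdclk in kHz, rounded up.
 */
static uint64_t sprite_min_cdclk(uint32_t pixel_rate, uint32_t hscale, int is_yuv)
{
	unsigned int decimate, limit;

	if (hscale < 0x10000)
		hscale = 0x10000;		/* no credit for upscaling */

	decimate = ilog2_u32(hscale >> 16);	/* 2x,4x,8x,16x decimation steps */
	hscale >>= decimate;

	limit = 9;				/* start at 90% of cdclk */
	limit -= decimate;			/* -10% per decimation step */
	if (!is_yuv)
		limit--;			/* -10% for RGB */

	/* cdclk >= pixel_rate * hscale / (limit / 10), rounded up */
	return ((uint64_t)pixel_rate * 10 * hscale + ((uint64_t)limit << 16) - 1) /
	       ((uint64_t)limit << 16);
}

int main(void)
{
	/* e.g. a 148500 kHz pipe, 1.5x horizontal downscale, RGB sprite */
	printf("min cdclk: %llu kHz\n",
	       (unsigned long long)sprite_min_cdclk(148500, (3 * 0x10000) / 2, 0));
	return 0;
}

With a 1.5x downscale and an RGB sprite only 80% of cdclk is usable, so the example prints 278438 kHz, i.e. roughly pixel_rate * 1.5 / 0.8.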
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <linux/log2.h> #include <linux/math64.h> #include "i915_reg.h" #include "intel_cx0_phy.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_hdmi.h" #include "intel_panel.h" #include "intel_psr.h" #include "intel_tc.h" #define MB_WRITE_COMMITTED true #define MB_WRITE_UNCOMMITTED false #define for_each_cx0_lane_in_mask(__lane_mask, __lane) \ for ((__lane) = 0; (__lane) < 2; (__lane)++) \ for_each_if((__lane_mask) & BIT(__lane)) #define INTEL_CX0_LANE0 BIT(0) #define INTEL_CX0_LANE1 BIT(1) #define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0) bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy) { if (IS_METEORLAKE(i915) && (phy < PHY_C)) return true; return false; } static int lane_mask_to_lane(u8 lane_mask) { if (WARN_ON((lane_mask & ~INTEL_CX0_BOTH_LANES) || hweight8(lane_mask) != 1)) return 0; return ilog2(lane_mask); } static void assert_dc_off(struct drm_i915_private *i915) { bool enabled; enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF); drm_WARN_ON(&i915->drm, !enabled); } /* * Prepare HW for CX0 phy transactions. * * It is required that PSR and DC5/6 are disabled before any CX0 message * bus transaction is executed. */ static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_pause(intel_dp); return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF); } static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_resume(intel_dp); intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref); } static void intel_clear_response_ready_flag(struct drm_i915_private *i915, enum port port, int lane) { intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET); } static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane) { enum phy phy = intel_port_to_phy(i915, port); intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET); if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_RESET, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy)); return; } intel_clear_response_ready_flag(i915, port, lane); } static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, int command, int lane, u32 *val) { enum phy phy = intel_port_to_phy(i915, port); if (__intel_de_wait_for_register(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_PORT_P2M_RESPONSE_READY, XELPDP_MSGBUS_TIMEOUT_FAST_US, XELPDP_MSGBUS_TIMEOUT_SLOW, val)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n", phy_name(phy), *val); intel_cx0_bus_reset(i915, port, lane); return -ETIMEDOUT; } if (*val & XELPDP_PORT_P2M_ERROR_SET) { drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? 
"read" : "write", *val); intel_cx0_bus_reset(i915, port, lane); return -EINVAL; } if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) { drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val); intel_cx0_bus_reset(i915, port, lane); return -EINVAL; } return 0; } static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, int lane, u16 addr) { enum phy phy = intel_port_to_phy(i915, port); int ack; u32 val; if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy)); intel_cx0_bus_reset(i915, port, lane); return -ETIMEDOUT; } intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | XELPDP_PORT_M2P_COMMAND_READ | XELPDP_PORT_M2P_ADDRESS(addr)); ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val); if (ack < 0) return ack; intel_clear_response_ready_flag(i915, port, lane); return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port, int lane, u16 addr) { enum phy phy = intel_port_to_phy(i915, port); int i, status; assert_dc_off(i915); /* 3 tries is assumed to be enough to read successfully */ for (i = 0; i < 3; i++) { status = __intel_cx0_read_once(i915, port, lane, addr); if (status >= 0) return status; } drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", phy_name(phy), addr, i); return 0; } static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port, u8 lane_mask, u16 addr) { int lane = lane_mask_to_lane(lane_mask); return __intel_cx0_read(i915, port, lane, addr); } static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, int lane, u16 addr, u8 data, bool committed) { enum phy phy = intel_port_to_phy(i915, port); int ack; u32 val; if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(i915, port, lane); return -ETIMEDOUT; } intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING | (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED : XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) | XELPDP_PORT_M2P_DATA(data) | XELPDP_PORT_M2P_ADDRESS(addr)); if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for write to complete. 
Resetting the bus.\n", phy_name(phy)); intel_cx0_bus_reset(i915, port, lane); return -ETIMEDOUT; } if (committed) { ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val); if (ack < 0) return ack; } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) & XELPDP_PORT_P2M_ERROR_SET)) { drm_dbg_kms(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy)); intel_cx0_bus_reset(i915, port, lane); return -EINVAL; } intel_clear_response_ready_flag(i915, port, lane); return 0; } static void __intel_cx0_write(struct drm_i915_private *i915, enum port port, int lane, u16 addr, u8 data, bool committed) { enum phy phy = intel_port_to_phy(i915, port); int i, status; assert_dc_off(i915); /* 3 tries is assumed to be enough to write successfully */ for (i = 0; i < 3; i++) { status = __intel_cx0_write_once(i915, port, lane, addr, data, committed); if (status == 0) return; } drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i); } static void intel_cx0_write(struct drm_i915_private *i915, enum port port, u8 lane_mask, u16 addr, u8 data, bool committed) { int lane; for_each_cx0_lane_in_mask(lane_mask, lane) __intel_cx0_write(i915, port, lane, addr, data, committed); } static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port, int lane, u16 addr, u16 data) { assert_dc_off(i915); intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0); intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0); intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0); intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1); } static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port, int lane, u16 addr) { u16 val; assert_dc_off(i915); intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0); intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1); val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H); val <<= 8; val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L); return val; } static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port, int lane, u16 addr, u8 clear, u8 set, bool committed) { u8 old, val; old = __intel_cx0_read(i915, port, lane, addr); val = (old & ~clear) | set; if (val != old) __intel_cx0_write(i915, port, lane, addr, val, committed); } static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port, u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed) { u8 lane; for_each_cx0_lane_in_mask(lane_mask, lane) __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed); } static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state) { if (intel_crtc_has_dp_encoder(crtc_state)) { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && (crtc_state->port_clock == 540000 || crtc_state->port_clock == 810000)) return 5; else return 4; } else { return 5; } } static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state) { if (intel_crtc_has_dp_encoder(crtc_state)) { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && (crtc_state->port_clock == 540000 || crtc_state->port_clock == 810000)) return 5; else return 2; } else { return 6; } } void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(i915, encoder->port); 
intel_wakeref_t wakeref; int n_entries, ln; wakeref = intel_cx0_phy_transaction_begin(encoder); trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&i915->drm, !trans)) { intel_cx0_phy_transaction_end(encoder, wakeref); return; } if (intel_is_c10phy(i915, phy)) { intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CMN(3), C10_CMN3_TXVBOOST_MASK, C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)), MB_WRITE_UNCOMMITTED); intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_TX(1), C10_TX1_TERMCTL_MASK, C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)), MB_WRITE_COMMITTED); } for (ln = 0; ln < crtc_state->lane_count; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); int lane, tx; lane = ln / 2; tx = ln % 2; intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 0), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor), MB_WRITE_COMMITTED); intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 1), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing), MB_WRITE_COMMITTED); intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 2), C10_PHY_OVRD_LEVEL_MASK, C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor), MB_WRITE_COMMITTED); } /* Write Override enables in 0xD71 */ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_OVRD, 0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2, MB_WRITE_COMMITTED); if (intel_is_c10phy(i915, phy)) intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); intel_cx0_phy_transaction_end(encoder, wakeref); } /* * Basic DP link rates with 38.4 MHz reference clock. * Note: The tables below are with SSC. In non-ssc * registers 0xC04 to 0xC08(pll[4] to pll[8]) will be * programmed 0. 
*/ static const struct intel_c10pll_state mtl_c10_dp_rbr = { .clock = 162000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0xB4, .pll[1] = 0, .pll[2] = 0x30, .pll[3] = 0x1, .pll[4] = 0x26, .pll[5] = 0x0C, .pll[6] = 0x98, .pll[7] = 0x46, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xC0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x2, .pll[16] = 0x84, .pll[17] = 0x4F, .pll[18] = 0xE5, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_edp_r216 = { .clock = 216000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0x4, .pll[1] = 0, .pll[2] = 0xA2, .pll[3] = 0x1, .pll[4] = 0x33, .pll[5] = 0x10, .pll[6] = 0x75, .pll[7] = 0xB3, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x2, .pll[16] = 0x85, .pll[17] = 0x0F, .pll[18] = 0xE6, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_edp_r243 = { .clock = 243000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0x34, .pll[1] = 0, .pll[2] = 0xDA, .pll[3] = 0x1, .pll[4] = 0x39, .pll[5] = 0x12, .pll[6] = 0xE3, .pll[7] = 0xE9, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0x20, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x2, .pll[16] = 0x85, .pll[17] = 0x8F, .pll[18] = 0xE6, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_dp_hbr1 = { .clock = 270000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0xF4, .pll[1] = 0, .pll[2] = 0xF8, .pll[3] = 0x0, .pll[4] = 0x20, .pll[5] = 0x0A, .pll[6] = 0x29, .pll[7] = 0x10, .pll[8] = 0x1, /* Verify */ .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xA0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x1, .pll[16] = 0x84, .pll[17] = 0x4F, .pll[18] = 0xE5, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_edp_r324 = { .clock = 324000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0xB4, .pll[1] = 0, .pll[2] = 0x30, .pll[3] = 0x1, .pll[4] = 0x26, .pll[5] = 0x0C, .pll[6] = 0x98, .pll[7] = 0x46, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xC0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x1, .pll[16] = 0x85, .pll[17] = 0x4F, .pll[18] = 0xE6, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_edp_r432 = { .clock = 432000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0x4, .pll[1] = 0, .pll[2] = 0xA2, .pll[3] = 0x1, .pll[4] = 0x33, .pll[5] = 0x10, .pll[6] = 0x75, .pll[7] = 0xB3, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x1, .pll[16] = 0x85, .pll[17] = 0x0F, .pll[18] = 0xE6, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_dp_hbr2 = { .clock = 540000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0xF4, .pll[1] = 0, .pll[2] = 0xF8, .pll[3] = 0, .pll[4] = 0x20, .pll[5] = 0x0A, .pll[6] = 0x29, .pll[7] = 0x10, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xA0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0, .pll[16] = 0x84, .pll[17] = 0x4F, .pll[18] = 0xE5, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_edp_r675 = { .clock = 675000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0xB4, .pll[1] = 0, .pll[2] = 0x3E, .pll[3] = 0x1, .pll[4] = 0xA8, .pll[5] = 0x0C, .pll[6] = 0x33, .pll[7] = 0x54, .pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xC8, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0, .pll[16] = 0x85, .pll[17] = 0x8F, .pll[18] = 0xE6, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_dp_hbr3 = { .clock = 810000, .tx = 0x10, .cmn = 0x21, .pll[0] = 0x34, .pll[1] = 0, .pll[2] = 0x84, .pll[3] = 0x1, .pll[4] = 0x30, .pll[5] = 0x0F, .pll[6] = 0x3D, .pll[7] = 0x98, 
.pll[8] = 0x1, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0xF0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0, .pll[16] = 0x84, .pll[17] = 0x0F, .pll[18] = 0xE5, .pll[19] = 0x23, }; static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = { &mtl_c10_dp_rbr, &mtl_c10_dp_hbr1, &mtl_c10_dp_hbr2, &mtl_c10_dp_hbr3, NULL, }; static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = { &mtl_c10_dp_rbr, &mtl_c10_edp_r216, &mtl_c10_edp_r243, &mtl_c10_dp_hbr1, &mtl_c10_edp_r324, &mtl_c10_edp_r432, &mtl_c10_dp_hbr2, &mtl_c10_edp_r675, &mtl_c10_dp_hbr3, NULL, }; /* C20 basic DP 1.4 tables */ static const struct intel_c20pll_state mtl_c20_dp_rbr = { .link_bit_rate = 162000, .clock = 162000, .tx = { 0xbe88, /* tx cfg0 */ 0x5800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x50a8, /* mpllb cfg0 */ 0x2120, /* mpllb cfg1 */ 0xcd9a, /* mpllb cfg2 */ 0xbfc1, /* mpllb cfg3 */ 0x5ab8, /* mpllb cfg4 */ 0x4c34, /* mpllb cfg5 */ 0x2000, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x6000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0000, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_dp_hbr1 = { .link_bit_rate = 270000, .clock = 270000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x308c, /* mpllb cfg0 */ 0x2110, /* mpllb cfg1 */ 0xcc9c, /* mpllb cfg2 */ 0xbfc1, /* mpllb cfg3 */ 0x4b9a, /* mpllb cfg4 */ 0x3f81, /* mpllb cfg5 */ 0x2000, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x5000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0000, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_dp_hbr2 = { .link_bit_rate = 540000, .clock = 540000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x108c, /* mpllb cfg0 */ 0x2108, /* mpllb cfg1 */ 0xcc9c, /* mpllb cfg2 */ 0xbfc1, /* mpllb cfg3 */ 0x4b9a, /* mpllb cfg4 */ 0x3f81, /* mpllb cfg5 */ 0x2000, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x5000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0000, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_dp_hbr3 = { .link_bit_rate = 810000, .clock = 810000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x10d2, /* mpllb cfg0 */ 0x2108, /* mpllb cfg1 */ 0x8d98, /* mpllb cfg2 */ 0xbfc1, /* mpllb cfg3 */ 0x7166, /* mpllb cfg4 */ 0x5f42, /* mpllb cfg5 */ 0x2000, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x7800, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0000, /* mpllb cfg10 */ }, }; /* C20 basic DP 2.0 tables */ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = { .link_bit_rate = 1000000, /* 10 Gbps */ .clock = 312500, .tx = { 0xbe21, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mplla = { 0x3104, /* mplla cfg0 */ 0xd105, /* mplla cfg1 */ 0xc025, /* mplla cfg2 */ 0xc025, /* mplla cfg3 */ 0x8c00, /* mplla cfg4 */ 0x759a, /* mplla cfg5 */ 0x4000, /* mplla cfg6 */ 0x0003, /* mplla cfg7 */ 0x3555, /* mplla cfg8 */ 0x0001, /* mplla cfg9 */ }, }; static const struct intel_c20pll_state 
mtl_c20_dp_uhbr13_5 = { .link_bit_rate = 1350000, /* 13.5 Gbps */ .clock = 421875, .tx = { 0xbea0, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x015f, /* mpllb cfg0 */ 0x2205, /* mpllb cfg1 */ 0x1b17, /* mpllb cfg2 */ 0xffc1, /* mpllb cfg3 */ 0xe100, /* mpllb cfg4 */ 0xbd00, /* mpllb cfg5 */ 0x2000, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x4800, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0000, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { .link_bit_rate = 2000000, /* 20 Gbps */ .clock = 625000, .tx = { 0xbe20, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = {0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mplla = { 0x3104, /* mplla cfg0 */ 0xd105, /* mplla cfg1 */ 0xc025, /* mplla cfg2 */ 0xc025, /* mplla cfg3 */ 0xa6ab, /* mplla cfg4 */ 0x8c00, /* mplla cfg5 */ 0x4000, /* mplla cfg6 */ 0x0003, /* mplla cfg7 */ 0x3555, /* mplla cfg8 */ 0x0001, /* mplla cfg9 */ }, }; static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = { &mtl_c20_dp_rbr, &mtl_c20_dp_hbr1, &mtl_c20_dp_hbr2, &mtl_c20_dp_hbr3, &mtl_c20_dp_uhbr10, &mtl_c20_dp_uhbr13_5, &mtl_c20_dp_uhbr20, NULL, }; /* * HDMI link rates with 38.4 MHz reference clock. */ static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = { .clock = 25200, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x4, .pll[1] = 0, .pll[2] = 0xB2, .pll[3] = 0, .pll[4] = 0, .pll[5] = 0, .pll[6] = 0, .pll[7] = 0, .pll[8] = 0x20, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0xD, .pll[16] = 0x6, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = { .clock = 27000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0, .pll[2] = 0xC0, .pll[3] = 0, .pll[4] = 0, .pll[5] = 0, .pll[6] = 0, .pll[7] = 0, .pll[8] = 0x20, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0x80, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0xD, .pll[16] = 0x6, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = { .clock = 74250, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0, .pll[2] = 0x7A, .pll[3] = 0, .pll[4] = 0, .pll[5] = 0, .pll[6] = 0, .pll[7] = 0, .pll[8] = 0x20, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0x58, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0xB, .pll[16] = 0x6, .pll[17] = 0xF, .pll[18] = 0x85, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = { .clock = 148500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0, .pll[2] = 0x7A, .pll[3] = 0, .pll[4] = 0, .pll[5] = 0, .pll[6] = 0, .pll[7] = 0, .pll[8] = 0x20, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0x58, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0xA, .pll[16] = 0x6, .pll[17] = 0xF, .pll[18] = 0x85, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_594 = { .clock = 594000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0, .pll[2] = 0x7A, .pll[3] = 0, .pll[4] = 0, .pll[5] = 0, .pll[6] = 0, .pll[7] = 0, .pll[8] = 0x20, .pll[9] = 0x1, .pll[10] = 0, .pll[11] = 0, .pll[12] = 0x58, .pll[13] = 0, .pll[14] = 0, .pll[15] = 0x8, .pll[16] = 0x6, .pll[17] = 0xF, .pll[18] = 0x85, .pll[19] = 0x23, }; /* Precomputed C10 HDMI PLL tables */ static const struct intel_c10pll_state mtl_c10_hdmi_27027 = { .clock = 27027, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 
0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xCC, .pll[12] = 0x9C, .pll[13] = 0xCB, .pll[14] = 0xCC, .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_28320 = { .clock = 28320, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_30240 = { .clock = 30240, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_31500 = { .clock = 31500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xA0, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0C, .pll[16] = 0x09, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_36000 = { .clock = 36000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_40000 = { .clock = 40000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x55, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_49500 = { .clock = 49500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_50000 = { .clock = 50000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x2A, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_57284 = { .clock = 57284, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, 
.pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x77, .pll[12] = 0x57, .pll[13] = 0x77, .pll[14] = 0x77, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_58000 = { .clock = 58000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xD5, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_65000 = { .clock = 65000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xB5, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_71000 = { .clock = 71000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_74176 = { .clock = 74176, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_75000 = { .clock = 75000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_78750 = { .clock = 78750, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x08, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_85500 = { .clock = 85500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x10, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_88750 = { .clock = 88750, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 
0xFF, .pll[11] = 0xAA, .pll[12] = 0x72, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_106500 = { .clock = 106500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xF0, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_108000 = { .clock = 108000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x80, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_115500 = { .clock = 115500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_119000 = { .clock = 119000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_135000 = { .clock = 135000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0A, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_138500 = { .clock = 138500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x22, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_147160 = { .clock = 147160, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xA5, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_148352 = { .clock = 148352, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, 
.pll[14] = 0x44, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_154000 = { .clock = 154000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x35, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_162000 = { .clock = 162000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x60, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_167000 = { .clock = 167000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0xFA, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_197802 = { .clock = 197802, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x99, .pll[12] = 0x05, .pll[13] = 0x98, .pll[14] = 0x99, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_198000 = { .clock = 198000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_209800 = { .clock = 209800, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x45, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_241500 = { .clock = 241500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xC8, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_262750 = { .clock = 262750, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x6C, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x09, .pll[16] = 0x09, 
.pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_268500 = { .clock = 268500, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xEC, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_296703 = { .clock = 296703, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x33, .pll[12] = 0x44, .pll[13] = 0x33, .pll[14] = 0x33, .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_297000 = { .clock = 297000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x58, .pll[13] = 0x00, .pll[14] = 0x00, .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_319750 = { .clock = 319750, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x44, .pll[13] = 0xA9, .pll[14] = 0xAA, .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_497750 = { .clock = 497750, .tx = 0x10, .cmn = 0x1, .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x9F, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_592000 = { .clock = 592000, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x15, .pll[13] = 0x55, .pll[14] = 0x55, .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state mtl_c10_hdmi_593407 = { .clock = 593407, .tx = 0x10, .cmn = 0x1, .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, .pll[10] = 0xFF, .pll[11] = 0x3B, .pll[12] = 0x44, .pll[13] = 0xBA, .pll[14] = 0xBB, .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, }; static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = { &mtl_c10_hdmi_25_2, /* Consolidated Table */ &mtl_c10_hdmi_27_0, /* Consolidated Table */ &mtl_c10_hdmi_27027, &mtl_c10_hdmi_28320, &mtl_c10_hdmi_30240, &mtl_c10_hdmi_31500, &mtl_c10_hdmi_36000, &mtl_c10_hdmi_40000, &mtl_c10_hdmi_49500, &mtl_c10_hdmi_50000, &mtl_c10_hdmi_57284, &mtl_c10_hdmi_58000, &mtl_c10_hdmi_65000, &mtl_c10_hdmi_71000, &mtl_c10_hdmi_74176, 
&mtl_c10_hdmi_74_25, /* Consolidated Table */ &mtl_c10_hdmi_75000, &mtl_c10_hdmi_78750, &mtl_c10_hdmi_85500, &mtl_c10_hdmi_88750, &mtl_c10_hdmi_106500, &mtl_c10_hdmi_108000, &mtl_c10_hdmi_115500, &mtl_c10_hdmi_119000, &mtl_c10_hdmi_135000, &mtl_c10_hdmi_138500, &mtl_c10_hdmi_147160, &mtl_c10_hdmi_148352, &mtl_c10_hdmi_148_5, /* Consolidated Table */ &mtl_c10_hdmi_154000, &mtl_c10_hdmi_162000, &mtl_c10_hdmi_167000, &mtl_c10_hdmi_197802, &mtl_c10_hdmi_198000, &mtl_c10_hdmi_209800, &mtl_c10_hdmi_241500, &mtl_c10_hdmi_262750, &mtl_c10_hdmi_268500, &mtl_c10_hdmi_296703, &mtl_c10_hdmi_297000, &mtl_c10_hdmi_319750, &mtl_c10_hdmi_497750, &mtl_c10_hdmi_592000, &mtl_c10_hdmi_593407, &mtl_c10_hdmi_594, /* Consolidated Table */ NULL, }; static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = { .link_bit_rate = 25175, .clock = 25175, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0xa0d2, /* mpllb cfg0 */ 0x7d80, /* mpllb cfg1 */ 0x0906, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x0200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x0000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0001, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = { .link_bit_rate = 27000, .clock = 27000, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0xa0e0, /* mpllb cfg0 */ 0x7d80, /* mpllb cfg1 */ 0x0906, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x8000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0001, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = { .link_bit_rate = 74250, .clock = 74250, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x609a, /* mpllb cfg0 */ 0x7d40, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x5800, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0001, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = { .link_bit_rate = 148500, .clock = 148500, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x409a, /* mpllb cfg0 */ 0x7d20, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x5800, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0001, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_594 = { .link_bit_rate = 594000, .clock = 594000, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x009a, /* mpllb cfg0 */ 0x7d08, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x5800, /* mpllb cfg8 */ 
0x0000, /* mpllb cfg9 */ 0x0001, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_300 = { .link_bit_rate = 3000000, .clock = 166670, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x209c, /* mpllb cfg0 */ 0x7d10, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x2000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0004, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_600 = { .link_bit_rate = 6000000, .clock = 333330, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x009c, /* mpllb cfg0 */ 0x7d08, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x2000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0004, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_800 = { .link_bit_rate = 8000000, .clock = 444440, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x00d0, /* mpllb cfg0 */ 0x7d08, /* mpllb cfg1 */ 0x4a06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0003, /* mpllb cfg7 */ 0x2aaa, /* mpllb cfg8 */ 0x0002, /* mpllb cfg9 */ 0x0004, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { .link_bit_rate = 10000000, .clock = 555560, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x1104, /* mpllb cfg0 */ 0x7d08, /* mpllb cfg1 */ 0x0a06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0003, /* mpllb cfg7 */ 0x3555, /* mpllb cfg8 */ 0x0001, /* mpllb cfg9 */ 0x0004, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { .link_bit_rate = 12000000, .clock = 666670, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x0138, /* mpllb cfg0 */ 0x7d08, /* mpllb cfg1 */ 0x5486, /* mpllb cfg2 */ 0xfe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ 0x0000, /* mpllb cfg5 */ 0x2200, /* mpllb cfg6 */ 0x0001, /* mpllb cfg7 */ 0x4000, /* mpllb cfg8 */ 0x0000, /* mpllb cfg9 */ 0x0004, /* mpllb cfg10 */ }, }; static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = { &mtl_c20_hdmi_25_175, &mtl_c20_hdmi_27_0, &mtl_c20_hdmi_74_25, &mtl_c20_hdmi_148_5, &mtl_c20_hdmi_594, &mtl_c20_hdmi_300, &mtl_c20_hdmi_600, &mtl_c20_hdmi_800, &mtl_c20_hdmi_1000, &mtl_c20_hdmi_1200, NULL, }; static int intel_c10_phy_check_hdmi_link_rate(int clock) { const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables; int i; for (i = 0; tables[i]; i++) { if (clock == tables[i]->clock) return MODE_OK; } return MODE_CLOCK_RANGE; } static const struct intel_c10pll_state * const * intel_c10pll_tables_get(struct 
intel_crtc_state *crtc_state, struct intel_encoder *encoder) { if (intel_crtc_has_dp_encoder(crtc_state)) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return mtl_c10_edp_tables; else return mtl_c10_dp_tables; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { return mtl_c10_hdmi_tables; } MISSING_CASE(encoder->type); return NULL; } static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state; int i; if (intel_crtc_has_dp_encoder(crtc_state)) { if (intel_panel_use_ssc(i915)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); pll_state->ssc_enabled = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); } } if (pll_state->ssc_enabled) return; drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9); for (i = 4; i < 9; i++) pll_state->c10.pll[i] = 0; } static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { const struct intel_c10pll_state * const *tables; int i; tables = intel_c10pll_tables_get(crtc_state, encoder); if (!tables) return -EINVAL; for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { crtc_state->cx0pll_state.c10 = *tables[i]; intel_c10pll_update_pll(crtc_state, encoder); return 0; } } return -EINVAL; } void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 lane = INTEL_CX0_LANE0; intel_wakeref_t wakeref; int i; wakeref = intel_cx0_phy_transaction_begin(encoder); /* * According to C10 VDR Register programming Sequence we need * to do this to read PHY internal registers from MsgBus. */ intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_PLL(i)); pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0)); pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0)); intel_cx0_phy_transaction_end(encoder, wakeref); } static void intel_c10_pll_program(struct drm_i915_private *i915, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10; int i; intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); /* Custom width needs to be programmed to 0 for both the phy lanes */ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, MB_WRITE_COMMITTED); intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); /* Program the pll values only for the master lane */ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), pll_state->pll[i], (i % 4) ? 
MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED); intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); } void intel_c10pll_dump_hw_state(struct drm_i915_private *i915, const struct intel_c10pll_state *hw_state) { bool fracen; int i; unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; unsigned int multiplier, tx_clk_div; fracen = hw_state->pll[0] & C10_PLL0_FRACEN; drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen)); if (fracen) { frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11]; frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n", frac_quot, frac_rem, frac_den); } multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | hw_state->pll[2]) / 2 + 16; tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); drm_dbg_kms(&i915->drm, "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:"); drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]); } static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state) { u64 datarate; u64 mpll_tx_clk_div; u64 vco_freq_shift; u64 vco_freq; u64 multiplier; u64 mpll_multiplier; u64 mpll_fracn_quot; u64 mpll_fracn_rem; u8 mpllb_ana_freq_vco; u8 mpll_div_multiplier; if (pixel_clock < 25175 || pixel_clock > 600000) return -EINVAL; datarate = ((u64)pixel_clock * 1000) * 10; mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate)); vco_freq = (datarate << vco_freq_shift) >> 8; multiplier = div64_u64((vco_freq << 28), (REFCLK_38_4_MHZ >> 4)); mpll_multiplier = 2 * (multiplier >> 32); mpll_fracn_quot = (multiplier >> 16) & 0xFFFF; mpll_fracn_rem = multiplier & 0xFFFF; mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)), datarate), 255); if (vco_freq <= DATARATE_3000000000) mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3; else if (vco_freq <= DATARATE_3500000000) mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_2; else if (vco_freq <= DATARATE_4000000000) mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_1; else mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; pll_state->link_bit_rate = pixel_clock; pll_state->clock = pixel_clock; pll_state->tx[0] = 0xbe88; pll_state->tx[1] = 0x9800; pll_state->tx[2] = 0x0000; pll_state->cmn[0] = 0x0500; pll_state->cmn[1] = 0x0005; pll_state->cmn[2] = 0x0000; pll_state->cmn[3] = 0x0000; pll_state->mpllb[0] = (MPLL_TX_CLK_DIV(mpll_tx_clk_div) | MPLL_MULTIPLIER(mpll_multiplier)); pll_state->mpllb[1] = (CAL_DAC_CODE(CAL_DAC_CODE_31) | WORD_CLK_DIV | MPLL_DIV_MULTIPLIER(mpll_div_multiplier)); pll_state->mpllb[2] = (MPLLB_ANA_FREQ_VCO(mpllb_ana_freq_vco) | CP_PROP(CP_PROP_20) | CP_INT(CP_INT_6)); pll_state->mpllb[3] = (V2I(V2I_2) | CP_PROP_GS(CP_PROP_GS_30) | 
CP_INT_GS(CP_INT_GS_28)); pll_state->mpllb[4] = 0x0000; pll_state->mpllb[5] = 0x0000; pll_state->mpllb[6] = (C20_MPLLB_FRACEN | SSC_UP_SPREAD); pll_state->mpllb[7] = MPLL_FRACN_DEN; pll_state->mpllb[8] = mpll_fracn_quot; pll_state->mpllb[9] = mpll_fracn_rem; pll_state->mpllb[10] = HDMI_DIV(HDMI_DIV_1); return 0; } static int intel_c20_phy_check_hdmi_link_rate(int clock) { const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables; int i; for (i = 0; tables[i]; i++) { if (clock == tables[i]->link_bit_rate) return MODE_OK; } if (clock >= 25175 && clock <= 594000) return MODE_OK; return MODE_CLOCK_RANGE; } int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock) { struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi); struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); if (intel_is_c10phy(i915, phy)) return intel_c10_phy_check_hdmi_link_rate(clock); return intel_c20_phy_check_hdmi_link_rate(clock); } static const struct intel_c20pll_state * const * intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { if (intel_crtc_has_dp_encoder(crtc_state)) return mtl_c20_dp_tables; else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return mtl_c20_hdmi_tables; MISSING_CASE(encoder->type); return NULL; } static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { const struct intel_c20pll_state * const *tables; int i; /* try computed C20 HDMI tables before using consolidated tables */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock, &crtc_state->cx0pll_state.c20) == 0) return 0; } tables = intel_c20_pll_tables_get(crtc_state, encoder); if (!tables) return -EINVAL; for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->link_bit_rate) { crtc_state->cx0pll_state.c20 = *tables[i]; return 0; } } return -EINVAL; } int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_is_c10phy(i915, phy)) return intel_c10pll_calc_state(crtc_state, encoder); return intel_c20pll_calc_state(crtc_state, encoder); } static bool intel_c20_use_mplla(u32 clock) { /* 10G and 20G rates use MPLLA */ if (clock == 312500 || clock == 625000) return true; return false; } void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); bool cntx; intel_wakeref_t wakeref; int i; wakeref = intel_cx0_phy_transaction_begin(encoder); /* 1. 
Read current context selection */ cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE; /* Read Tx configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i)); else pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i)); } /* Read common configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i)); else pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i)); } if (pll_state->tx[0] & C20_PHY_USE_MPLLB) { /* MPLLB configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_MPLLB_CNTX_CFG(i)); else pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_MPLLB_CNTX_CFG(i)); } } else { /* MPLLA configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_MPLLA_CNTX_CFG(i)); else pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_MPLLA_CNTX_CFG(i)); } } intel_cx0_phy_transaction_end(encoder, wakeref); } void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, const struct intel_c20pll_state *hw_state) { int i; drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n"); drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); if (intel_c20_use_mplla(hw_state->clock)) { for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); } else { for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); } } static u8 intel_c20_get_dp_rate(u32 clock) { switch (clock) { case 162000: /* 1.62 Gbps DP1.4 */ return 0; case 270000: /* 2.7 Gbps DP1.4 */ return 1; case 540000: /* 5.4 Gbps DP 1.4 */ return 2; case 810000: /* 8.1 Gbps DP1.4 */ return 3; case 216000: /* 2.16 Gbps eDP */ return 4; case 243000: /* 2.43 Gbps eDP */ return 5; case 324000: /* 3.24 Gbps eDP */ return 6; case 432000: /* 4.32 Gbps eDP */ return 7; case 312500: /* 10 Gbps DP2.0 */ return 8; case 421875: /* 13.5 Gbps DP2.0 */ return 9; case 625000: /* 20 Gbps DP2.0*/ return 10; case 648000: /* 6.48 Gbps eDP*/ return 11; case 675000: /* 6.75 Gbps eDP*/ return 12; default: MISSING_CASE(clock); return 0; } } static u8 intel_c20_get_hdmi_rate(u32 clock) { if (clock >= 25175 && clock <= 600000) return 0; switch (clock) { case 166670: /* 3 Gbps */ case 333330: /* 6 Gbps */ case 666670: /* 12 Gbps */ return 1; case 444440: /* 8 Gbps */ return 2; case 555560: /* 10 Gbps */ return 3; default: MISSING_CASE(clock); return 0; } } static bool is_dp2(u32 clock) { /* DP2.0 clock rates */ if (clock == 312500 || clock == 421875 || clock == 625000) return true; return false; } static bool is_hdmi_frl(u32 clock) { switch (clock) { case 166670: /* 3 Gbps */ case 333330: /* 6 Gbps */ case 444440: /* 8 Gbps */ case 555560: /* 10 
Gbps */ case 666670: /* 12 Gbps */ return true; default: return false; } } static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); /* banks should not be cleared for DPALT/USB4/TBT modes */ /* TODO: optimize re-calibration in legacy mode */ return intel_tc_port_in_legacy_mode(intel_dig_port); } static int intel_get_c20_custom_width(u32 clock, bool dp) { if (dp && is_dp2(clock)) return 2; else if (is_hdmi_frl(clock)) return 1; else return 0; } static void intel_c20_pll_program(struct drm_i915_private *i915, const struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20; bool dp = false; int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0; bool cntx; int i; if (intel_crtc_has_dp_encoder(crtc_state)) dp = true; /* 1. Read current context selection */ cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0); /* * 2. If there is a protocol switch from HDMI to DP or vice versa, clear * the lane #0 MPLLB CAL_DONE_BANK DP2.0 10G and 20G rates enable MPLLA. * Protocol switch is only applicable for MPLLA */ if (intel_c20_protocol_switch_valid(encoder)) { for (i = 0; i < 4; i++) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0); usleep_range(4000, 4100); } /* 3. Write SRAM configuration context. If A in use, write configuration to B context */ /* 3.1 Tx configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { if (cntx) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]); else intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]); } /* 3.2 common configuration */ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { if (cntx) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]); else intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]); } /* 3.3 mpllb or mplla configuration */ if (intel_c20_use_mplla(pll_state->clock)) { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_MPLLA_CNTX_CFG(i), pll_state->mplla[i]); else intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_MPLLA_CNTX_CFG(i), pll_state->mplla[i]); } } else { for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { if (cntx) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_MPLLB_CNTX_CFG(i), pll_state->mpllb[i]); else intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_MPLLB_CNTX_CFG(i), pll_state->mpllb[i]); } } /* 4. Program custom width to match the link protocol */ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH, PHY_C20_CUSTOM_WIDTH_MASK, PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)), MB_WRITE_COMMITTED); /* 5. For DP or 6. For HDMI */ if (dp) { intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)), MB_WRITE_COMMITTED); } else { intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, is_hdmi_frl(pll_state->clock) ? 
BIT(7) : 0, MB_WRITE_COMMITTED); intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, intel_c20_get_hdmi_rate(pll_state->clock), MB_WRITE_COMMITTED); } /* * 7. Write Vendor specific registers to toggle context setting to load * the updated programming toggle context bit */ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED); } int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_c10pll_state *pll_state) { unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400; int tmpclk = 0; if (pll_state->pll[0] & C10_PLL0_FRACEN) { frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11]; frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13]; frac_den = pll_state->pll[10] << 8 | pll_state->pll[9]; } multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 | pll_state->pll[2]) / 2 + 16; tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]); hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]); tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) + DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den), 10 << (tx_clk_div + 16)); tmpclk *= (hdmi_div ? 2 : 1); return tmpclk; } int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_c20pll_state *pll_state) { unsigned int frac, frac_en, frac_quot, frac_rem, frac_den; unsigned int multiplier, refclk = 38400; unsigned int tx_clk_div; unsigned int ref_clk_mpllb_div; unsigned int fb_clk_div4_en; unsigned int ref, vco; unsigned int tx_rate_mult; unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]); if (pll_state->tx[0] & C20_PHY_USE_MPLLB) { tx_rate_mult = 1; frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]); frac_quot = pll_state->mpllb[8]; frac_rem = pll_state->mpllb[9]; frac_den = pll_state->mpllb[7]; multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]); tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]); ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]); fb_clk_div4_en = 0; } else { tx_rate_mult = 2; frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]); frac_quot = pll_state->mplla[8]; frac_rem = pll_state->mplla[9]; frac_den = pll_state->mplla[7]; multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]); tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]); ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]); fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]); } if (frac_en) frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den); else frac = 0; ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div); vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10); return vco << tx_rate_mult >> tx_clk_div >> tx_rate; } static void intel_program_port_clock_ctl(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, bool lane_reversal) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 val = 0; intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL, lane_reversal ? 
XELPDP_PORT_REVERSAL : 0); if (lane_reversal) val |= XELPDP_LANE1_PHY_CLOCK_SELECT; val |= XELPDP_FORWARD_CLOCK_UNGATE; if (is_hdmi_frl(crtc_state->port_clock)) val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK); else val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK); /* TODO: HDMI FRL */ /* DP2.0 10G and 20G rates enable MPLLA*/ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000) val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; else val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB, val); } static u32 intel_cx0_get_powerdown_update(u8 lane_mask) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_POWERDOWN_UPDATE(lane); return val; } static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state); return val; } static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915, enum port port, u8 lane_mask, u8 state) { enum phy phy = intel_port_to_phy(i915, port); int lane; intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), intel_cx0_get_powerdown_state(lane_mask, state)); /* Wait for pending transactions.*/ for_each_cx0_lane_in_mask(lane_mask, lane) if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane), XELPDP_PORT_M2P_TRANSACTION_PENDING, XELPDP_MSGBUS_TIMEOUT_SLOW)) { drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", phy_name(phy)); intel_cx0_bus_reset(i915, port, lane); } intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), intel_cx0_get_powerdown_update(lane_mask)); /* Update Timeout Value */ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port), intel_cx0_get_powerdown_update(lane_mask), 0, XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); } static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port) { intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), XELPDP_POWER_STATE_READY_MASK, XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY)); intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port), XELPDP_POWER_STATE_ACTIVE_MASK | XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) | XELPDP_PLL_LANE_STAGGERING_DELAY(0)); } static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane); return val; } static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane); return val; } static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, struct intel_encoder *encoder, bool lane_reversal) { enum port port = encoder->port; enum phy phy = intel_port_to_phy(i915, port); bool both_lanes = intel_tc_port_fia_max_lane_count(enc_to_dig_port(encoder)) > 2; u8 lane_mask = lane_reversal ? 
INTEL_CX0_LANE1 : INTEL_CX0_LANE0; u32 lane_pipe_reset = both_lanes ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1) : XELPDP_LANE_PIPE_RESET(0); u32 lane_phy_current_status = both_lanes ? XELPDP_LANE_PHY_CURRENT_STATUS(0) | XELPDP_LANE_PHY_CURRENT_STATUS(1) : XELPDP_LANE_PHY_CURRENT_STATUS(0); if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port), XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_PHY_READY, XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1), lane_pipe_reset); if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status, lane_phy_current_status, XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n", phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US); intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port), intel_cx0_get_pclk_refclk_request(both_lanes ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0), intel_cx0_get_pclk_refclk_request(lane_mask)); if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port), intel_cx0_get_pclk_refclk_ack(both_lanes ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0), intel_cx0_get_pclk_refclk_ack(lane_mask), XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n", phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US); intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_RESET); intel_cx0_setup_powerdown(i915, port); intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0); if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status, XELPDP_PORT_RESET_END_TIMEOUT)) drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n", phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT); } static void intel_cx0_program_phy_lane(struct drm_i915_private *i915, struct intel_encoder *encoder, int lane_count, bool lane_reversal) { u8 l0t1, l0t2, l1t1, l1t2; bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder)); enum port port = encoder->port; if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); /* TODO: DP-alt MFD case where only one PHY lane should be programmed. 
*/ l0t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2)); l0t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2)); l1t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2)); l1t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2)); l0t1 |= CONTROL2_DISABLE_SINGLE_TX; l0t2 |= CONTROL2_DISABLE_SINGLE_TX; l1t1 |= CONTROL2_DISABLE_SINGLE_TX; l1t2 |= CONTROL2_DISABLE_SINGLE_TX; if (lane_reversal) { switch (lane_count) { case 4: l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; fallthrough; case 3: l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; fallthrough; case 2: l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX; fallthrough; case 1: l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX; break; default: MISSING_CASE(lane_count); } } else { switch (lane_count) { case 4: l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX; fallthrough; case 3: l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX; fallthrough; case 2: l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; break; case 1: if (dp_alt_mode) l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX; else l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX; break; default: MISSING_CASE(lane_count); } } /* disable MLs */ intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2), l0t1, MB_WRITE_COMMITTED); intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2), l0t2, MB_WRITE_COMMITTED); intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2), l1t1, MB_WRITE_COMMITTED); intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2), l1t2, MB_WRITE_COMMITTED); if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1), 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED); } static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane); return val; } static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) { u32 val = 0; int lane = 0; for_each_cx0_lane_in_mask(lane_mask, lane) val |= XELPDP_LANE_PCLK_PLL_ACK(lane); return val; } static void intel_cx0pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0; intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); /* * 1. Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal); /* 2. Bring PHY out of reset. */ intel_cx0_phy_lane_reset(i915, encoder, lane_reversal); /* * 3. Change Phy power state to Ready. * TODO: For DP alt mode use only one lane. */ intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, CX0_P2_STATE_READY); /* 4. Program PHY internal PLL internal registers. */ if (intel_is_c10phy(i915, phy)) intel_c10_pll_program(i915, crtc_state, encoder); else intel_c20_pll_program(i915, crtc_state, encoder); /* * 5. Program the enabled and disabled owned PHY lane * transmitters over message bus */ intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal); /* * 6. Follow the Display Voltage Frequency Switching - Sequence * Before Frequency Change. We handle this step in bxt_set_cdclk(). */ /* * 7. 
Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); /* * 8. Set PORT_CLOCK_CTL register PCLK PLL Request * LN<Lane for maxPCLK> to "1" to enable PLL. */ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_request(maxpclk_lane)); /* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), intel_cx0_get_pclk_pll_ack(maxpclk_lane), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US); /* * 10. Follow the Display Voltage Frequency Switching Sequence After * Frequency Change. We handle this step in bxt_set_cdclk(). */ /* TODO: enable TBT-ALT mode */ intel_cx0_phy_transaction_end(encoder, wakeref); } int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 clock; u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK)); switch (clock) { case XELPDP_DDI_CLOCK_SELECT_TBT_162: return 162000; case XELPDP_DDI_CLOCK_SELECT_TBT_270: return 270000; case XELPDP_DDI_CLOCK_SELECT_TBT_540: return 540000; case XELPDP_DDI_CLOCK_SELECT_TBT_810: return 810000; default: MISSING_CASE(clock); return 162000; } } static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock) { switch (clock) { case 162000: return XELPDP_DDI_CLOCK_SELECT_TBT_162; case 270000: return XELPDP_DDI_CLOCK_SELECT_TBT_270; case 540000: return XELPDP_DDI_CLOCK_SELECT_TBT_540; case 810000: return XELPDP_DDI_CLOCK_SELECT_TBT_810; default: MISSING_CASE(clock); return XELPDP_DDI_CLOCK_SELECT_TBT_162; } } static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); u32 val = 0; /* * 1. Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock)); val |= XELPDP_FORWARD_CLOCK_UNGATE; intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); /* 2. Read back PORT_CLOCK_CTL REGISTER */ val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); /* * 3. Follow the Display Voltage Frequency Switching - Sequence * Before Frequency Change. We handle this step in bxt_set_cdclk(). */ /* * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. */ val |= XELPDP_TBT_CLOCK_REQUEST; intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val); /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_TBT_CLOCK_ACK, XELPDP_TBT_CLOCK_ACK, 100, 0, NULL)) drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* * 6. Follow the Display Voltage Frequency Switching Sequence After * Frequency Change. 
We handle this step in bxt_set_cdclk(). */ /* * 7. Program DDI_CLK_VALFREQ to match intended DDI * clock frequency. */ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); } void intel_mtl_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); if (intel_tc_port_in_tbt_alt_mode(dig_port)) intel_mtl_tbt_pll_enable(encoder, crtc_state); else intel_cx0pll_enable(encoder, crtc_state); } static void intel_cx0pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); bool is_c10 = intel_is_c10phy(i915, phy); intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); /* 1. Change owned PHY lane power to Disable state. */ intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES, is_c10 ? CX0_P2PG_STATE_DISABLE : CX0_P4PG_STATE_DISABLE); /* * 2. Follow the Display Voltage Frequency Switching Sequence Before * Frequency Change. We handle this step in bxt_set_cdclk(). */ /* * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> * to "0" to disable PLL. */ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); /* 4. Program DDI_CLK_VALFREQ to 0. */ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); /* * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". */ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0, XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL)) drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n", phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US); /* * 6. Follow the Display Voltage Frequency Switching Sequence After * Frequency Change. We handle this step in bxt_set_cdclk(). */ /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK, 0); intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_FORWARD_CLOCK_UNGATE, 0); intel_cx0_phy_transaction_end(encoder, wakeref); } static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); /* * 1. Follow the Display Voltage Frequency Switching Sequence Before * Frequency Change. We handle this step in bxt_set_cdclk(). */ /* * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. */ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_TBT_CLOCK_REQUEST, 0); /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL)) drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n", encoder->base.base.id, encoder->base.name, phy_name(phy)); /* * 4. Follow the Display Voltage Frequency Switching Sequence After * Frequency Change. We handle this step in bxt_set_cdclk(). */ /* * 5. Program PORT CLOCK CTRL register to disable and gate clocks */ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, 0); /* 6. Program DDI_CLK_VALFREQ to 0. 
*/ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0); } void intel_mtl_pll_disable(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); if (intel_tc_port_in_tbt_alt_mode(dig_port)) intel_mtl_tbt_pll_disable(encoder); else intel_cx0pll_disable(encoder); } enum icl_port_dpll_id intel_mtl_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); /* * TODO: Determine the PLL type from the SW state, once MTL PLL * handling is done via the standard shared DPLL framework. */ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port)); u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK || clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK) return ICL_PORT_DPLL_MG_PHY; else return ICL_PORT_DPLL_DEFAULT; } void intel_c10pll_state_verify(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_c10pll_state mpllb_hw_state = { 0 }; struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_encoder *encoder; enum phy phy; int i; if (DISPLAY_VER(i915) < 14) return; if (!new_crtc_state->hw.active) return; /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */ if (!intel_crtc_needs_modeset(new_crtc_state) && !intel_crtc_needs_fastset(new_crtc_state)) return; encoder = intel_get_crtc_new_encoder(state, new_crtc_state); phy = intel_port_to_phy(i915, encoder->port); if (!intel_is_c10phy(i915, phy)) return; intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state); for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { u8 expected = mpllb_sw_state->pll[i]; I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected, "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", crtc->base.base.id, crtc->base.name, i, expected, mpllb_hw_state.pll[i]); } I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx, "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", crtc->base.base.id, crtc->base.name, mpllb_sw_state->tx, mpllb_hw_state.tx); I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn, "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", crtc->base.base.id, crtc->base.name, mpllb_sw_state->cmn, mpllb_hw_state.cmn); }
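/*
 * Illustrative note (not part of the original source): a minimal,
 * self-contained sketch of the port-clock formula used by
 * intel_c10pll_calc_port_clock() above, with the register decode done by
 * hand for one of the consolidated table entries.  The multiplier and
 * frac_quot/frac_rem/frac_den values below come directly from the
 * mtl_c10_hdmi_148352 entry (pll[2], pll[9]..pll[14]); tx_clk_div = 2 and
 * hdmi_div = 1 are *assumed* decodes of pll[15] = 0x0A, since the exact
 * layouts of C10_PLL15_TXCLKDIV_MASK / C10_PLL15_HDMIDIV_MASK are not shown
 * in this file.  The helper names (div_round_closest_u64,
 * c10_port_clock_example) are illustrative only and do not exist in the
 * driver.  Guarded out so it never affects the real build.
 */
#if 0	/* standalone worked example, not compiled into the driver */
#include <stdint.h>
#include <stdio.h>

/* Same rounding behaviour as the kernel's DIV_ROUND_CLOSEST for positive values. */
static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

static unsigned int c10_port_clock_example(void)
{
	const unsigned int refclk = 38400;			/* kHz */
	const unsigned int multiplier = (0x00 << 8 | 0x7A) / 2 + 16;	/* pll[3]:pll[2] -> 77 */
	const unsigned int frac_quot = 0x44 << 8 | 0x44;	/* pll[12]:pll[11] */
	const unsigned int frac_rem  = 0x44 << 8 | 0x44;	/* pll[14]:pll[13] */
	const unsigned int frac_den  = 0xFF << 8 | 0xFF;	/* pll[10]:pll[9] */
	const unsigned int tx_clk_div = 2;	/* assumed decode of pll[15] = 0x0A */
	const unsigned int hdmi_div = 1;	/* assumed decode of pll[15] = 0x0A */
	uint64_t num;

	/* refclk * (multiplier.frac), then divide by 10 * 2^tx_clk_div. */
	num = (uint64_t)refclk * ((multiplier << 16) + frac_quot) +
	      div_round_closest_u64((uint64_t)refclk * frac_rem, frac_den);

	return div_round_closest_u64(num, 10 << (tx_clk_div + 16)) *
	       (hdmi_div ? 2 : 1);
}

int main(void)
{
	/* Prints 148352, matching the mtl_c10_hdmi_148352 table entry. */
	printf("%u\n", c10_port_clock_example());
	return 0;
}
#endif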
linux-master
drivers/gpu/drm/i915/display/intel_cx0_phy.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation * * HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code). */ #include "g4x_hdmi.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_sdvo.h" #include "vlv_sideband.h" static void intel_hdmi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 hdmi_val; intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); hdmi_val = SDVO_ENCODING_HDMI; if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range) hdmi_val |= HDMI_COLOR_RANGE_16_235; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; if (crtc_state->pipe_bpp > 24) hdmi_val |= HDMI_COLOR_FORMAT_12bpc; else hdmi_val |= SDVO_COLOR_FORMAT_8bpc; if (crtc_state->has_hdmi_sink) hdmi_val |= HDMI_MODE_SELECT_HDMI; if (HAS_PCH_CPT(dev_priv)) hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe); else if (IS_CHERRYVIEW(dev_priv)) hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe); else hdmi_val |= SDVO_PIPE_SEL(crtc->pipe); intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); intel_wakeref_t wakeref; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe); intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } static bool connector_is_hdmi(struct drm_connector *connector) { struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); return encoder && encoder->type == INTEL_OUTPUT_HDMI; } static bool g4x_compute_has_hdmi_sink(struct intel_atomic_state *state, struct intel_crtc *this_crtc) { const struct drm_connector_state *conn_state; struct drm_connector *connector; int i; /* * On g4x only one HDMI port can transmit infoframes/audio at * any given time. Select the first suitable port for this duty. * * See also g4x_hdmi_connector_atomic_check(). 
*/ for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (!connector_is_hdmi(connector)) continue; crtc = to_intel_crtc(conn_state->crtc); if (!crtc) continue; crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state)) continue; return crtc == this_crtc; } return false; } static int g4x_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (HAS_PCH_SPLIT(i915)) crtc_state->has_pch_encoder = true; if (IS_G4X(i915)) crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc); else crtc_state->has_hdmi_sink = intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state); return intel_hdmi_compute_config(encoder, crtc_state, conn_state); } static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; int dotclock; pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); if (tmp & SDVO_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (tmp & SDVO_VSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; if (tmp & HDMI_AUDIO_ENABLE) pipe_config->has_audio = true; if (!HAS_PCH_SPLIT(dev_priv) && tmp & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; pipe_config->hw.adjusted_mode.flags |= flags; if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3); else dotclock = pipe_config->port_clock; if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; pipe_config->hw.adjusted_mode.crtc_clock = dotclock; pipe_config->lane_count = 4; intel_hdmi_read_gcp_infoframe(encoder, pipe_config); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_AVI, &pipe_config->infoframes.avi); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_SPD, &pipe_config->infoframes.spd); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_VENDOR, &pipe_config->infoframes.hdmi); intel_audio_codec_get_config(encoder, pipe_config); } static void g4x_hdmi_enable_port(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) temp |= HDMI_AUDIO_ENABLE; intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } static void g4x_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder 
*encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); g4x_hdmi_enable_port(encoder, pipe_config); drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && !pipe_config->has_hdmi_sink); intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void ibx_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) temp |= HDMI_AUDIO_ENABLE; /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. */ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround, need to toggle enable bit off and on * for 12bpc with pixel repeat. * * FIXME: BSpec says this should be done at the end of * the modeset sequence, so not sure if this isn't too soon. */ if (pipe_config->pipe_bpp > 24 && pipe_config->pixel_multiplier > 1) { intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. */ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && !pipe_config->has_hdmi_sink); intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void cpt_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); enum pipe pipe = crtc->pipe; u32 temp; temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) temp |= HDMI_AUDIO_ENABLE; /* * WaEnableHDMI8bpcBefore12bpc:snb,ivb * * The procedure for 12bpc is as follows: * 1. disable HDMI clock gating * 2. enable HDMI with 8bpc * 3. enable HDMI with 12bpc * 4. 
enable HDMI clock gating */ if (pipe_config->pipe_bpp > 24) { intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= SDVO_COLOR_FORMAT_8bpc; } intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); if (pipe_config->pipe_bpp > 24) { temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= HDMI_COLOR_FORMAT_12bpc; intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); } drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && !pipe_config->has_hdmi_sink); intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void vlv_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && !pipe_config->has_hdmi_sink); intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void intel_disable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_digital_port *dig_port = hdmi_to_dig_port(intel_hdmi); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE); intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround for IBX, we need to move the port * to transcoder A after disabling it to allow the * matching DP port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) { /* * We get CPU/PCH FIFO underruns on the other pipe when * doing the workaround. Sweep them under the rug. */ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); temp &= ~SDVO_PIPE_SEL_MASK; temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. 
*/ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); temp &= ~SDVO_ENABLE; intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } static void g4x_disable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); } static void pch_disable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); } static void pch_post_disable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); } static void intel_hdmi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_hdmi_prepare(encoder, pipe_config); dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); } static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); /* HDMI 1.0V-2dB */ vlv_set_phy_signal_level(encoder, pipe_config, 0x2b245f5f, 0x00002000, 0x5578b83a, 0x2b247878); dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); g4x_hdmi_enable_port(encoder, pipe_config); vlv_wait_port_ready(dev_priv, dig_port, 0x0); } static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_hdmi_prepare(encoder, pipe_config); vlv_phy_pre_pll_enable(encoder, pipe_config); } static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_hdmi_prepare(encoder, pipe_config); chv_phy_pre_pll_enable(encoder, pipe_config); } static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { chv_phy_post_pll_disable(encoder, old_crtc_state); } static void vlv_hdmi_post_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct 
drm_connector_state *old_conn_state) { /* Reset lanes to avoid HDMI flicker (VLV w/a) */ vlv_phy_reset_lanes(encoder, old_crtc_state); } static void chv_hdmi_post_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); vlv_dpio_get(dev_priv); /* Assert data lane reset */ chv_data_lane_soft_reset(encoder, old_crtc_state, true); vlv_dpio_put(dev_priv); } static void chv_hdmi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); chv_phy_pre_encoder_enable(encoder, pipe_config); /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false); dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); g4x_hdmi_enable_port(encoder, pipe_config); vlv_wait_port_ready(dev_priv, dig_port, 0x0); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); } static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { .destroy = intel_encoder_destroy, }; static enum intel_hotplug_state intel_hdmi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { enum intel_hotplug_state state; state = intel_encoder_hotplug(encoder, connector); /* * On many platforms the HDMI live state signal is known to be * unreliable, so we can't use it to detect if a sink is connected or * not. Instead we detect if it's connected based on whether we can * read the EDID or not. That in turn has a problem during disconnect, * since the HPD interrupt may be raised before the DDC lines get * disconnected (due to how the required length of DDC vs. HPD * connector pins are specified) and so we'll still be able to get a * valid EDID. To solve this schedule another detection cycle if this * time around we didn't detect any change in the sink's connection * status. */ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) state = INTEL_HOTPLUG_RETRY; return state; } int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->dev); struct drm_connector_list_iter conn_iter; struct drm_connector *conn; int ret; ret = intel_digital_connector_atomic_check(connector, state); if (ret) return ret; if (!IS_G4X(i915)) return 0; if (!intel_connector_needs_modeset(to_intel_atomic_state(state), connector)) return 0; /* * On g4x only one HDMI port can transmit infoframes/audio * at any given time. Make sure all enabled HDMI ports are * included in the state so that it's possible to select * one of them for this duty. * * See also g4x_compute_has_hdmi_sink(). 
*/ drm_connector_list_iter_begin(&i915->drm, &conn_iter); drm_for_each_connector_iter(conn, &conn_iter) { struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; if (!connector_is_hdmi(conn)) continue; drm_dbg_kms(&i915->drm, "Adding [CONNECTOR:%d:%s]\n", conn->base.id, conn->name); conn_state = drm_atomic_get_connector_state(state, conn); if (IS_ERR(conn_state)) { ret = PTR_ERR(conn_state); break; } crtc = conn_state->crtc; if (!crtc) continue; crtc_state = drm_atomic_get_new_crtc_state(state, crtc); crtc_state->mode_changed = true; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) break; } drm_connector_list_iter_end(&conn_iter); return ret; } static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port) { if (IS_G4X(i915) || IS_VALLEYVIEW(i915)) return port == PORT_B || port == PORT_C; else return port == PORT_B || port == PORT_C || port == PORT_D; } static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port) { return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port), "Platform does not support HDMI %c\n", port_name(port)); } void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; if (!assert_port_valid(dev_priv, port)) return; if (!assert_hdmi_port_valid(dev_priv, port)) return; devdata = intel_bios_encoder_data_lookup(dev_priv, port); /* FIXME bail? */ if (!devdata) drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n", port_name(port)); dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return; dig_port->aux_ch = AUX_CH_NONE; intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(dig_port); return; } intel_encoder = &dig_port->base; intel_encoder->devdata = devdata; mutex_init(&dig_port->hdcp_mutex); drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); intel_encoder->hotplug = intel_hdmi_hotplug; intel_encoder->compute_config = g4x_hdmi_compute_config; if (HAS_PCH_SPLIT(dev_priv)) { intel_encoder->disable = pch_disable_hdmi; intel_encoder->post_disable = pch_post_disable_hdmi; } else { intel_encoder->disable = g4x_disable_hdmi; } intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_CHERRYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable; intel_encoder->pre_enable = chv_hdmi_pre_enable; intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = chv_hdmi_post_disable; intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable; } else if (IS_VALLEYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; intel_encoder->pre_enable = vlv_hdmi_pre_enable; intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = vlv_hdmi_post_disable; } else { intel_encoder->pre_enable = intel_hdmi_pre_enable; if (HAS_PCH_CPT(dev_priv)) intel_encoder->enable = cpt_enable_hdmi; else if (HAS_PCH_IBX(dev_priv)) intel_encoder->enable = ibx_enable_hdmi; else intel_encoder->enable = g4x_enable_hdmi; } intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); intel_encoder->port = port; if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) intel_encoder->pipe_mask = 
BIT(PIPE_C); else intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG); intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); /* * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems * to work on real hardware. And since g4x can send infoframes to * only one port anyway, nothing is lost by allowing it. */ if (IS_G4X(dev_priv)) intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI); dig_port->hdmi.hdmi_reg = hdmi_reg; dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = 4; intel_infoframe_init(dig_port); intel_hdmi_init_connector(dig_port, intel_connector); }
linux-master
drivers/gpu/drm/i915/display/g4x_hdmi.c
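The dotclock derivation in intel_hdmi_get_config() in the file above compresses a small piece of arithmetic: with a 12bpc (deep color) format the TMDS link runs at 1.5x the pixel clock, so the pixel clock is recovered as port_clock * 2 / 3, and any pixel repetition is then divided back out. The following is a minimal standalone sketch of that arithmetic only, not kernel code; the helper names (div_round_closest, hdmi_dotclock_khz) and the sample clocks in main() are hypothetical and chosen just for illustration.

/*
 * Illustrative sketch only (not part of the kernel sources): recover the
 * pixel clock from the port clock the way intel_hdmi_get_config() does.
 */
#include <stdio.h>

/* Round-to-nearest integer division, mirroring the kernel's DIV_ROUND_CLOSEST(). */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

/*
 * port_clock_khz:   link clock in kHz
 * is_12bpc:         nonzero when the sink is driven with a 12bpc format,
 *                   where the link runs at 1.5x the pixel clock
 * pixel_multiplier: pixel repetition factor (0 or 1 means none)
 */
static int hdmi_dotclock_khz(int port_clock_khz, int is_12bpc, int pixel_multiplier)
{
	int dotclock = is_12bpc ? div_round_closest(port_clock_khz * 2, 3)
				: port_clock_khz;

	if (pixel_multiplier)
		dotclock /= pixel_multiplier;

	return dotclock;
}

int main(void)
{
	/* 1080p60 at 12bpc: 148500 kHz pixel clock -> 222750 kHz on the link */
	printf("%d\n", hdmi_dotclock_khz(222750, 1, 1));	/* -> 148500 */
	/* pixel-doubled 27 MHz mode at 8bpc */
	printf("%d\n", hdmi_dotclock_khz(54000, 0, 2));		/* -> 27000 */
	return 0;
}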
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic_plane.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_psr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" #include "pxp/intel_pxp.h" static const u32 skl_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_XYUV8888, }; static const u32 skl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, DRM_FORMAT_XYUV8888, }; static const u32 glk_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, DRM_FORMAT_XYUV8888, DRM_FORMAT_P010, DRM_FORMAT_P012, DRM_FORMAT_P016, }; static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, }; static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, DRM_FORMAT_P010, DRM_FORMAT_P012, DRM_FORMAT_P016, DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, }; static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_ARGB16161616F, DRM_FORMAT_ABGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, DRM_FORMAT_P010, DRM_FORMAT_P012, DRM_FORMAT_P016, DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, DRM_FORMAT_XVYU12_16161616, DRM_FORMAT_XVYU16161616, }; int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) { switch (format) { case PLANE_CTL_FORMAT_RGB_565: return DRM_FORMAT_RGB565; case PLANE_CTL_FORMAT_NV12: return 
DRM_FORMAT_NV12; case PLANE_CTL_FORMAT_XYUV: return DRM_FORMAT_XYUV8888; case PLANE_CTL_FORMAT_P010: return DRM_FORMAT_P010; case PLANE_CTL_FORMAT_P012: return DRM_FORMAT_P012; case PLANE_CTL_FORMAT_P016: return DRM_FORMAT_P016; case PLANE_CTL_FORMAT_Y210: return DRM_FORMAT_Y210; case PLANE_CTL_FORMAT_Y212: return DRM_FORMAT_Y212; case PLANE_CTL_FORMAT_Y216: return DRM_FORMAT_Y216; case PLANE_CTL_FORMAT_Y410: return DRM_FORMAT_XVYU2101010; case PLANE_CTL_FORMAT_Y412: return DRM_FORMAT_XVYU12_16161616; case PLANE_CTL_FORMAT_Y416: return DRM_FORMAT_XVYU16161616; default: case PLANE_CTL_FORMAT_XRGB_8888: if (rgb_order) { if (alpha) return DRM_FORMAT_ABGR8888; else return DRM_FORMAT_XBGR8888; } else { if (alpha) return DRM_FORMAT_ARGB8888; else return DRM_FORMAT_XRGB8888; } case PLANE_CTL_FORMAT_XRGB_2101010: if (rgb_order) { if (alpha) return DRM_FORMAT_ABGR2101010; else return DRM_FORMAT_XBGR2101010; } else { if (alpha) return DRM_FORMAT_ARGB2101010; else return DRM_FORMAT_XRGB2101010; } case PLANE_CTL_FORMAT_XRGB_16161616F: if (rgb_order) { if (alpha) return DRM_FORMAT_ABGR16161616F; else return DRM_FORMAT_XBGR16161616F; } else { if (alpha) return DRM_FORMAT_ARGB16161616F; else return DRM_FORMAT_XRGB16161616F; } } } static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915)) return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3); else return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5); } bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) { return DISPLAY_VER(dev_priv) >= 11 && icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id); } u8 icl_hdr_plane_mask(void) { return BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1); } bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) { return DISPLAY_VER(dev_priv) >= 11 && icl_hdr_plane_mask() & BIT(plane_id); } static int icl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); /* two pixels per clock */ return DIV_ROUND_UP(pixel_rate, 2); } static void glk_plane_ratio(const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { const struct drm_framebuffer *fb = plane_state->hw.fb; if (fb->format->cpp[0] == 8) { *num = 10; *den = 8; } else { *num = 1; *den = 1; } } static int glk_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); unsigned int num, den; glk_plane_ratio(plane_state, &num, &den); /* two pixels per clock */ return DIV_ROUND_UP(pixel_rate * num, 2 * den); } static void skl_plane_ratio(const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { const struct drm_framebuffer *fb = plane_state->hw.fb; if (fb->format->cpp[0] == 8) { *num = 9; *den = 8; } else { *num = 1; *den = 1; } } static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); unsigned int num, den; skl_plane_ratio(plane_state, &num, &den); return DIV_ROUND_UP(pixel_rate * num, den); } static int skl_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { int cpp = fb->format->cpp[color_plane]; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: /* * 
Validated limit is 4k, but has 5k should * work apart from the following features: * - Ytile (already limited to 4k) * - FP16 (already limited to 4k) * - render compression (already limited to 4k) * - KVMR sprite and cursor (don't care) * - horizontal panning (TODO verify this) * - pipe and plane scaling (TODO verify this) */ if (cpp == 8) return 4096; else return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: /* FIXME AUX plane? */ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: if (cpp == 8) return 2048; else return 4096; default: MISSING_CASE(fb->modifier); return 2048; } } static int glk_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { int cpp = fb->format->cpp[color_plane]; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: if (cpp == 8) return 4096; else return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: /* FIXME AUX plane? */ case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: if (cpp == 8) return 2048; else return 5120; default: MISSING_CASE(fb->modifier); return 2048; } } static int icl_plane_min_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { /* Wa_14011264657, Wa_14011050563: gen11+ */ switch (fb->format->format) { case DRM_FORMAT_C8: return 18; case DRM_FORMAT_RGB565: return 10; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XVYU2101010: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: return 6; case DRM_FORMAT_NV12: return 20; case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: return 12; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: return 4; default: return 1; } } static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) return 4096; else return 5120; } static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { return 5120; } static int skl_plane_max_height(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { return 4096; } static int icl_plane_max_height(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { return 4320; } static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; int max_horizontal_pixels = 8192; int max_stride_bytes; if (DISPLAY_VER(i915) >= 13) { /* * The stride in bytes must not exceed of the size * of 128K bytes. For pixel formats of 64bpp will allow * for a 16K pixel surface. */ max_stride_bytes = 131072; if (cpp == 8) max_horizontal_pixels = 16384; else max_horizontal_pixels = 65536; } else { /* * "The stride in bytes must not exceed the * of the size of 8K pixels and 32K bytes." 
*/ max_stride_bytes = 32768; } if (drm_rotation_90_or_270(rotation)) return min(max_horizontal_pixels, max_stride_bytes / cpp); else return min(max_horizontal_pixels * cpp, max_stride_bytes); } /* Preoffset values for YUV to RGB Conversion */ #define PREOFF_YUV_TO_RGB_HI 0x1800 #define PREOFF_YUV_TO_RGB_ME 0x0000 #define PREOFF_YUV_TO_RGB_LO 0x1800 #define ROFF(x) (((x) & 0xffff) << 16) #define GOFF(x) (((x) & 0xffff) << 0) #define BOFF(x) (((x) & 0xffff) << 16) /* * Programs the input color space conversion stage for ICL HDR planes. * Note that it is assumed that this stage always happens after YUV * range correction. Thus, the input to this stage is assumed to be * in full-range YCbCr. */ static void icl_program_input_csc(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; static const u16 input_csc_matrix[][9] = { /* * BT.601 full range YCbCr -> full range RGB * The matrix required is : * [1.000, 0.000, 1.371, * 1.000, -0.336, -0.698, * 1.000, 1.732, 0.0000] */ [DRM_COLOR_YCBCR_BT601] = { 0x7AF8, 0x7800, 0x0, 0x8B28, 0x7800, 0x9AC0, 0x0, 0x7800, 0x7DD8, }, /* * BT.709 full range YCbCr -> full range RGB * The matrix required is : * [1.000, 0.000, 1.574, * 1.000, -0.187, -0.468, * 1.000, 1.855, 0.0000] */ [DRM_COLOR_YCBCR_BT709] = { 0x7C98, 0x7800, 0x0, 0x9EF8, 0x7800, 0xAC00, 0x0, 0x7800, 0x7ED8, }, /* * BT.2020 full range YCbCr -> full range RGB * The matrix required is : * [1.000, 0.000, 1.474, * 1.000, -0.1645, -0.5713, * 1.000, 1.8814, 0.0000] */ [DRM_COLOR_YCBCR_BT2020] = { 0x7BC8, 0x7800, 0x0, 0x8928, 0x7800, 0xAA88, 0x0, 0x7800, 0x7F10, }, }; const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | GOFF(csc[1])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) | GOFF(csc[4])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) | GOFF(csc[7])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8])); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), PREOFF_YUV_TO_RGB_ME); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), PREOFF_YUV_TO_RGB_LO); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); } static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { /* * The stride is either expressed as a multiple of 64 bytes chunks for * linear buffers or in number of tiles for tiled buffers. 
*/ if (is_surface_linear(fb, color_plane)) return 64; else if (drm_rotation_90_or_270(rotation)) return intel_tile_height(fb, color_plane); else return intel_tile_width_bytes(fb, color_plane); } static u32 skl_plane_stride(const struct intel_plane_state *plane_state, int color_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; u32 stride = plane_state->view.color_plane[color_plane].scanout_stride; if (color_plane >= fb->format->num_planes) return 0; return stride / skl_plane_stride_mult(fb, color_plane, rotation); } static void skl_plane_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; skl_write_plane_wm(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } static void icl_plane_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); skl_write_plane_wm(plane, crtc_state); intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } static bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static u32 skl_plane_ctl_format(u32 pixel_format) { switch (pixel_format) { case DRM_FORMAT_C8: return PLANE_CTL_FORMAT_INDEXED; case DRM_FORMAT_RGB565: return PLANE_CTL_FORMAT_RGB_565; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: return PLANE_CTL_FORMAT_XRGB_8888; case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: return PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: return PLANE_CTL_FORMAT_XRGB_16161616F; case DRM_FORMAT_XYUV8888: return PLANE_CTL_FORMAT_XYUV; case DRM_FORMAT_YUYV: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV; case DRM_FORMAT_YVYU: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU; case DRM_FORMAT_UYVY: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY; case DRM_FORMAT_VYUY: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; case DRM_FORMAT_P010: return PLANE_CTL_FORMAT_P010; case DRM_FORMAT_P012: return PLANE_CTL_FORMAT_P012; case DRM_FORMAT_P016: return PLANE_CTL_FORMAT_P016; case 
DRM_FORMAT_Y210: return PLANE_CTL_FORMAT_Y210; case DRM_FORMAT_Y212: return PLANE_CTL_FORMAT_Y212; case DRM_FORMAT_Y216: return PLANE_CTL_FORMAT_Y216; case DRM_FORMAT_XVYU2101010: return PLANE_CTL_FORMAT_Y410; case DRM_FORMAT_XVYU12_16161616: return PLANE_CTL_FORMAT_Y412; case DRM_FORMAT_XVYU16161616: return PLANE_CTL_FORMAT_Y416; default: MISSING_CASE(pixel_format); } return 0; } static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) { if (!plane_state->hw.fb->format->has_alpha) return PLANE_CTL_ALPHA_DISABLE; switch (plane_state->hw.pixel_blend_mode) { case DRM_MODE_BLEND_PIXEL_NONE: return PLANE_CTL_ALPHA_DISABLE; case DRM_MODE_BLEND_PREMULTI: return PLANE_CTL_ALPHA_SW_PREMULTIPLY; case DRM_MODE_BLEND_COVERAGE: return PLANE_CTL_ALPHA_HW_PREMULTIPLY; default: MISSING_CASE(plane_state->hw.pixel_blend_mode); return PLANE_CTL_ALPHA_DISABLE; } } static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) { if (!plane_state->hw.fb->format->has_alpha) return PLANE_COLOR_ALPHA_DISABLE; switch (plane_state->hw.pixel_blend_mode) { case DRM_MODE_BLEND_PIXEL_NONE: return PLANE_COLOR_ALPHA_DISABLE; case DRM_MODE_BLEND_PREMULTI: return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; case DRM_MODE_BLEND_COVERAGE: return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; default: MISSING_CASE(plane_state->hw.pixel_blend_mode); return PLANE_COLOR_ALPHA_DISABLE; } } static u32 skl_plane_ctl_tiling(u64 fb_modifier) { switch (fb_modifier) { case DRM_FORMAT_MOD_LINEAR: break; case I915_FORMAT_MOD_X_TILED: return PLANE_CTL_TILED_X; case I915_FORMAT_MOD_Y_TILED: return PLANE_CTL_TILED_Y; case I915_FORMAT_MOD_4_TILED: return PLANE_CTL_TILED_4; case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; default: MISSING_CASE(fb_modifier); } return 0; } static u32 skl_plane_ctl_rotate(unsigned int rotate) { switch (rotate) { case DRM_MODE_ROTATE_0: break; /* * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr * while i915 HW rotation is clockwise, thats why this swapping. 
*/ case DRM_MODE_ROTATE_90: return PLANE_CTL_ROTATE_270; case DRM_MODE_ROTATE_180: return PLANE_CTL_ROTATE_180; case DRM_MODE_ROTATE_270: return PLANE_CTL_ROTATE_90; default: MISSING_CASE(rotate); } return 0; } static u32 icl_plane_ctl_flip(unsigned int reflect) { switch (reflect) { case 0: break; case DRM_MODE_REFLECT_X: return PLANE_CTL_FLIP_HORIZONTAL; case DRM_MODE_REFLECT_Y: default: MISSING_CASE(reflect); } return 0; } static u32 adlp_plane_ctl_arb_slots(const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { switch (fb->format->cpp[0]) { case 2: return PLANE_CTL_ARB_SLOTS(1); default: return PLANE_CTL_ARB_SLOTS(0); } } else { switch (fb->format->cpp[0]) { case 8: return PLANE_CTL_ARB_SLOTS(3); case 4: return PLANE_CTL_ARB_SLOTS(1); default: return PLANE_CTL_ARB_SLOTS(0); } } } static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 plane_ctl = 0; if (DISPLAY_VER(dev_priv) >= 10) return plane_ctl; if (crtc_state->gamma_enable) plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; return plane_ctl; } static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 plane_ctl; plane_ctl = PLANE_CTL_ENABLE; if (DISPLAY_VER(dev_priv) < 10) { plane_ctl |= skl_plane_ctl_alpha(plane_state); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; } plane_ctl |= skl_plane_ctl_format(fb->format->format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier); plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); if (DISPLAY_VER(dev_priv) >= 11) plane_ctl |= icl_plane_ctl_flip(rotation & DRM_MODE_REFLECT_MASK); if (key->flags & I915_SET_COLORKEY_DESTINATION) plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; else if (key->flags & I915_SET_COLORKEY_SOURCE) plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; /* Wa_22012358565:adl-p */ if (DISPLAY_VER(dev_priv) == 13) plane_ctl |= adlp_plane_ctl_arb_slots(plane_state); return plane_ctl; } static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 plane_color_ctl = 0; if (DISPLAY_VER(dev_priv) >= 11) return plane_color_ctl; if (crtc_state->gamma_enable) plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; return plane_color_ctl; } static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); u32 plane_color_ctl = 0; plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); if (fb->format->is_yuv && 
!icl_is_hdr_plane(dev_priv, plane->id)) { switch (plane_state->hw.color_encoding) { case DRM_COLOR_YCBCR_BT709: plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; break; case DRM_COLOR_YCBCR_BT2020: plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; break; default: plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; } if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } else if (fb->format->is_yuv) { plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } if (plane_state->force_black) plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; return plane_color_ctl; } static u32 skl_surf_address(const struct intel_plane_state *plane_state, int color_plane) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; u32 offset = plane_state->view.color_plane[color_plane].offset; if (intel_fb_uses_dpt(fb)) { /* * The DPT object contains only one vma, so the VMA's offset * within the DPT is always 0. */ drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start); drm_WARN_ON(&i915->drm, offset & 0x1fffff); return offset >> 9; } else { drm_WARN_ON(&i915->drm, offset & 0xfff); return offset; } } static u32 skl_plane_surf(const struct intel_plane_state *plane_state, int color_plane) { u32 plane_surf; plane_surf = intel_plane_ggtt_offset(plane_state) + skl_surf_address(plane_state, color_plane); if (plane_state->decrypt) plane_surf |= PLANE_SURF_DECRYPT; return plane_surf; } static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state, int color_plane) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; int aux_plane = skl_main_to_aux_plane(fb, color_plane); u32 aux_dist; if (!aux_plane) return 0; aux_dist = skl_surf_address(plane_state, aux_plane) - skl_surf_address(plane_state, color_plane); if (DISPLAY_VER(i915) < 12) aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane)); return aux_dist; } static u32 skl_plane_keyval(const struct intel_plane_state *plane_state) { const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; return key->min_value; } static u32 skl_plane_keymax(const struct intel_plane_state *plane_state) { const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u8 alpha = plane_state->hw.alpha >> 8; return (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); } static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) { const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u8 alpha = plane_state->hw.alpha >> 8; u32 keymsk; keymsk = key->channel_mask & 0x7ffffff; if (alpha < 0xff) keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; return keymsk; } static void icl_plane_csc_load_black(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); 
intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static int icl_plane_color_plane(const struct intel_plane_state *plane_state) { /* Program the UV plane on planar master */ if (plane_state->planar_linked_plane && !plane_state->planar_slave) return 1; else return 0; } static void skl_plane_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 stride = skl_plane_stride(plane_state, 0); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { crtc_x = 0; crtc_y = 0; } intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), PLANE_STRIDE_(stride)); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); skl_write_plane_wm(plane, crtc_state); } static void skl_plane_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; u32 plane_ctl, plane_color_ctl = 0; plane_ctl = plane_state->ctl | skl_plane_ctl_crtc(crtc_state); if (DISPLAY_VER(dev_priv) >= 10) plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), skl_plane_aux_dist(plane_state, 0)); intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); /* * Enable the scaler before the plane so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. * * TODO: split into noarm+arm pair */ if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), skl_plane_surf(plane_state, 0)); } static void icl_plane_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; int color_plane = icl_plane_color_plane(plane_state); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; int x = plane_state->view.color_plane[color_plane].x; int y = plane_state->view.color_plane[color_plane].y; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u32 plane_color_ctl; plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { crtc_x = 0; crtc_y = 0; } intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), PLANE_STRIDE_(stride)); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), lower_32_bits(plane_state->ccval)); intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), upper_32_bits(plane_state->ccval)); } /* FLAT CCS doesn't need to program AUX_DIST */ if (!HAS_FLAT_CCS(dev_priv)) intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), skl_plane_aux_dist(plane_state, color_plane)); if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); skl_write_plane_wm(plane, crtc_state); /* * FIXME: pxp session invalidation can hit any time even at time of commit * or after the commit, display content will be garbage. */ if (plane_state->force_black) icl_plane_csc_load_black(plane); intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); } static void icl_plane_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; int color_plane = icl_plane_color_plane(plane_state); u32 plane_ctl; plane_ctl = plane_state->ctl | skl_plane_ctl_crtc(crtc_state); /* * Enable the scaler before the plane so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. 
* * TODO: split into noarm+arm pair */ if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), skl_plane_surf(plane_state, color_plane)); } static void skl_plane_async_flip(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 plane_ctl = plane_state->ctl; plane_ctl |= skl_plane_ctl_crtc(crtc_state); if (async_flip) plane_ctl |= PLANE_CTL_ASYNC_FLIP; intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), skl_plane_surf(plane_state, 0)); } static bool intel_format_is_p01x(u32 format) { switch (format) { case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: return true; default: return false; } } static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; if (!fb) return 0; if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && intel_fb_is_ccs_modifier(fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "RC support only with 0/180 degree rotation (%x)\n", rotation); return -EINVAL; } if (rotation & DRM_MODE_REFLECT_X && fb->modifier == DRM_FORMAT_MOD_LINEAR) { drm_dbg_kms(&dev_priv->drm, "horizontal flip is not supported with linear surface formats\n"); return -EINVAL; } if (drm_rotation_90_or_270(rotation)) { if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) { drm_dbg_kms(&dev_priv->drm, "Y/Yf tiling required for 90/270!\n"); return -EINVAL; } /* * 90/270 is not allowed with RGB64 16:16:16:16 and * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. 
*/ switch (fb->format->format) { case DRM_FORMAT_RGB565: if (DISPLAY_VER(dev_priv) >= 11) break; fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: drm_dbg_kms(&dev_priv->drm, "Unsupported pixel format %p4cc for 90/270!\n", &fb->format->format); return -EINVAL; default: break; } } /* Y-tiling is not supported in IF-ID Interlace mode */ if (crtc_state->hw.enable && crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && fb->modifier != DRM_FORMAT_MOD_LINEAR && fb->modifier != I915_FORMAT_MOD_X_TILED) { drm_dbg_kms(&dev_priv->drm, "Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } /* Wa_1606054188:tgl,adl-s */ if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) && plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE && intel_format_is_p01x(fb->format->format)) { drm_dbg_kms(&dev_priv->drm, "Source color keying not supported with P01x formats\n"); return -EINVAL; } return 0; } static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); int crtc_x = plane_state->uapi.dst.x1; int crtc_w = drm_rect_width(&plane_state->uapi.dst); int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); /* * Display WA #1175: glk * Planes other than the cursor may cause FIFO underflow and display * corruption if starting less than 4 pixels from the right edge of * the screen. * Besides the above WA fix the similar problem, where planes other * than the cursor ending less than 4 pixels from the left edge of the * screen may cause FIFO underflow and display corruption. */ if (DISPLAY_VER(dev_priv) == 10 && (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) { drm_dbg_kms(&dev_priv->drm, "requested plane X %s position %d invalid (valid range %d-%d)\n", crtc_x + crtc_w < 4 ? "end" : "start", crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x, 4, pipe_src_w - 4); return -ERANGE; } return 0; } static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; /* Display WA #1106 */ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { drm_dbg_kms(&i915->drm, "src width must be multiple of 4 for rotated planar YUV\n"); return -EINVAL; } return 0; } static int skl_plane_max_scale(struct drm_i915_private *dev_priv, const struct drm_framebuffer *fb) { /* * We don't yet know the final source width nor * whether we can use the HQ scaler mode. Assume * the best case. * FIXME need to properly check this later. 
*/ if (DISPLAY_VER(dev_priv) >= 10 || !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) return 0x30000 - 1; else return 0x20000 - 1; } static int intel_plane_min_width(struct intel_plane *plane, const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { if (plane->min_width) return plane->min_width(fb, color_plane, rotation); else return 1; } static int intel_plane_max_width(struct intel_plane *plane, const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { if (plane->max_width) return plane->max_width(fb, color_plane, rotation); else return INT_MAX; } static int intel_plane_max_height(struct intel_plane *plane, const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) { if (plane->max_height) return plane->max_height(fb, color_plane, rotation); else return INT_MAX; } static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, int main_x, int main_y, u32 main_offset, int ccs_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; int aux_x = plane_state->view.color_plane[ccs_plane].x; int aux_y = plane_state->view.color_plane[ccs_plane].y; u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; u32 alignment = intel_surf_alignment(fb, ccs_plane); int hsub; int vsub; intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); while (aux_offset >= main_offset && aux_y <= main_y) { int x, y; if (aux_x == main_x && aux_y == main_y) break; if (aux_offset == 0) break; x = aux_x / hsub; y = aux_y / vsub; aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, ccs_plane, aux_offset, aux_offset - alignment); aux_x = x * hsub + aux_x % hsub; aux_y = y * vsub + aux_y % vsub; } if (aux_x != main_x || aux_y != main_y) return false; plane_state->view.color_plane[ccs_plane].offset = aux_offset; plane_state->view.color_plane[ccs_plane].x = aux_x; plane_state->view.color_plane[ccs_plane].y = aux_y; return true; } int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, int *x, int *y, u32 *offset) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const int aux_plane = skl_main_to_aux_plane(fb, 0); const u32 aux_offset = plane_state->view.color_plane[aux_plane].offset; const u32 alignment = intel_surf_alignment(fb, 0); const int w = drm_rect_width(&plane_state->uapi.src) >> 16; intel_add_fb_offsets(x, y, plane_state, 0); *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0); if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) return -EINVAL; /* * AUX surface offset is specified as the distance from the * main surface offset, and it must be non-negative. Make * sure that is what we will get. */ if (aux_plane && *offset > aux_offset) *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, *offset, aux_offset & ~(alignment - 1)); /* * When using an X-tiled surface, the plane blows up * if the x offset + width exceed the stride. 
* * TODO: linear and Y-tiled seem fine, Yf untested, */ if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = fb->format->cpp[0]; while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (*offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); return -EINVAL; } *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, *offset, *offset - alignment); } } return 0; } static int skl_check_main_surface(struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const unsigned int rotation = plane_state->hw.rotation; int x = plane_state->uapi.src.x1 >> 16; int y = plane_state->uapi.src.y1 >> 16; const int w = drm_rect_width(&plane_state->uapi.src) >> 16; const int h = drm_rect_height(&plane_state->uapi.src) >> 16; const int min_width = intel_plane_min_width(plane, fb, 0, rotation); const int max_width = intel_plane_max_width(plane, fb, 0, rotation); const int max_height = intel_plane_max_height(plane, fb, 0, rotation); const int aux_plane = skl_main_to_aux_plane(fb, 0); const u32 alignment = intel_surf_alignment(fb, 0); u32 offset; int ret; if (w > max_width || w < min_width || h > max_height || h < 1) { drm_dbg_kms(&dev_priv->drm, "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", w, h, min_width, max_width, max_height); return -EINVAL; } ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); if (ret) return ret; /* * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. On DG2 * there's no aux plane on fb so skip this checking. */ if (intel_fb_is_ccs_modifier(fb->modifier) && aux_plane) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset, aux_plane)) { if (offset == 0) break; offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, offset, offset - alignment); } if (x != plane_state->view.color_plane[aux_plane].x || y != plane_state->view.color_plane[aux_plane].y) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to CCS\n"); return -EINVAL; } } if (DISPLAY_VER(dev_priv) >= 13) drm_WARN_ON(&dev_priv->drm, x > 65535 || y > 65535); else drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191); plane_state->view.color_plane[0].offset = offset; plane_state->view.color_plane[0].x = x; plane_state->view.color_plane[0].y = y; /* * Put the final coordinates back so that the src * coordinate checks will see the right values. */ drm_rect_translate_to(&plane_state->uapi.src, x << 16, y << 16); return 0; } static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int uv_plane = 1; int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ? 
skl_main_to_aux_plane(fb, uv_plane) : 0; int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); int x = plane_state->uapi.src.x1 >> 17; int y = plane_state->uapi.src.y1 >> 17; int w = drm_rect_width(&plane_state->uapi.src) >> 17; int h = drm_rect_height(&plane_state->uapi.src) >> 17; u32 offset; /* FIXME not quite sure how/if these apply to the chroma plane */ if (w > max_width || h > max_height) { drm_dbg_kms(&i915->drm, "CbCr source size %dx%d too big (limit %dx%d)\n", w, h, max_width, max_height); return -EINVAL; } intel_add_fb_offsets(&x, &y, plane_state, uv_plane); offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, uv_plane); if (ccs_plane) { u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; u32 alignment = intel_surf_alignment(fb, uv_plane); if (offset > aux_offset) offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, uv_plane, offset, aux_offset & ~(alignment - 1)); while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset, ccs_plane)) { if (offset == 0) break; offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, uv_plane, offset, offset - alignment); } if (x != plane_state->view.color_plane[ccs_plane].x || y != plane_state->view.color_plane[ccs_plane].y) { drm_dbg_kms(&i915->drm, "Unable to find suitable display surface offset due to CCS\n"); return -EINVAL; } } if (DISPLAY_VER(i915) >= 13) drm_WARN_ON(&i915->drm, x > 65535 || y > 65535); else drm_WARN_ON(&i915->drm, x > 8191 || y > 8191); plane_state->view.color_plane[uv_plane].offset = offset; plane_state->view.color_plane[uv_plane].x = x; plane_state->view.color_plane[uv_plane].y = y; return 0; } static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; int src_x = plane_state->uapi.src.x1 >> 16; int src_y = plane_state->uapi.src.y1 >> 16; u32 offset; int ccs_plane; for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { int main_hsub, main_vsub; int hsub, vsub; int x, y; if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) continue; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, skl_ccs_to_main_plane(fb, ccs_plane)); intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); hsub *= main_hsub; vsub *= main_vsub; x = src_x / hsub; y = src_y / vsub; intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, ccs_plane); plane_state->view.color_plane[ccs_plane].offset = offset; plane_state->view.color_plane[ccs_plane].x = (x * hsub + src_x % hsub) / main_hsub; plane_state->view.color_plane[ccs_plane].y = (y * vsub + src_y % vsub) / main_vsub; } return 0; } static int skl_check_plane_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; ret = intel_plane_compute_gtt(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; /* * Handle the AUX surface first since the main surface setup depends on * it. 
*/ if (intel_fb_is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; } if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; } ret = skl_check_main_surface(plane_state); if (ret) return ret; return 0; } static bool skl_fb_scalable(const struct drm_framebuffer *fb) { if (!fb) return false; switch (fb->format->format) { case DRM_FORMAT_C8: return false; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: return DISPLAY_VER(to_i915(fb->dev)) >= 11; default: return true; } } static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); return intel_pxp_key_check(i915->pxp, obj, false) == 0; } static bool pxp_is_borked(struct drm_i915_gem_object *obj) { return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); } static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; int min_scale = DRM_PLANE_NO_SCALING; int max_scale = DRM_PLANE_NO_SCALING; int ret; ret = skl_plane_check_fb(crtc_state, plane_state); if (ret) return ret; /* use scaler when colorkey is not required */ if (!plane_state->ckey.flags && skl_fb_scalable(fb)) { min_scale = 1; max_scale = skl_plane_max_scale(dev_priv, fb); } ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, min_scale, max_scale, true); if (ret) return ret; ret = skl_check_plane_surface(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; ret = skl_plane_check_dst_coordinates(crtc_state, plane_state); if (ret) return ret; ret = intel_plane_check_src_coordinates(plane_state); if (ret) return ret; ret = skl_plane_check_nv12_rotation(plane_state); if (ret) return ret; if (DISPLAY_VER(dev_priv) >= 11) { plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); } /* HW only has 8 bits pixel precision, disable plane if invisible */ if (!(plane_state->hw.alpha >> 8)) plane_state->uapi.visible = false; plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); if (DISPLAY_VER(dev_priv) >= 10) plane_state->color_ctl = glk_plane_color_ctl(crtc_state, plane_state); if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && icl_is_hdr_plane(dev_priv, plane->id)) /* Enable and use MPEG-2 chroma siting */ plane_state->cus_ctl = PLANE_CUS_ENABLE | PLANE_CUS_HPHASE_0 | PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25; else plane_state->cus_ctl = 0; return 0; } static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) { return pipe - PIPE_A + INTEL_FBC_A; } static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id fbc_id, enum plane_id plane_id) { if ((DISPLAY_RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) return false; return plane_id == PLANE_PRIMARY; } static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe); if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id)) return dev_priv->display.fbc[fbc_id]; else return NULL; } static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum 
plane_id plane_id) { /* Display WA #0870: skl, bxt */ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) return false; if (DISPLAY_VER(dev_priv) == 9 && pipe == PIPE_C) return false; if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) return false; return true; } static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id, int *num_formats) { if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { *num_formats = ARRAY_SIZE(skl_planar_formats); return skl_planar_formats; } else { *num_formats = ARRAY_SIZE(skl_plane_formats); return skl_plane_formats; } } static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id, int *num_formats) { if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { *num_formats = ARRAY_SIZE(glk_planar_formats); return glk_planar_formats; } else { *num_formats = ARRAY_SIZE(skl_plane_formats); return skl_plane_formats; } } static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id, int *num_formats) { if (icl_is_hdr_plane(dev_priv, plane_id)) { *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); return icl_hdr_plane_formats; } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) { *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); return icl_sdr_y_plane_formats; } else { *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats); return icl_sdr_uv_plane_formats; } } static bool skl_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: case DRM_FORMAT_XVYU2101010: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) return true; fallthrough; default: return false; } } static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: if (intel_fb_is_mc_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case 
DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XVYU2101010: case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: if (!intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; default: return false; } } static const struct drm_plane_funcs skl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = skl_plane_format_mod_supported, }; static const struct drm_plane_funcs gen12_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = gen12_plane_format_mod_supported, }; static void skl_plane_enable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } static void skl_plane_disable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, enum pipe pipe, enum plane_id plane_id) { /* Wa_14017240301 */ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) || IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) return false; /* Wa_22011186057 */ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) return false; if (DISPLAY_VER(i915) >= 11) return true; if (IS_GEMINILAKE(i915)) return pipe != PIPE_C; return pipe != PIPE_C && (plane_id == PLANE_PRIMARY || plane_id == PLANE_SPRITE0); } static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, enum plane_id plane_id) { if (DISPLAY_VER(i915) < 12) return false; /* Wa_14010477008 */ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0))) return false; /* Wa_22011186057 */ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) return false; /* Wa_14013215631 */ if (IS_DG2_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) return false; return plane_id < PLANE_SPRITE4; } static u8 skl_get_plane_caps(struct drm_i915_private *i915, enum pipe pipe, enum plane_id plane_id) { u8 caps = INTEL_PLANE_CAP_TILING_X; if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915)) caps |= INTEL_PLANE_CAP_TILING_Y; if (DISPLAY_VER(i915) < 12) caps |= INTEL_PLANE_CAP_TILING_Yf; if (HAS_4TILE(i915)) caps |= INTEL_PLANE_CAP_TILING_4; if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { caps |= INTEL_PLANE_CAP_CCS_RC; if (DISPLAY_VER(i915) >= 12) caps |= INTEL_PLANE_CAP_CCS_RC_CC; } if (gen12_plane_has_mc_ccs(i915, plane_id)) caps |= INTEL_PLANE_CAP_CCS_MC; return caps; } struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { const struct drm_plane_funcs *plane_funcs; struct intel_plane *plane; enum drm_plane_type plane_type; 
unsigned int supported_rotations; unsigned int supported_csc; const u64 *modifiers; const u32 *formats; int num_formats; int ret; plane = intel_plane_alloc(); if (IS_ERR(plane)) return plane; plane->pipe = pipe; plane->id = plane_id; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; if (icl_is_hdr_plane(dev_priv, plane_id)) plane->max_width = icl_hdr_plane_max_width; else plane->max_width = icl_sdr_plane_max_width; plane->max_height = icl_plane_max_height; plane->min_cdclk = icl_plane_min_cdclk; } else if (DISPLAY_VER(dev_priv) >= 10) { plane->max_width = glk_plane_max_width; plane->max_height = skl_plane_max_height; plane->min_cdclk = glk_plane_min_cdclk; } else { plane->max_width = skl_plane_max_width; plane->max_height = skl_plane_max_height; plane->min_cdclk = skl_plane_min_cdclk; } plane->max_stride = skl_plane_max_stride; if (DISPLAY_VER(dev_priv) >= 11) { plane->update_noarm = icl_plane_update_noarm; plane->update_arm = icl_plane_update_arm; plane->disable_arm = icl_plane_disable_arm; } else { plane->update_noarm = skl_plane_update_noarm; plane->update_arm = skl_plane_update_arm; plane->disable_arm = skl_plane_disable_arm; } plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; if (plane_id == PLANE_PRIMARY) { plane->need_async_flip_disable_wa = IS_DISPLAY_VER(dev_priv, 9, 10); plane->async_flip = skl_plane_async_flip; plane->enable_flip_done = skl_plane_enable_flip_done; plane->disable_flip_done = skl_plane_disable_flip_done; } if (DISPLAY_VER(dev_priv) >= 11) formats = icl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); else if (DISPLAY_VER(dev_priv) >= 10) formats = glk_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); else formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); if (DISPLAY_VER(dev_priv) >= 12) plane_funcs = &gen12_plane_funcs; else plane_funcs = &skl_plane_funcs; if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; else plane_type = DRM_PLANE_TYPE_OVERLAY; modifiers = intel_fb_plane_get_modifiers(dev_priv, skl_get_plane_caps(dev_priv, pipe, plane_id)); ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, pipe_name(pipe)); kfree(modifiers); if (ret) goto fail; if (DISPLAY_VER(dev_priv) >= 13) supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; else supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; if (DISPLAY_VER(dev_priv) >= 11) supported_rotations |= DRM_MODE_REFLECT_X; drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, supported_rotations); supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); if (DISPLAY_VER(dev_priv) >= 10) supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020); drm_plane_create_color_properties(&plane->base, supported_csc, BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); drm_plane_create_alpha_property(&plane->base); drm_plane_create_blend_mode_property(&plane->base, BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE)); drm_plane_create_zpos_immutable_property(&plane->base, plane_id); if (DISPLAY_VER(dev_priv) >= 12) drm_plane_enable_fb_damage_clips(&plane->base); if (DISPLAY_VER(dev_priv) >= 11) 
drm_plane_create_scaling_filter_property(&plane->base, BIT(DRM_SCALING_FILTER_DEFAULT) | BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); intel_plane_helper_add(plane); return plane; fail: intel_plane_free(plane); return ERR_PTR(ret); } void skl_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); enum plane_id plane_id = plane->id; enum pipe pipe; u32 val, base, offset, stride_mult, tiling, alpha; int fourcc, pixel_format; unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; static_assert(PLANE_CTL_TILED_YF == PLANE_CTL_TILED_4); if (!plane->get_hw_state(plane, &pipe)) return; drm_WARN_ON(dev, pipe != crtc->pipe); if (crtc_state->bigjoiner_pipes) { drm_dbg_kms(&dev_priv->drm, "Unsupported bigjoiner configuration for initial FB\n"); return; } intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); return; } fb = &intel_fb->base; fb->dev = dev; val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); if (DISPLAY_VER(dev_priv) >= 11) pixel_format = val & PLANE_CTL_FORMAT_MASK_ICL; else pixel_format = val & PLANE_CTL_FORMAT_MASK_SKL; if (DISPLAY_VER(dev_priv) >= 10) { u32 color_ctl; color_ctl = intel_de_read(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); alpha = REG_FIELD_GET(PLANE_COLOR_ALPHA_MASK, color_ctl); } else { alpha = REG_FIELD_GET(PLANE_CTL_ALPHA_MASK, val); } fourcc = skl_format_to_fourcc(pixel_format, val & PLANE_CTL_ORDER_RGBX, alpha); fb->format = drm_format_info(fourcc); tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { case PLANE_CTL_TILED_LINEAR: fb->modifier = DRM_FORMAT_MOD_LINEAR; break; case PLANE_CTL_TILED_X: plane_config->tiling = I915_TILING_X; fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: plane_config->tiling = I915_TILING_Y; if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) if (DISPLAY_VER(dev_priv) >= 14) fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS; else if (DISPLAY_VER(dev_priv) >= 12) fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) if (DISPLAY_VER(dev_priv) >= 14) fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */ if (HAS_4TILE(dev_priv)) { u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | PLANE_CTL_CLEAR_COLOR_DISABLE; if ((val & rc_mask) == rc_mask) fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS; else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS; else if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC; else fb->modifier = I915_FORMAT_MOD_4_TILED; } else { if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Yf_TILED; } break; default: MISSING_CASE(tiling); goto error; } if (!dev_priv->params.enable_dpt && intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n"); goto error; } /* * DRM_MODE_ROTATE_ is counter clockwise to stay 
compatible with Xrandr * while i915 HW rotation is clockwise, thats why this swapping. */ switch (val & PLANE_CTL_ROTATE_MASK) { case PLANE_CTL_ROTATE_0: plane_config->rotation = DRM_MODE_ROTATE_0; break; case PLANE_CTL_ROTATE_90: plane_config->rotation = DRM_MODE_ROTATE_270; break; case PLANE_CTL_ROTATE_180: plane_config->rotation = DRM_MODE_ROTATE_180; break; case PLANE_CTL_ROTATE_270: plane_config->rotation = DRM_MODE_ROTATE_90; break; } if (DISPLAY_VER(dev_priv) >= 11 && val & PLANE_CTL_FLIP_HORIZONTAL) plane_config->rotation |= DRM_MODE_REFLECT_X; /* 90/270 degree rotation would require extra work */ if (drm_rotation_90_or_270(plane_config->rotation)) goto error; base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK; plane_config->base = base; offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); drm_WARN_ON(&dev_priv->drm, offset != 0); val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1; fb->width = REG_FIELD_GET(PLANE_WIDTH_MASK, val) + 1; val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); fb->pitches[0] = REG_FIELD_GET(PLANE_STRIDE__MASK, val) * stride_mult; aligned_height = intel_fb_align_height(fb, 0, fb->height); plane_config->size = fb->pitches[0] * aligned_height; drm_dbg_kms(&dev_priv->drm, "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", crtc->base.name, plane->base.name, fb->width, fb->height, fb->format->cpp[0] * 8, base, fb->pitches[0], plane_config->size); plane_config->fb = intel_fb; return; error: kfree(intel_fb); }
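A note on the two format_mod_supported() helpers in the file above: each is a cascading switch in which a group of pixel formats tests the modifiers specific to it and then falls through, so formats listed earlier accept a superset of the modifiers accepted by the groups below them. The following is a minimal standalone sketch of that pattern, with invented fmt/mod enums rather than the real DRM definitions; it illustrates the control flow only and is not driver code.

#include <stdbool.h>
#include <stdio.h>

enum fmt { FMT_ARGB8888, FMT_NV12, FMT_C8 };
enum mod { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED, MOD_CCS };

/* Same shape as skl_plane_format_mod_supported(): each group checks the
 * modifiers unique to it and then falls through, so earlier (richer)
 * formats inherit every later group's modifiers as well. */
static bool format_mod_supported(enum fmt f, enum mod m)
{
	switch (f) {
	case FMT_ARGB8888:
		if (m == MOD_CCS)		/* only the 8888 group gets CCS */
			return true;
		/* fall through */
	case FMT_NV12:
		if (m == MOD_Y_TILED)		/* YUV group and above get Y tiling */
			return true;
		/* fall through */
	case FMT_C8:
		if (m == MOD_LINEAR || m == MOD_X_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}

int main(void)
{
	printf("ARGB8888 + CCS    : %d\n", format_mod_supported(FMT_ARGB8888, MOD_CCS));  /* 1 */
	printf("NV12     + CCS    : %d\n", format_mod_supported(FMT_NV12, MOD_CCS));      /* 0 */
	printf("C8       + X_TILED: %d\n", format_mod_supported(FMT_C8, MOD_X_TILED));    /* 1 */
	return 0;
}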
linux-master
drivers/gpu/drm/i915/display/skl_universal_plane.c
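skl_get_plane_caps() above builds a per-plane capability mask from the display version plus the RC/MC CCS workaround checks, and intel_fb_plane_get_modifiers() later expands that mask into the modifier list handed to drm_universal_plane_init(). The sketch below models only the mask composition; the CAP_* bits and the display_ver/has_* inputs are simplified stand-ins (platform-specific cases such as the Alder Lake-P and DG2 steppings are ignored), so treat it as an outline rather than the driver's actual logic.

#include <stdio.h>

#define CAP_TILING_X  (1u << 0)
#define CAP_TILING_Y  (1u << 1)
#define CAP_TILING_YF (1u << 2)
#define CAP_TILING_4  (1u << 3)
#define CAP_CCS_RC    (1u << 4)
#define CAP_CCS_RC_CC (1u << 5)
#define CAP_CCS_MC    (1u << 6)

/* Simplified mirror of skl_get_plane_caps(): newer platforms drop the
 * legacy Y/Yf tilings and gain 4-tile, while the CCS flavours depend on
 * the display version and per-platform checks (collapsed into booleans). */
static unsigned int plane_caps(int display_ver, int has_4tile,
			       int has_rc_ccs, int has_mc_ccs)
{
	unsigned int caps = CAP_TILING_X;

	if (display_ver < 13)
		caps |= CAP_TILING_Y;
	if (display_ver < 12)
		caps |= CAP_TILING_YF;
	if (has_4tile)
		caps |= CAP_TILING_4;

	if (has_rc_ccs) {
		caps |= CAP_CCS_RC;
		if (display_ver >= 12)
			caps |= CAP_CCS_RC_CC;
	}
	if (has_mc_ccs)
		caps |= CAP_CCS_MC;

	return caps;
}

int main(void)
{
	printf("gen9  caps: 0x%x\n", plane_caps(9, 0, 1, 0));
	printf("gen12 caps: 0x%x\n", plane_caps(12, 0, 1, 1));
	printf("gen14 caps: 0x%x\n", plane_caps(14, 1, 1, 1));
	return 0;
}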
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_panel.h" /** * DOC: Display Refresh Rate Switching (DRRS) * * Display Refresh Rate Switching (DRRS) is a power conservation feature * which enables swtching between low and high refresh rates, * dynamically, based on the usage scenario. This feature is applicable * for internal panels. * * Indication that the panel supports DRRS is given by the panel EDID, which * would list multiple refresh rates for one resolution. * * DRRS is of 2 types - static and seamless. * Static DRRS involves changing refresh rate (RR) by doing a full modeset * (may appear as a blink on screen) and is used in dock-undock scenario. * Seamless DRRS involves changing RR without any visual effect to the user * and can be used during normal system usage. This is done by programming * certain registers. * * Support for static/seamless DRRS may be indicated in the VBT based on * inputs from the panel spec. * * DRRS saves power by switching to low RR based on usage scenarios. * * The implementation is based on frontbuffer tracking implementation. When * there is a disturbance on the screen triggered by user activity or a periodic * system activity, DRRS is disabled (RR is changed to high RR). When there is * no movement on screen, after a timeout of 1 second, a switch to low RR is * made. * * For integration with frontbuffer tracking code, intel_drrs_invalidate() * and intel_drrs_flush() are called. * * DRRS can be further extended to support other internal panels and also * the scenario of video playback wherein RR is set based on the rate * requested by userspace. */ const char *intel_drrs_type_str(enum drrs_type drrs_type) { static const char * const str[] = { [DRRS_TYPE_NONE] = "none", [DRRS_TYPE_STATIC] = "static", [DRRS_TYPE_SEAMLESS] = "seamless", }; if (drrs_type >= ARRAY_SIZE(str)) return "<invalid>"; return str[drrs_type]; } static void intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc, enum drrs_refresh_rate refresh_rate) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder; u32 bit; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) bit = TRANSCONF_REFRESH_RATE_ALT_VLV; else bit = TRANSCONF_REFRESH_RATE_ALT_ILK; intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder), bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0); } static void intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc, enum drrs_refresh_rate refresh_rate) { intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder, refresh_rate == DRRS_REFRESH_RATE_LOW ? 
&crtc->drrs.m2_n2 : &crtc->drrs.m_n); } bool intel_drrs_is_active(struct intel_crtc *crtc) { return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER; } static void intel_drrs_set_state(struct intel_crtc *crtc, enum drrs_refresh_rate refresh_rate) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (refresh_rate == crtc->drrs.refresh_rate) return; if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder)) intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate); else intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate); crtc->drrs.refresh_rate = refresh_rate; } static void intel_drrs_schedule_work(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000)); } static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); unsigned int frontbuffer_bits; frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe); for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, crtc_state->bigjoiner_pipes) frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe); return frontbuffer_bits; } /** * intel_drrs_activate - activate DRRS * @crtc_state: the crtc state * * Activates DRRS on the crtc. */ void intel_drrs_activate(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (!crtc_state->has_drrs) return; if (!crtc_state->hw.active) return; if (intel_crtc_is_bigjoiner_slave(crtc_state)) return; mutex_lock(&crtc->drrs.mutex); crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder; crtc->drrs.m_n = crtc_state->dp_m_n; crtc->drrs.m2_n2 = crtc_state->dp_m2_n2; crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state); crtc->drrs.busy_frontbuffer_bits = 0; intel_drrs_schedule_work(crtc); mutex_unlock(&crtc->drrs.mutex); } /** * intel_drrs_deactivate - deactivate DRRS * @old_crtc_state: the old crtc state * * Deactivates DRRS on the crtc. 
*/ void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); if (!old_crtc_state->has_drrs) return; if (!old_crtc_state->hw.active) return; if (intel_crtc_is_bigjoiner_slave(old_crtc_state)) return; mutex_lock(&crtc->drrs.mutex); if (intel_drrs_is_active(crtc)) intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH); crtc->drrs.cpu_transcoder = INVALID_TRANSCODER; crtc->drrs.frontbuffer_bits = 0; crtc->drrs.busy_frontbuffer_bits = 0; mutex_unlock(&crtc->drrs.mutex); cancel_delayed_work_sync(&crtc->drrs.work); } static void intel_drrs_downclock_work(struct work_struct *work) { struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work); mutex_lock(&crtc->drrs.mutex); if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits) intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW); mutex_unlock(&crtc->drrs.mutex); } static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv, unsigned int all_frontbuffer_bits, bool invalidate) { struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) { unsigned int frontbuffer_bits; mutex_lock(&crtc->drrs.mutex); frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits; if (!frontbuffer_bits) { mutex_unlock(&crtc->drrs.mutex); continue; } if (invalidate) crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits; else crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; /* flush/invalidate means busy screen hence upclock */ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH); /* * flush also means no more activity hence schedule downclock, if all * other fbs are quiescent too */ if (!crtc->drrs.busy_frontbuffer_bits) intel_drrs_schedule_work(crtc); else cancel_delayed_work(&crtc->drrs.work); mutex_unlock(&crtc->drrs.mutex); } } /** * intel_drrs_invalidate - Disable Idleness DRRS * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called everytime rendering on the given planes start. * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). * * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. */ void intel_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits) { intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true); } /** * intel_drrs_flush - Restart Idleness DRRS * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called every time rendering on the given planes has * completed or flip on a crtc is completed. So DRRS should be upclocked * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, * if no other planes are dirty. * * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. */ void intel_drrs_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits) { intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false); } /** * intel_drrs_crtc_init - Init DRRS for CRTC * @crtc: crtc * * This function is called only once at driver load to initialize basic * DRRS stuff. 
* */ void intel_drrs_crtc_init(struct intel_crtc *crtc) { INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work); mutex_init(&crtc->drrs.mutex); crtc->drrs.cpu_transcoder = INVALID_TRANSCODER; } static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; const struct intel_crtc_state *crtc_state; int ret; ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); if (ret) return ret; crtc_state = to_intel_crtc_state(crtc->base.state); mutex_lock(&crtc->drrs.mutex); seq_printf(m, "DRRS enabled: %s\n", str_yes_no(crtc_state->has_drrs)); seq_printf(m, "DRRS active: %s\n", str_yes_no(intel_drrs_is_active(crtc))); seq_printf(m, "DRRS refresh rate: %s\n", crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ? "low" : "high"); seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n", crtc->drrs.busy_frontbuffer_bits); mutex_unlock(&crtc->drrs.mutex); drm_modeset_unlock(&crtc->base.mutex); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status); static int intel_drrs_debugfs_ctl_set(void *data, u64 val) { struct intel_crtc *crtc = data; struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state; struct drm_crtc_commit *commit; int ret; ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); if (ret) return ret; crtc_state = to_intel_crtc_state(crtc->base.state); if (!crtc_state->hw.active || !crtc_state->has_drrs) goto out; commit = crtc_state->uapi.commit; if (commit) { ret = wait_for_completion_interruptible(&commit->hw_done); if (ret) goto out; } drm_dbg(&i915->drm, "Manually %sactivating DRRS\n", val ? "" : "de"); if (val) intel_drrs_activate(crtc_state); else intel_drrs_deactivate(crtc_state); out: drm_modeset_unlock(&crtc->base.mutex); return ret; } DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops, NULL, intel_drrs_debugfs_ctl_set, "%llu\n"); void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc) { debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry, crtc, &intel_drrs_debugfs_status_fops); debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry, crtc, &intel_drrs_debugfs_ctl_fops); } static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused) { struct intel_connector *connector = m->private; seq_printf(m, "DRRS type: %s\n", intel_drrs_type_str(intel_panel_drrs_type(connector))); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type); void intel_drrs_connector_debugfs_add(struct intel_connector *connector) { if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE) return; debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry, connector, &intel_drrs_debugfs_type_fops); }
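The frontbuffer-tracking behaviour described in the DOC comment of intel_drrs.c reduces to a small per-crtc state machine: any invalidate or flush touching a tracked bit forces the high refresh rate, invalidates mark bits busy, flushes clear them, and the delayed downclock is re-armed only when no busy bits remain. The standalone model below illustrates that decision logic with a plain boolean standing in for the 1-second delayed work; the struct and function names are invented for the example and are not the driver's.

#include <stdbool.h>
#include <stdio.h>

enum rr { RR_HIGH, RR_LOW };

struct drrs_model {
	unsigned int tracked_bits;	/* frontbuffer bits owned by this crtc */
	unsigned int busy_bits;		/* bits with rendering still in flight */
	enum rr rate;
	bool downclock_armed;		/* stands in for the 1 s delayed work */
};

/* Mirrors intel_drrs_frontbuffer_update(): both invalidate and flush
 * upclock immediately; the idle downclock timer is re-armed only when
 * no busy bits remain, otherwise it is cancelled. */
static void frontbuffer_update(struct drrs_model *d, unsigned int bits,
			       bool invalidate)
{
	bits &= d->tracked_bits;
	if (!bits)
		return;

	if (invalidate)
		d->busy_bits |= bits;
	else
		d->busy_bits &= ~bits;

	d->rate = RR_HIGH;
	d->downclock_armed = !d->busy_bits;
}

int main(void)
{
	struct drrs_model d = { .tracked_bits = 0xf, .rate = RR_LOW };

	frontbuffer_update(&d, 0x1, true);	/* render starts: upclock, stay busy */
	printf("busy=0x%x armed=%d\n", d.busy_bits, d.downclock_armed);
	frontbuffer_update(&d, 0x1, false);	/* flip done: idle, arm downclock */
	printf("busy=0x%x armed=%d\n", d.busy_bits, d.downclock_armed);
	return 0;
}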
linux-master
drivers/gpu/drm/i915/display/intel_drrs.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" #include "skl_scaler.h" #include "skl_universal_plane.h" /* * The hardware phase 0.0 refers to the center of the pixel. * We want to start from the top/left edge which is phase * -0.5. That matches how the hardware calculates the scaling * factors (from top-left of the first pixel to bottom-right * of the last pixel, as opposed to the pixel centers). * * For 4:2:0 subsampled chroma planes we obviously have to * adjust that so that the chroma sample position lands in * the right spot. * * Note that for packed YCbCr 4:2:2 formats there is no way to * control chroma siting. The hardware simply replicates the * chroma samples for both of the luma samples, and thus we don't * actually get the expected MPEG2 chroma siting convention :( * The same behaviour is observed on pre-SKL platforms as well. * * Theory behind the formula (note that we ignore sub-pixel * source coordinates): * s = source sample position * d = destination sample position * * Downscaling 4:1: * -0.5 * | 0.0 * | | 1.5 (initial phase) * | | | * v v v * | s | s | s | s | * | d | * * Upscaling 1:4: * -0.5 * | -0.375 (initial phase) * | | 0.0 * | | | * v v v * | s | * | d | d | d | d | */ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) { int phase = -0x8000; u16 trip = 0; if (chroma_cosited) phase += (sub - 1) * 0x8000 / sub; phase += scale / (2 * sub); /* * Hardware initial phase limited to [-0.5:1.5]. * Since the max hardware scale factor is 3.0, we * should never actually excdeed 1.0 here. */ WARN_ON(phase < -0x8000 || phase > 0x18000); if (phase < 0) phase = 0x10000 + phase; else trip = PS_PHASE_TRIP; return ((phase >> 2) & PS_PHASE_MASK) | trip; } #define SKL_MIN_SRC_W 8 #define SKL_MAX_SRC_W 4096 #define SKL_MIN_SRC_H 8 #define SKL_MAX_SRC_H 4096 #define SKL_MIN_DST_W 8 #define SKL_MAX_DST_W 4096 #define SKL_MIN_DST_H 8 #define SKL_MAX_DST_H 4096 #define ICL_MAX_SRC_W 5120 #define ICL_MAX_SRC_H 4096 #define ICL_MAX_DST_W 5120 #define ICL_MAX_DST_H 4096 #define TGL_MAX_SRC_W 5120 #define TGL_MAX_SRC_H 8192 #define TGL_MAX_DST_W 8192 #define TGL_MAX_DST_H 8192 #define MTL_MAX_SRC_W 4096 #define MTL_MAX_SRC_H 8192 #define MTL_MAX_DST_W 8192 #define MTL_MAX_DST_H 8192 #define SKL_MIN_YUV_420_SRC_W 16 #define SKL_MIN_YUV_420_SRC_H 16 static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, unsigned int scaler_user, int *scaler_id, int src_w, int src_h, int dst_w, int dst_h, const struct drm_format_info *format, u64 modifier, bool need_scaler) { struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); int min_src_w, min_src_h, min_dst_w, min_dst_h; int max_src_w, max_src_h, max_dst_w, max_dst_h; /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ if (src_w != dst_w || src_h != dst_h) need_scaler = true; /* * Scaling/fitting not supported in IF-ID mode in GEN9+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 
* Once NV12 is enabled, handle it here while allocating scaler * for NV12. */ if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable && need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { drm_dbg_kms(&dev_priv->drm, "Pipe/Plane scaling not supported with IF-ID mode\n"); return -EINVAL; } /* * if plane is being disabled or scaler is no more required or force detach * - free scaler binded to this plane/crtc * - in order to do this, update crtc->scaler_usage * * Here scaler state in crtc_state is set free so that * scaler can be assigned to other user. Actual register * update to free the scaler is done in plane/panel-fit programming. * For this purpose crtc/plane_state->scaler_id isn't reset here. */ if (force_detach || !need_scaler) { if (*scaler_id >= 0) { scaler_state->scaler_users &= ~(1 << scaler_user); scaler_state->scalers[*scaler_id].in_use = 0; drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " "Staged freeing scaler id %d scaler_users = 0x%x\n", crtc->pipe, scaler_user, *scaler_id, scaler_state->scaler_users); *scaler_id = -1; } return 0; } if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { drm_dbg_kms(&dev_priv->drm, "Planar YUV: src dimensions not met\n"); return -EINVAL; } min_src_w = SKL_MIN_SRC_W; min_src_h = SKL_MIN_SRC_H; min_dst_w = SKL_MIN_DST_W; min_dst_h = SKL_MIN_DST_H; if (DISPLAY_VER(dev_priv) < 11) { max_src_w = SKL_MAX_SRC_W; max_src_h = SKL_MAX_SRC_H; max_dst_w = SKL_MAX_DST_W; max_dst_h = SKL_MAX_DST_H; } else if (DISPLAY_VER(dev_priv) < 12) { max_src_w = ICL_MAX_SRC_W; max_src_h = ICL_MAX_SRC_H; max_dst_w = ICL_MAX_DST_W; max_dst_h = ICL_MAX_DST_H; } else if (DISPLAY_VER(dev_priv) < 14) { max_src_w = TGL_MAX_SRC_W; max_src_h = TGL_MAX_SRC_H; max_dst_w = TGL_MAX_DST_W; max_dst_h = TGL_MAX_DST_H; } else { max_src_w = MTL_MAX_SRC_W; max_src_h = MTL_MAX_SRC_H; max_dst_w = MTL_MAX_DST_W; max_dst_h = MTL_MAX_DST_H; } /* range checks */ if (src_w < min_src_w || src_h < min_src_h || dst_w < min_dst_w || dst_h < min_dst_h || src_w > max_src_w || src_h > max_src_h || dst_w > max_dst_w || dst_h > max_dst_h) { drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: src %ux%u dst %ux%u " "size is out of scaler range\n", crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); return -EINVAL; } /* * The pipe scaler does not use all the bits of PIPESRC, at least * on the earlier platforms. So even when we're scaling a plane * the *pipe* source size must not be too large. For simplicity * we assume the limits match the scaler source size limits. Might * not be 100% accurate on all platforms, but good enough for now. 
*/ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) { drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: pipe src size %ux%u " "is out of scaler range\n", crtc->pipe, scaler_user, pipe_src_w, pipe_src_h); return -EINVAL; } /* mark this plane as a scaler user in crtc_state */ scaler_state->scaler_users |= (1 << scaler_user); drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, scaler_state->scaler_users); return 0; } int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) { const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int width, height; if (crtc_state->pch_pfit.enabled) { width = drm_rect_width(&crtc_state->pch_pfit.dst); height = drm_rect_height(&crtc_state->pch_pfit.dst); } else { width = pipe_mode->crtc_hdisplay; height = pipe_mode->crtc_vdisplay; } return skl_update_scaler(crtc_state, !crtc_state->hw.active, SKL_CRTC_INDEX, &crtc_state->scaler_state.scaler_id, drm_rect_width(&crtc_state->pipe_src), drm_rect_height(&crtc_state->pipe_src), width, height, NULL, 0, crtc_state->pch_pfit.enabled); } /** * skl_update_scaler_plane - Stages update to scaler state for a given plane. * @crtc_state: crtc's scaler state * @plane_state: atomic plane state to update * * Return * 0 - scaler_usage updated successfully * error - requested scaling cannot be supported or other error condition */ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *intel_plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); struct drm_framebuffer *fb = plane_state->hw.fb; int ret; bool force_detach = !fb || !plane_state->uapi.visible; bool need_scaler = false; /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, drm_rect_width(&plane_state->uapi.src) >> 16, drm_rect_height(&plane_state->uapi.src) >> 16, drm_rect_width(&plane_state->uapi.dst), drm_rect_height(&plane_state->uapi.dst), fb ? fb->format : NULL, fb ? 
fb->modifier : 0, need_scaler); if (ret || plane_state->scaler_id < 0) return ret; /* check colorkey */ if (plane_state->ckey.flags) { drm_dbg_kms(&dev_priv->drm, "[PLANE:%d:%s] scaling with color key not allowed", intel_plane->base.base.id, intel_plane->base.name); return -EINVAL; } /* Check src format */ switch (fb->format->format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: case DRM_FORMAT_XYUV8888: case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU2101010: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: break; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: if (DISPLAY_VER(dev_priv) >= 11) break; fallthrough; default: drm_dbg_kms(&dev_priv->drm, "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", intel_plane->base.base.id, intel_plane->base.name, fb->base.id, fb->format->format); return -EINVAL; } return 0; } static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, int num_scalers_need, struct intel_crtc *intel_crtc, const char *name, int idx, struct intel_plane_state *plane_state, int *scaler_id) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); int j; u32 mode; if (*scaler_id < 0) { /* find a free scaler */ for (j = 0; j < intel_crtc->num_scalers; j++) { if (scaler_state->scalers[j].in_use) continue; *scaler_id = j; scaler_state->scalers[*scaler_id].in_use = 1; break; } } if (drm_WARN(&dev_priv->drm, *scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx)) return -EINVAL; /* set scaler mode */ if (plane_state && plane_state->hw.fb && plane_state->hw.fb->format->is_yuv && plane_state->hw.fb->format->num_planes > 1) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); if (DISPLAY_VER(dev_priv) == 9) { mode = SKL_PS_SCALER_MODE_NV12; } else if (icl_is_hdr_plane(dev_priv, plane->id)) { /* * On gen11+'s HDR planes we only use the scaler for * scaling. They have a dedicated chroma upsampler, so * we don't need the scaler to upsample the UV plane. */ mode = PS_SCALER_MODE_NORMAL; } else { struct intel_plane *linked = plane_state->planar_linked_plane; mode = PS_SCALER_MODE_PLANAR; if (linked) mode |= PS_BINDING_Y_PLANE(linked->id); } } else if (DISPLAY_VER(dev_priv) >= 10) { mode = PS_SCALER_MODE_NORMAL; } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { /* * when only 1 scaler is in use on a pipe with 2 scalers * scaler 0 operates in high quality (HQ) mode. * In this case use scaler 0 to take advantage of HQ mode */ scaler_state->scalers[*scaler_id].in_use = 0; *scaler_id = 0; scaler_state->scalers[0].in_use = 1; mode = SKL_PS_SCALER_MODE_HQ; } else { mode = SKL_PS_SCALER_MODE_DYN; } /* * FIXME: we should also check the scaler factors for pfit, so * this shouldn't be tied directly to planes. 
*/ if (plane_state && plane_state->hw.fb) { const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_rect *src = &plane_state->uapi.src; const struct drm_rect *dst = &plane_state->uapi.dst; int hscale, vscale, max_vscale, max_hscale; /* * FIXME: When two scalers are needed, but only one of * them needs to downscale, we should make sure that * the one that needs downscaling support is assigned * as the first scaler, so we don't reject downscaling * unnecessarily. */ if (DISPLAY_VER(dev_priv) >= 14) { /* * On versions 14 and up, only the first * scaler supports a vertical scaling factor * of more than 1.0, while a horizontal * scaling factor of 3.0 is supported. */ max_hscale = 0x30000 - 1; if (*scaler_id == 0) max_vscale = 0x30000 - 1; else max_vscale = 0x10000; } else if (DISPLAY_VER(dev_priv) >= 10 || !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { max_hscale = 0x30000 - 1; max_vscale = 0x30000 - 1; } else { max_hscale = 0x20000 - 1; max_vscale = 0x20000 - 1; } /* * FIXME: We should change the if-else block above to * support HQ vs dynamic scaler properly. */ /* Check if required scaling is within limits */ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale); vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale); if (hscale < 0 || vscale < 0) { drm_dbg_kms(&dev_priv->drm, "Scaler %d doesn't support required plane scaling\n", *scaler_id); drm_rect_debug_print("src: ", src, true); drm_rect_debug_print("dst: ", dst, false); return -EINVAL; } } drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", intel_crtc->pipe, *scaler_id, name, idx); scaler_state->scalers[*scaler_id].mode = mode; return 0; } /** * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests * @dev_priv: i915 device * @intel_crtc: intel crtc * @crtc_state: incoming crtc_state to validate and setup scalers * * This function sets up scalers based on staged scaling requests for * a @crtc and its planes. It is called from crtc level check path. If request * is a supportable request, it attaches scalers to requested planes and crtc. 
* * This function takes into account the current scaler(s) in use by any planes * not being part of this atomic state * * Returns: * 0 - scalers were setup successfully * error code - otherwise */ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) { struct drm_plane *plane = NULL; struct intel_plane *intel_plane; struct intel_plane_state *plane_state = NULL; struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; struct drm_atomic_state *drm_state = crtc_state->uapi.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); int num_scalers_need; int i; num_scalers_need = hweight32(scaler_state->scaler_users); /* * High level flow: * - staged scaler requests are already in scaler_state->scaler_users * - check whether staged scaling requests can be supported * - add planes using scalers that aren't in current transaction * - assign scalers to requested users * - as part of plane commit, scalers will be committed * (i.e., either attached or detached) to respective planes in hw * - as part of crtc_commit, scaler will be either attached or detached * to crtc in hw */ /* fail if required scalers > available scalers */ if (num_scalers_need > intel_crtc->num_scalers) { drm_dbg_kms(&dev_priv->drm, "Too many scaling requests %d > %d\n", num_scalers_need, intel_crtc->num_scalers); return -EINVAL; } /* walkthrough scaler_users bits and start assigning scalers */ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { int *scaler_id; const char *name; int idx, ret; /* skip if scaler not required */ if (!(scaler_state->scaler_users & (1 << i))) continue; if (i == SKL_CRTC_INDEX) { name = "CRTC"; idx = intel_crtc->base.base.id; /* panel fitter case: assign as a crtc scaler */ scaler_id = &scaler_state->scaler_id; } else { name = "PLANE"; /* plane scaler case: assign as a plane scaler */ /* find the plane that set the bit as scaler_user */ plane = drm_state->planes[i].ptr; /* * to enable/disable hq mode, add planes that are using scaler * into this transaction */ if (!plane) { struct drm_plane_state *state; /* * GLK+ scalers don't have a HQ mode so it * isn't necessary to change between HQ and dyn mode * on those platforms. */ if (DISPLAY_VER(dev_priv) >= 10) continue; plane = drm_plane_from_index(&dev_priv->drm, i); state = drm_atomic_get_plane_state(drm_state, plane); if (IS_ERR(state)) { drm_dbg_kms(&dev_priv->drm, "Failed to add [PLANE:%d] to drm_state\n", plane->base.id); return PTR_ERR(state); } } intel_plane = to_intel_plane(plane); idx = plane->base.id; /* plane on different crtc cannot be a scaler user of this crtc */ if (drm_WARN_ON(&dev_priv->drm, intel_plane->pipe != intel_crtc->pipe)) continue; plane_state = intel_atomic_get_new_plane_state(intel_state, intel_plane); scaler_id = &plane_state->scaler_id; } ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need, intel_crtc, name, idx, plane_state, scaler_id); if (ret < 0) return ret; } return 0; } static int glk_coef_tap(int i) { return i % 7; } static u16 glk_nearest_filter_coef(int t) { return t == 3 ? 0x0800 : 0x3000; } /* * Theory behind setting nearest-neighbor integer scaling: * * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set. * The letter represents the filter tap (D is the center tap) and the number * represents the coefficient set for a phase (0-16). 
* * +------------+------------------------+------------------------+ * |Index value | Data value coeffient 1 | Data value coeffient 2 | * +------------+------------------------+------------------------+ * | 00h | B0 | A0 | * +------------+------------------------+------------------------+ * | 01h | D0 | C0 | * +------------+------------------------+------------------------+ * | 02h | F0 | E0 | * +------------+------------------------+------------------------+ * | 03h | A1 | G0 | * +------------+------------------------+------------------------+ * | 04h | C1 | B1 | * +------------+------------------------+------------------------+ * | ... | ... | ... | * +------------+------------------------+------------------------+ * | 38h | B16 | A16 | * +------------+------------------------+------------------------+ * | 39h | D16 | C16 | * +------------+------------------------+------------------------+ * | 3Ah | F16 | C16 | * +------------+------------------------+------------------------+ * | 3Bh | Reserved | G16 | * +------------+------------------------+------------------------+ * * To enable nearest-neighbor scaling: program scaler coefficents with * the center tap (Dxx) values set to 1 and all other values set to 0 as per * SCALER_COEFFICIENT_FORMAT * */ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv, enum pipe pipe, int id, int set) { int i; intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), PS_COEF_INDEX_AUTO_INC); for (i = 0; i < 17 * 7; i += 2) { u32 tmp; int t; t = glk_coef_tap(i); tmp = glk_nearest_filter_coef(t); t = glk_coef_tap(i + 1); tmp |= glk_nearest_filter_coef(t) << 16; intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set), tmp); } intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0); } static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set) { if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) { return (PS_FILTER_PROGRAMMED | PS_Y_VERT_FILTER_SELECT(set) | PS_Y_HORZ_FILTER_SELECT(set) | PS_UV_VERT_FILTER_SELECT(set) | PS_UV_HORZ_FILTER_SELECT(set)); } return PS_FILTER_MEDIUM; } static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe, int id, int set, enum drm_scaling_filter filter) { switch (filter) { case DRM_SCALING_FILTER_DEFAULT: break; case DRM_SCALING_FILTER_NEAREST_NEIGHBOR: glk_program_nearest_filter_coefs(dev_priv, pipe, id, set); break; default: MISSING_CASE(filter); } } void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; const struct drm_rect *dst = &crtc_state->pch_pfit.dst; u16 uv_rgb_hphase, uv_rgb_vphase; enum pipe pipe = crtc->pipe; int width = drm_rect_width(dst); int height = drm_rect_height(dst); int x = dst->x1; int y = dst->y1; int hscale, vscale; struct drm_rect src; int id; u32 ps_ctrl; if (!crtc_state->pch_pfit.enabled) return; if (drm_WARN_ON(&dev_priv->drm, crtc_state->scaler_state.scaler_id < 0)) return; drm_rect_init(&src, 0, 0, drm_rect_width(&crtc_state->pipe_src) << 16, drm_rect_height(&crtc_state->pipe_src) << 16); hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX); vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); id = scaler_state->scaler_id; ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | 
scaler_state->scalers[id].mode | skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); skl_scaler_setup_filter(dev_priv, pipe, id, 0, crtc_state->hw.scaling_filter); intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl); intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), PS_WIN_XPOS(x) | PS_WIN_YPOS(y)); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height)); } void skl_program_plane_scaler(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler = &crtc_state->scaler_state.scalers[scaler_id]; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u16 y_hphase, uv_rgb_hphase; u16 y_vphase, uv_rgb_vphase; int hscale, vscale; u32 ps_ctrl; hscale = drm_rect_calc_hscale(&plane_state->uapi.src, &plane_state->uapi.dst, 0, INT_MAX); vscale = drm_rect_calc_vscale(&plane_state->uapi.src, &plane_state->uapi.dst, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); /* MPEG2 chroma siting convention */ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true); uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false); } else { /* not used */ y_hphase = 0; y_vphase = 0; uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); } ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode | skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0, plane_state->hw.scaling_filter); intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id), PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id), PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y)); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id), PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h)); } static void skl_detach_scaler(struct intel_crtc *crtc, int id) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0); intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0); intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0); } /* * This function detaches (aka. 
unbinds) unused scalers in hardware */ void skl_detach_scalers(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int i; /* loop through and disable scalers that aren't in use */ for (i = 0; i < crtc->num_scalers; i++) { if (!scaler_state->scalers[i].in_use) skl_detach_scaler(crtc, i); } } void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); int i; for (i = 0; i < crtc->num_scalers; i++) skl_detach_scaler(crtc, i); } void skl_scaler_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int id = -1; int i; /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { u32 ctl, pos, size; ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE)) continue; id = i; crtc_state->pch_pfit.enabled = true; pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i)); size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i)); drm_rect_init(&crtc_state->pch_pfit.dst, REG_FIELD_GET(PS_WIN_XPOS_MASK, pos), REG_FIELD_GET(PS_WIN_YPOS_MASK, pos), REG_FIELD_GET(PS_WIN_XSIZE_MASK, size), REG_FIELD_GET(PS_WIN_YSIZE_MASK, size)); scaler_state->scalers[i].in_use = true; break; } scaler_state->scaler_id = id; if (id >= 0) scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); else scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); }
linux-master
drivers/gpu/drm/i915/display/skl_scaler.c
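A minimal illustrative sketch, not part of skl_scaler.c above: it mirrors the write loop of glk_program_nearest_filter_coefs(), packing the 17-phase x 7-tap nearest-neighbor coefficient set two entries per 32-bit data word as laid out in the SCALER_COEFFICIENT_FORMAT index table. COEF_ONE/COEF_ZERO are placeholder encodings; the real sign/exponent/mantissa values come from glk_nearest_filter_coef() in the driver.

#include <stdint.h>
#include <stddef.h>

#define NUM_TAPS	7
#define NUM_PHASES	17
#define COEF_ONE	0x0800	/* placeholder encoding of 1.0 */
#define COEF_ZERO	0x0000	/* placeholder encoding of 0.0 */

/* Nearest neighbor: only the center tap (tap 3 of 7, the "Dxx" column) is 1. */
static uint16_t nn_coef(int index)
{
	return (index % NUM_TAPS) == 3 ? COEF_ONE : COEF_ZERO;
}

/* Pack two consecutive coefficients per data word, low halfword first,
 * exactly as the auto-incrementing index/data write loop does. */
static size_t nn_coef_words(uint32_t *words, size_t max_words)
{
	size_t n = 0;
	int i;

	for (i = 0; i < NUM_TAPS * NUM_PHASES && n < max_words; i += 2, n++)
		words[n] = nn_coef(i) | ((uint32_t)nn_coef(i + 1) << 16);

	return n;
}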
/* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * Thomas Richter <[email protected]> * * Minor modifications (Dithering enable): * Thomas Richter <[email protected]> * */ #include "intel_display_types.h" #include "intel_dvo_dev.h" /* * register definitions for the i82807aa. * * Documentation on this chipset can be found in datasheet #29069001 at * intel.com. */ /* * VCH Revision & GMBus Base Addr */ #define VR00 0x00 # define VR00_BASE_ADDRESS_MASK 0x007f /* * Functionality Enable */ #define VR01 0x01 /* * Enable the panel fitter */ # define VR01_PANEL_FIT_ENABLE (1 << 3) /* * Enables the LCD display. * * This must not be set while VR01_DVO_BYPASS_ENABLE is set. */ # define VR01_LCD_ENABLE (1 << 2) /* Enables the DVO repeater. */ # define VR01_DVO_BYPASS_ENABLE (1 << 1) /* Enables the DVO clock */ # define VR01_DVO_ENABLE (1 << 0) /* Enable dithering for 18bpp panels. Not documented. */ # define VR01_DITHER_ENABLE (1 << 4) /* * LCD Interface Format */ #define VR10 0x10 /* Enables LVDS output instead of CMOS */ # define VR10_LVDS_ENABLE (1 << 4) /* Enables 18-bit LVDS output. */ # define VR10_INTERFACE_1X18 (0 << 2) /* Enables 24-bit LVDS or CMOS output */ # define VR10_INTERFACE_1X24 (1 << 2) /* Enables 2x18-bit LVDS or CMOS output. */ # define VR10_INTERFACE_2X18 (2 << 2) /* Enables 2x24-bit LVDS output */ # define VR10_INTERFACE_2X24 (3 << 2) /* Mask that defines the depth of the pipeline */ # define VR10_INTERFACE_DEPTH_MASK (3 << 2) /* * VR20 LCD Horizontal Display Size */ #define VR20 0x20 /* * LCD Vertical Display Size */ #define VR21 0x21 /* * Panel power down status */ #define VR30 0x30 /* Read only bit indicating that the panel is not in a safe poweroff state. 
*/ # define VR30_PANEL_ON (1 << 15) #define VR40 0x40 # define VR40_STALL_ENABLE (1 << 13) # define VR40_VERTICAL_INTERP_ENABLE (1 << 12) # define VR40_ENHANCED_PANEL_FITTING (1 << 11) # define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10) # define VR40_AUTO_RATIO_ENABLE (1 << 9) # define VR40_CLOCK_GATING_ENABLE (1 << 8) /* * Panel Fitting Vertical Ratio * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2 */ #define VR41 0x41 /* * Panel Fitting Horizontal Ratio * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2 */ #define VR42 0x42 /* * Horizontal Image Size */ #define VR43 0x43 /* VR80 GPIO 0 */ #define VR80 0x80 #define VR81 0x81 #define VR82 0x82 #define VR83 0x83 #define VR84 0x84 #define VR85 0x85 #define VR86 0x86 #define VR87 0x87 /* VR88 GPIO 8 */ #define VR88 0x88 /* Graphics BIOS scratch 0 */ #define VR8E 0x8E # define VR8E_PANEL_TYPE_MASK (0xf << 0) # define VR8E_PANEL_INTERFACE_CMOS (0 << 4) # define VR8E_PANEL_INTERFACE_LVDS (1 << 4) # define VR8E_FORCE_DEFAULT_PANEL (1 << 5) /* Graphics BIOS scratch 1 */ #define VR8F 0x8F # define VR8F_VCH_PRESENT (1 << 0) # define VR8F_DISPLAY_CONN (1 << 1) # define VR8F_POWER_MASK (0x3c) # define VR8F_POWER_POS (2) /* Some Bios implementations do not restore the DVO state upon * resume from standby. Thus, this driver has to handle it * instead. The following list contains all registers that * require saving. */ static const u16 backup_addresses[] = { 0x11, 0x12, 0x18, 0x19, 0x1a, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x8e, 0x8f, 0x10 /* this must come last */ }; struct ivch_priv { bool quiet; u16 width, height; /* Register backup */ u16 reg_backup[ARRAY_SIZE(backup_addresses)]; }; static void ivch_dump_regs(struct intel_dvo_device *dvo); /* * Reads a register on the ivch. * * Each of the 256 registers are 16 bits long. 
*/ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[1]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 0, }, { .addr = 0, .flags = I2C_M_NOSTART, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD | I2C_M_NOSTART, .len = 2, .buf = in_buf, } }; out_buf[0] = addr; if (i2c_transfer(adapter, msgs, 3) == 3) { *data = (in_buf[1] << 8) | in_buf[0]; return true; } if (!priv->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from " "%s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* Writes a 16-bit register on the ivch */ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[3]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 3, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = data & 0xff; out_buf[2] = data >> 8; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!priv->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* Probes the given bus and slave address for an ivch */ static bool ivch_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { struct ivch_priv *priv; u16 temp; int i; priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); if (priv == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = priv; priv->quiet = true; if (!ivch_read(dvo, VR00, &temp)) goto out; priv->quiet = false; /* Since the identification bits are probably zeroes, which doesn't seem * very unique, check that the value in the base address field matches * the address it's responding on. */ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { DRM_DEBUG_KMS("ivch detect failed due to address mismatch " "(%d vs %d)\n", (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); goto out; } ivch_read(dvo, VR20, &priv->width); ivch_read(dvo, VR21, &priv->height); /* Make a backup of the registers to be able to restore them * upon suspend. */ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) ivch_read(dvo, backup_addresses[i], priv->reg_backup + i); ivch_dump_regs(dvo); return true; out: kfree(priv); return false; } static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo) { return connector_status_connected; } static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { if (mode->clock > 112000) return MODE_CLOCK_HIGH; return MODE_OK; } /* Restore the DVO registers after a resume * from RAM. Registers have been saved during * the initialization. */ static void ivch_reset(struct intel_dvo_device *dvo) { struct ivch_priv *priv = dvo->dev_priv; int i; DRM_DEBUG_KMS("Resetting the IVCH registers\n"); ivch_write(dvo, VR10, 0x0000); for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]); } /* Sets the power state of the panel connected to the ivch */ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) { int i; u16 vr01, vr30, backlight; ivch_reset(dvo); /* Set the new power state of the panel. 
*/ if (!ivch_read(dvo, VR01, &vr01)) return; if (enable) backlight = 1; else backlight = 0; ivch_write(dvo, VR80, backlight); if (enable) vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; else vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); ivch_write(dvo, VR01, vr01); /* Wait for the panel to make its state transition */ for (i = 0; i < 100; i++) { if (!ivch_read(dvo, VR30, &vr30)) break; if (((vr30 & VR30_PANEL_ON) != 0) == enable) break; udelay(1000); } /* wait some more; vch may fail to resync sometimes without this */ udelay(16 * 1000); } static bool ivch_get_hw_state(struct intel_dvo_device *dvo) { u16 vr01; ivch_reset(dvo); /* Set the new power state of the panel. */ if (!ivch_read(dvo, VR01, &vr01)) return false; if (vr01 & VR01_LCD_ENABLE) return true; else return false; } static void ivch_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct ivch_priv *priv = dvo->dev_priv; u16 vr40 = 0; u16 vr01 = 0; u16 vr10; ivch_reset(dvo); vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1]; /* Enable dithering for 18 bpp pipelines */ vr10 &= VR10_INTERFACE_DEPTH_MASK; if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18) vr01 = VR01_DITHER_ENABLE; vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | VR40_HORIZONTAL_INTERP_ENABLE); if (mode->hdisplay != adjusted_mode->crtc_hdisplay || mode->vdisplay != adjusted_mode->crtc_vdisplay) { u16 x_ratio, y_ratio; vr01 |= VR01_PANEL_FIT_ENABLE; vr40 |= VR40_CLOCK_GATING_ENABLE; x_ratio = (((mode->hdisplay - 1) << 16) / (adjusted_mode->crtc_hdisplay - 1)) >> 2; y_ratio = (((mode->vdisplay - 1) << 16) / (adjusted_mode->crtc_vdisplay - 1)) >> 2; ivch_write(dvo, VR42, x_ratio); ivch_write(dvo, VR41, y_ratio); } else { vr01 &= ~VR01_PANEL_FIT_ENABLE; vr40 &= ~VR40_CLOCK_GATING_ENABLE; } vr40 &= ~VR40_AUTO_RATIO_ENABLE; ivch_write(dvo, VR01, vr01); ivch_write(dvo, VR40, vr40); } static void ivch_dump_regs(struct intel_dvo_device *dvo) { u16 val; ivch_read(dvo, VR00, &val); DRM_DEBUG_KMS("VR00: 0x%04x\n", val); ivch_read(dvo, VR01, &val); DRM_DEBUG_KMS("VR01: 0x%04x\n", val); ivch_read(dvo, VR10, &val); DRM_DEBUG_KMS("VR10: 0x%04x\n", val); ivch_read(dvo, VR30, &val); DRM_DEBUG_KMS("VR30: 0x%04x\n", val); ivch_read(dvo, VR40, &val); DRM_DEBUG_KMS("VR40: 0x%04x\n", val); /* GPIO registers */ ivch_read(dvo, VR80, &val); DRM_DEBUG_KMS("VR80: 0x%04x\n", val); ivch_read(dvo, VR81, &val); DRM_DEBUG_KMS("VR81: 0x%04x\n", val); ivch_read(dvo, VR82, &val); DRM_DEBUG_KMS("VR82: 0x%04x\n", val); ivch_read(dvo, VR83, &val); DRM_DEBUG_KMS("VR83: 0x%04x\n", val); ivch_read(dvo, VR84, &val); DRM_DEBUG_KMS("VR84: 0x%04x\n", val); ivch_read(dvo, VR85, &val); DRM_DEBUG_KMS("VR85: 0x%04x\n", val); ivch_read(dvo, VR86, &val); DRM_DEBUG_KMS("VR86: 0x%04x\n", val); ivch_read(dvo, VR87, &val); DRM_DEBUG_KMS("VR87: 0x%04x\n", val); ivch_read(dvo, VR88, &val); DRM_DEBUG_KMS("VR88: 0x%04x\n", val); /* Scratch register 0 - AIM Panel type */ ivch_read(dvo, VR8E, &val); DRM_DEBUG_KMS("VR8E: 0x%04x\n", val); /* Scratch register 1 - Status register */ ivch_read(dvo, VR8F, &val); DRM_DEBUG_KMS("VR8F: 0x%04x\n", val); } static void ivch_destroy(struct intel_dvo_device *dvo) { struct ivch_priv *priv = dvo->dev_priv; if (priv) { kfree(priv); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops ivch_ops = { .init = ivch_init, .dpms = ivch_dpms, .get_hw_state = ivch_get_hw_state, .mode_valid = ivch_mode_valid, .mode_set = ivch_mode_set, .detect = ivch_detect, .dump_regs = ivch_dump_regs, .destroy = 
ivch_destroy, };
linux-master
drivers/gpu/drm/i915/display/dvo_ivch.c
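A minimal illustrative sketch, not part of dvo_ivch.c above: the VR41/VR42 panel-fitting ratio formula documented in the register comments, computed the same way ivch_mode_set() does when the source mode is smaller than the panel's native timing. The helper name and the example resolutions are illustrative only.

#include <stdint.h>

/* (((image - 1) << 16) / (panel - 1)) >> 2, per the VR41/VR42 comments */
static uint16_t ivch_fit_ratio(int image_size, int panel_size)
{
	return (uint16_t)((((image_size - 1) << 16) / (panel_size - 1)) >> 2);
}

/*
 * Example: an 800x600 image scaled onto a 1024x768 panel would program
 *   VR42 (horizontal ratio) = ivch_fit_ratio(800, 1024)
 *   VR41 (vertical ratio)   = ivch_fit_ratio(600, 768)
 */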
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> #include <drm/drm_vblank_work.h> #include "i915_vgpu.h" #include "i9xx_plane.h" #include "icl_dsi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_cursor.h" #include "intel_display_debugfs.h" #include "intel_display_irq.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_dsi.h" #include "intel_fifo_underrun.h" #include "intel_pipe_crc.h" #include "intel_psr.h" #include "intel_sprite.h" #include "intel_vblank.h" #include "intel_vrr.h" #include "skl_universal_plane.h" static void assert_vblank_disabled(struct drm_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->dev); if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0, "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n", crtc->base.id, crtc->name)) drm_crtc_vblank_put(crtc); } struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915) { return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0)); } struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe) { struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) { if (crtc->pipe == pipe) return crtc; } return NULL; } void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc) { drm_crtc_wait_one_vblank(&crtc->base); } void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); if (crtc->active) intel_crtc_wait_for_next_vblank(crtc); } u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; if (!crtc->active) return 0; if (!vblank->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); return crtc->base.funcs->get_vblank_counter(&crtc->base); } u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* * From Gen 11, In case of dsi cmd mode, frame counter wouldnt * have updated at the beginning of TE, if we want to use * the hw counter, then we would find it updated in only * the next TE, hence switching to sw counter. */ if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1)) return 0; /* * On i965gm the hardware frame counter reads * zero when the TV encoder is enabled :( */ if (IS_I965GM(dev_priv) && (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT))) return 0; if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) return 0xffffffff; /* full 32 bit counter */ else if (DISPLAY_VER(dev_priv) >= 3) return 0xffffff; /* only 24 bits of frame count */ else return 0; /* Gen2 doesn't have a hardware frame counter */ } void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); assert_vblank_disabled(&crtc->base); drm_crtc_set_max_vblank_count(&crtc->base, intel_crtc_max_vblank_count(crtc_state)); drm_crtc_vblank_on(&crtc->base); /* * Should really happen exactly when we enable the pipe * but we want the frame counters in the trace, and that * requires vblank support on some platforms/outputs. 
*/ trace_intel_pipe_enable(crtc); } void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* * Should really happen exactly when we disable the pipe * but we want the frame counters in the trace, and that * requires vblank support on some platforms/outputs. */ trace_intel_pipe_disable(crtc); drm_crtc_vblank_off(&crtc->base); assert_vblank_disabled(&crtc->base); } struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state; crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); if (crtc_state) intel_crtc_state_reset(crtc_state, crtc); return crtc_state; } void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, struct intel_crtc *crtc) { memset(crtc_state, 0, sizeof(*crtc_state)); __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); crtc_state->cpu_transcoder = INVALID_TRANSCODER; crtc_state->master_transcoder = INVALID_TRANSCODER; crtc_state->hsw_workaround_pipe = INVALID_PIPE; crtc_state->scaler_state.scaler_id = -1; crtc_state->mst_master_transcoder = INVALID_TRANSCODER; } static struct intel_crtc *intel_crtc_alloc(void) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); if (!crtc) return ERR_PTR(-ENOMEM); crtc_state = intel_crtc_state_alloc(crtc); if (!crtc_state) { kfree(crtc); return ERR_PTR(-ENOMEM); } crtc->base.state = &crtc_state->uapi; crtc->config = crtc_state; return crtc; } static void intel_crtc_free(struct intel_crtc *crtc) { intel_crtc_destroy_state(&crtc->base, crtc->base.state); kfree(crtc); } static void intel_crtc_destroy(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); cpu_latency_qos_remove_request(&crtc->vblank_pm_qos); drm_crtc_cleanup(&crtc->base); kfree(crtc); } static int intel_crtc_late_register(struct drm_crtc *crtc) { intel_crtc_debugfs_add(to_intel_crtc(crtc)); return 0; } #define INTEL_CRTC_FUNCS \ .set_config = drm_atomic_helper_set_config, \ .destroy = intel_crtc_destroy, \ .page_flip = drm_atomic_helper_page_flip, \ .atomic_duplicate_state = intel_crtc_duplicate_state, \ .atomic_destroy_state = intel_crtc_destroy_state, \ .set_crc_source = intel_crtc_set_crc_source, \ .verify_crc_source = intel_crtc_verify_crc_source, \ .get_crc_sources = intel_crtc_get_crc_sources, \ .late_register = intel_crtc_late_register static const struct drm_crtc_funcs bdw_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = bdw_enable_vblank, .disable_vblank = bdw_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs ilk_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = ilk_enable_vblank, .disable_vblank = ilk_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs g4x_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = i965_enable_vblank, .disable_vblank = i965_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i965_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = i915_get_vblank_counter, .enable_vblank = i965_enable_vblank, .disable_vblank = i965_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i915gm_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = 
i915_get_vblank_counter, .enable_vblank = i915gm_enable_vblank, .disable_vblank = i915gm_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i915_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = i915_get_vblank_counter, .enable_vblank = i8xx_enable_vblank, .disable_vblank = i8xx_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i8xx_crtc_funcs = { INTEL_CRTC_FUNCS, /* no hw vblank counter */ .enable_vblank = i8xx_enable_vblank, .disable_vblank = i8xx_disable_vblank, .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *primary, *cursor; const struct drm_crtc_funcs *funcs; struct intel_crtc *crtc; int sprite, ret; crtc = intel_crtc_alloc(); if (IS_ERR(crtc)) return PTR_ERR(crtc); crtc->pipe = pipe; crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe]; if (DISPLAY_VER(dev_priv) >= 9) primary = skl_universal_plane_create(dev_priv, pipe, PLANE_PRIMARY); else primary = intel_primary_plane_create(dev_priv, pipe); if (IS_ERR(primary)) { ret = PTR_ERR(primary); goto fail; } crtc->plane_ids_mask |= BIT(primary->id); intel_init_fifo_underrun_reporting(dev_priv, crtc, false); for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; if (DISPLAY_VER(dev_priv) >= 9) plane = skl_universal_plane_create(dev_priv, pipe, PLANE_SPRITE0 + sprite); else plane = intel_sprite_plane_create(dev_priv, pipe, sprite); if (IS_ERR(plane)) { ret = PTR_ERR(plane); goto fail; } crtc->plane_ids_mask |= BIT(plane->id); } cursor = intel_cursor_plane_create(dev_priv, pipe); if (IS_ERR(cursor)) { ret = PTR_ERR(cursor); goto fail; } crtc->plane_ids_mask |= BIT(cursor->id); if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) funcs = &g4x_crtc_funcs; else if (DISPLAY_VER(dev_priv) == 4) funcs = &i965_crtc_funcs; else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) funcs = &i915gm_crtc_funcs; else if (DISPLAY_VER(dev_priv) == 3) funcs = &i915_crtc_funcs; else funcs = &i8xx_crtc_funcs; } else { if (DISPLAY_VER(dev_priv) >= 8) funcs = &bdw_crtc_funcs; else funcs = &ilk_crtc_funcs; } ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base, &primary->base, &cursor->base, funcs, "pipe %c", pipe_name(pipe)); if (ret) goto fail; if (DISPLAY_VER(dev_priv) >= 11) drm_crtc_create_scaling_filter_property(&crtc->base, BIT(DRM_SCALING_FILTER_DEFAULT) | BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); intel_color_crtc_init(crtc); intel_drrs_crtc_init(crtc); intel_crtc_crc_init(crtc); cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); return 0; fail: intel_crtc_free(crtc); return ret; } static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) { return crtc_state->hw.active && !intel_crtc_needs_modeset(crtc_state) && !crtc_state->preload_luts && intel_crtc_needs_color_update(crtc_state); } static void intel_crtc_vblank_work(struct kthread_work *base) { struct drm_vblank_work *work = to_drm_vblank_work(base); struct intel_crtc_state *crtc_state = container_of(work, typeof(*crtc_state), vblank_work); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_crtc_vblank_work_start(crtc); intel_color_load_luts(crtc_state); if (crtc_state->uapi.event) { spin_lock_irq(&crtc->base.dev->event_lock); 
drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event); crtc_state->uapi.event = NULL; spin_unlock_irq(&crtc->base.dev->event_lock); } trace_intel_crtc_vblank_work_end(crtc); } static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base, intel_crtc_vblank_work); /* * Interrupt latency is critical for getting the vblank * work executed as early as possible during the vblank. */ cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0); } void intel_wait_for_vblank_workers(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!intel_crtc_needs_vblank_work(crtc_state)) continue; drm_vblank_work_flush(&crtc_state->vblank_work); cpu_latency_qos_update_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); } } int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { /* paranoia */ if (!adjusted_mode->crtc_htotal) return 1; return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, 1000 * adjusted_mode->crtc_htotal); } static int intel_mode_vblank_start(const struct drm_display_mode *mode) { int vblank_start = mode->crtc_vblank_start; if (mode->flags & DRM_MODE_FLAG_INTERLACE) vblank_start = DIV_ROUND_UP(vblank_start, 2); return vblank_start; } /** * intel_pipe_update_start() - start update of a set of display registers * @new_crtc_state: the new crtc state * * Mark the start of an update to pipe registers that should be updated * atomically regarding vblank. If the next vblank will happens within * the next 100 us, this function waits until the vblank passes. * * After a successful call to this function, interrupts will be disabled * until a subsequent call to intel_pipe_update_end(). That is done to * avoid random delays. */ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); DEFINE_WAIT(wait); intel_psr_lock(new_crtc_state); if (new_crtc_state->do_async_flip) return; if (intel_crtc_needs_vblank_work(new_crtc_state)) intel_crtc_vblank_work_init(new_crtc_state); if (new_crtc_state->vrr.enable) { if (intel_vrr_is_push_sent(new_crtc_state)) vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state); else vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); } else { vblank_start = intel_mode_vblank_start(adjusted_mode); } /* FIXME needs to be calibrated sensibly */ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, VBLANK_EVASION_TIME_US); max = vblank_start - 1; /* * M/N is double buffered on the transcoder's undelayed vblank, * so with seamless M/N we must evade both vblanks. 
*/ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay; if (min <= 0 || max <= 0) goto irq_disable; if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base))) goto irq_disable; /* * Wait for psr to idle out after enabling the VBL interrupts * VBL interrupts will start the PSR exit and prevent a PSR * re-entry as well. */ intel_psr_wait_for_idle_locked(new_crtc_state); local_irq_disable(); crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; trace_intel_pipe_update_start(crtc); for (;;) { /* * prepare_to_wait() has a memory barrier, which guarantees * other CPUs can see the task state update by the time we * read the scanline. */ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); scanline = intel_get_crtc_scanline(crtc); if (scanline < min || scanline > max) break; if (!timeout) { drm_err(&dev_priv->drm, "Potential atomic update failure on pipe %c\n", pipe_name(crtc->pipe)); break; } local_irq_enable(); timeout = schedule_timeout(timeout); local_irq_disable(); } finish_wait(wq, &wait); drm_crtc_vblank_put(&crtc->base); /* * On VLV/CHV DSI the scanline counter would appear to * increment approx. 1/3 of a scanline before start of vblank. * The registers still get latched at start of vblank however. * This means we must not write any registers on the first * line of vblank (since not the whole line is actually in * vblank). And unfortunately we can't use the interrupt to * wait here since it will fire too soon. We could use the * frame start interrupt instead since it will fire after the * critical scanline, but that would require more changes * in the interrupt code. So for now we'll just do the nasty * thing and poll for the bad scanline to pass us by. * * FIXME figure out if BXT+ DSI suffers from this as well */ while (need_vlv_dsi_wa && scanline == vblank_start) scanline = intel_get_crtc_scanline(crtc); crtc->debug.scanline_start = scanline; crtc->debug.start_vbl_time = ktime_get(); crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_intel_pipe_update_vblank_evaded(crtc); return; irq_disable: local_irq_disable(); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) { u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time)); unsigned int h; h = ilog2(delta >> 9); if (h >= ARRAY_SIZE(crtc->debug.vbl.times)) h = ARRAY_SIZE(crtc->debug.vbl.times) - 1; crtc->debug.vbl.times[h]++; crtc->debug.vbl.sum += delta; if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min) crtc->debug.vbl.min = delta; if (delta > crtc->debug.vbl.max) crtc->debug.vbl.max = delta; if (delta > 1000 * VBLANK_EVASION_TIME_US) { drm_dbg_kms(crtc->base.dev, "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", pipe_name(crtc->pipe), div_u64(delta, 1000), VBLANK_EVASION_TIME_US); crtc->debug.vbl.over++; } } #else static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {} #endif /** * intel_pipe_update_end() - end update of a set of display registers * @new_crtc_state: the new crtc state * * Mark the end of an update started with intel_pipe_update_start(). This * re-enables interrupts and verifies the update was actually completed * before a vblank. 
*/ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); ktime_t end_vbl_time = ktime_get(); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); intel_psr_unlock(new_crtc_state); if (new_crtc_state->do_async_flip) return; trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end); /* * Incase of mipi dsi command mode, we need to set frame update * request for every commit. */ if (DISPLAY_VER(dev_priv) >= 11 && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) icl_dsi_frame_update(new_crtc_state); /* We're still in the vblank-evade critical section, this can't race. * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a * while ... */ if (intel_crtc_needs_vblank_work(new_crtc_state)) { drm_vblank_work_schedule(&new_crtc_state->vblank_work, drm_crtc_accurate_vblank_count(&crtc->base) + 1, false); } else if (new_crtc_state->uapi.event) { drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base) != 0); spin_lock(&crtc->base.dev->event_lock); drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->uapi.event); spin_unlock(&crtc->base.dev->event_lock); new_crtc_state->uapi.event = NULL; } /* * Send VRR Push to terminate Vblank. If we are already in vblank * this has to be done _after_ sampling the frame counter, as * otherwise the push would immediately terminate the vblank and * the sampled frame counter would correspond to the next frame * instead of the current frame. * * There is a tiny race here (iff vblank evasion failed us) where * we might sample the frame counter just before vmax vblank start * but the push would be sent just after it. That would cause the * push to affect the next frame instead of the current frame, * which would cause the next frame to terminate already at vmin * vblank start instead of vmax vblank start. */ intel_vrr_send_push(new_crtc_state); /* * Seamless M/N update may need to update frame timings. * * FIXME Should be synchronized with the start of vblank somehow... */ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) intel_crtc_update_active_timings(new_crtc_state, new_crtc_state->vrr.enable); local_irq_enable(); if (intel_vgpu_active(dev_priv)) return; if (crtc->debug.start_vbl_count && crtc->debug.start_vbl_count != end_vbl_count) { drm_err(&dev_priv->drm, "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n", pipe_name(pipe), crtc->debug.start_vbl_count, end_vbl_count, ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time), crtc->debug.min_vbl, crtc->debug.max_vbl, crtc->debug.scanline_start, scanline_end); } dbg_vblank_evade(crtc, end_vbl_time); }
linux-master
drivers/gpu/drm/i915/display/intel_crtc.c
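A minimal illustrative sketch, not part of intel_crtc.c above: the microseconds-to-scanlines conversion that intel_pipe_update_start() uses to size its vblank-evasion window. The 100 us window and the mode numbers in the usage note are example values, not taken from the driver.

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same arithmetic as intel_usecs_to_scanlines(); crtc_clock is in kHz. */
static int usecs_to_scanlines(int crtc_clock_khz, int crtc_htotal, int usecs)
{
	if (!crtc_htotal)	/* paranoia, mirrors the kernel's check */
		return 1;

	return DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * crtc_htotal);
}

/*
 * Example: a 148500 kHz pixel clock with htotal 2200 gives
 *   usecs_to_scanlines(148500, 2200, 100) == 7
 * so register writes are held off while the scanline lies within
 * [vblank_start - 7, vblank_start - 1], matching the min/max window
 * computed in intel_pipe_update_start().
 */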
/* * Copyright © 2013 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Jani Nikula <[email protected]> */ #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" #include "skl_scaler.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" #include "vlv_sideband.h" /* return pixels in terms of txbyteclkhs */ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, u16 burst_mode_ratio) { return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, 8 * 100), lane_count); } /* return pixels equvalent to txbyteclkhs */ static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count, u16 burst_mode_ratio) { return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100), (bpp * burst_mode_ratio)); } enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) { /* It just so happens the VBT matches register contents. 
*/ switch (fmt) { case VID_MODE_FORMAT_RGB888: return MIPI_DSI_FMT_RGB888; case VID_MODE_FORMAT_RGB666: return MIPI_DSI_FMT_RGB666; case VID_MODE_FORMAT_RGB666_PACKED: return MIPI_DSI_FMT_RGB666_PACKED; case VID_MODE_FORMAT_RGB565: return MIPI_DSI_FMT_RGB565; default: MISSING_CASE(fmt); return MIPI_DSI_FMT_RGB666; } } void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port), mask, 100)) drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n"); } static void write_data(struct drm_i915_private *dev_priv, i915_reg_t reg, const u8 *data, u32 len) { u32 i, j; for (i = 0; i < len; i += 4) { u32 val = 0; for (j = 0; j < min_t(u32, len - i, 4); j++) val |= *data++ << 8 * j; intel_de_write(dev_priv, reg, val); } } static void read_data(struct drm_i915_private *dev_priv, i915_reg_t reg, u8 *data, u32 len) { u32 i, j; for (i = 0; i < len; i += 4) { u32 val = intel_de_read(dev_priv, reg); for (j = 0; j < min_t(u32, len - i, 4); j++) *data++ = val >> 8 * j; } } static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dsi_host->port; struct mipi_dsi_packet packet; ssize_t ret; const u8 *header; i915_reg_t data_reg, ctrl_reg; u32 data_mask, ctrl_mask; ret = mipi_dsi_create_packet(&packet, msg); if (ret < 0) return ret; header = packet.header; if (msg->flags & MIPI_DSI_MSG_USE_LPM) { data_reg = MIPI_LP_GEN_DATA(port); data_mask = LP_DATA_FIFO_FULL; ctrl_reg = MIPI_LP_GEN_CTRL(port); ctrl_mask = LP_CTRL_FIFO_FULL; } else { data_reg = MIPI_HS_GEN_DATA(port); data_mask = HS_DATA_FIFO_FULL; ctrl_reg = MIPI_HS_GEN_CTRL(port); ctrl_mask = HS_CTRL_FIFO_FULL; } /* note: this is never true for reads */ if (packet.payload_length) { if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), data_mask, 50)) drm_err(&dev_priv->drm, "Timeout waiting for HS/LP DATA FIFO !full\n"); write_data(dev_priv, data_reg, packet.payload, packet.payload_length); } if (msg->rx_len) { intel_de_write(dev_priv, MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); } if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), ctrl_mask, 50)) { drm_err(&dev_priv->drm, "Timeout waiting for HS/LP CTRL FIFO !full\n"); } intel_de_write(dev_priv, ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]); /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), data_mask, 50)) drm_err(&dev_priv->drm, "Timeout waiting for read data.\n"); read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); } /* XXX: fix for reads and writes */ return 4 + packet.payload_length; } static int intel_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dsi) { return 0; } static int intel_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dsi) { return 0; } static const struct mipi_dsi_host_ops intel_dsi_host_ops = { .attach = intel_dsi_host_attach, .detach = intel_dsi_host_detach, .transfer = intel_dsi_host_transfer, }; /* * send a video mode command * * XXX: commands with data in 
MIPI_DPI_DATA? */ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; /* XXX: pipe, hs */ if (hs) cmd &= ~DPI_LP_MODE; else cmd |= DPI_LP_MODE; /* clear bit */ intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT); /* XXX: old code skips write if control unchanged */ if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port))) drm_dbg_kms(&dev_priv->drm, "Same special packet %02x twice in a row.\n", cmd); intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100)) drm_err(&dev_priv->drm, "Video mode command 0x%08x send failed.\n", cmd); return 0; } static void band_gap_reset(struct drm_i915_private *dev_priv) { vlv_flisdsi_get(dev_priv); vlv_flisdsi_write(dev_priv, 0x08, 0x0001); vlv_flisdsi_write(dev_priv, 0x0F, 0x0005); vlv_flisdsi_write(dev_priv, 0x0F, 0x0025); udelay(150); vlv_flisdsi_write(dev_priv, 0x0F, 0x0000); vlv_flisdsi_write(dev_priv, 0x08, 0x0000); vlv_flisdsi_put(dev_priv); } static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; ret = intel_panel_compute_config(intel_connector, adjusted_mode); if (ret) return ret; ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) pipe_config->pipe_bpp = 24; else pipe_config->pipe_bpp = 18; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { /* Enable Frame time stamp based scanline reporting */ pipe_config->mode_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; /* Dual link goes to DSI transcoder A. */ if (intel_dsi->ports == BIT(PORT_C)) pipe_config->cpu_transcoder = TRANSCODER_DSI_C; else pipe_config->cpu_transcoder = TRANSCODER_DSI_A; ret = bxt_dsi_pll_compute(encoder, pipe_config); if (ret) return -EINVAL; } else { ret = vlv_dsi_pll_compute(encoder, pipe_config); if (ret) return -EINVAL; } pipe_config->clock_set = true; return 0; } static bool glk_dsi_enable_io(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; bool cold_boot = false; /* Set the MIPI mode * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting. 
* Power ON MIPI IO first and then write into IO reset and LP wake bits */ for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE); /* Put the IO into reset */ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Program LP Wake */ for_each_dsi_port(port, intel_dsi->ports) { u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0); } /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 20)) drm_err(&dev_priv->drm, "MIPIO port is powergated\n"); } /* Check for cold boot scenario */ for_each_dsi_port(port, intel_dsi->ports) { cold_boot |= !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY); } return cold_boot; } static void glk_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) drm_err(&dev_priv->drm, "PHY is not ON\n"); } /* Get IO out of reset */ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); /* Get IO out of Low power state*/ for_each_dsi_port(port, intel_dsi->ports) { if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, DEVICE_READY); usleep_range(10, 15); } else { /* Enter ULPS */ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for ULPS active */ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 20)) drm_err(&dev_priv->drm, "ULPS not active\n"); /* Exit ULPS */ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY); /* Enter Normal Mode */ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0); } } /* Wait for Stop state */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE, 20)) drm_err(&dev_priv->drm, "Date lane not in STOP state\n"); } /* Wait for AFE LATCH */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT, 20)) drm_err(&dev_priv->drm, "D-PHY not entering LP-11 state\n"); } } static void bxt_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); /* Enable MIPI PHY transparent latch */ for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); usleep_range(2000, 2500); } /* Clear ULPS and set device ready */ for_each_dsi_port(port, intel_dsi->ports) { val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); usleep_range(2000, 2500); val |= DEVICE_READY; intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); } } static void vlv_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); vlv_flisdsi_get(dev_priv); /* program rcomp for compliance, reduce from 50 ohms to 45 ohms * needed everytime after power gate */ vlv_flisdsi_write(dev_priv, 0x04, 0x0004); vlv_flisdsi_put(dev_priv); /* bandgap reset is needed after everytime we do power gate */ band_gap_reset(dev_priv); for_each_dsi_port(port, intel_dsi->ports) { intel_de_write(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_ENTER); usleep_range(2500, 3000); /* Enable MIPI PHY transparent latch * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_EXIT); usleep_range(2500, 3000); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY); usleep_range(2500, 3000); } } static void intel_dsi_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (IS_GEMINILAKE(dev_priv)) glk_dsi_device_ready(encoder); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_dsi_device_ready(encoder); else vlv_dsi_device_ready(encoder); } static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Enter ULPS */ for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); } /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 20)) drm_err(&dev_priv->drm, "MIPI IO Port is not powergated\n"); } } static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Put the IO into reset */ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); } /* Clear MIPI mode */ for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0); } static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) { glk_dsi_enter_low_power_mode(encoder); glk_dsi_disable_mipi_io(encoder); } static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_EXIT); usleep_range(2000, 2500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); /* * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI * Port A only. MIPI Port C has no similar bit for checking. */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) && intel_de_wait_for_clear(dev_priv, port_ctrl, AFE_LATCHOUT, 30)) drm_err(&dev_priv->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); usleep_range(2000, 2500); } } static void intel_dsi_port_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { u32 temp = intel_dsi->pixel_overlap; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, MIPI_CTRL(port), BXT_PIXEL_OVERLAP_CNT_MASK, temp << BXT_PIXEL_OVERLAP_CNT_SHIFT); } else { intel_de_rmw(dev_priv, VLV_CHICKEN_3, PIXEL_OVERLAP_CNT_MASK, temp << PIXEL_OVERLAP_CNT_SHIFT); } } for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); u32 temp; temp = intel_de_read(dev_priv, port_ctrl); temp &= ~LANE_CONFIGURATION_MASK; temp &= ~DUAL_LINK_MODE_MASK; if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT; if (IS_BROXTON(dev_priv)) temp |= LANE_CONFIGURATION_DUAL_LINK_A; else temp |= crtc->pipe ? LANE_CONFIGURATION_DUAL_LINK_B : LANE_CONFIGURATION_DUAL_LINK_A; } if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888) temp |= DITHERING_ENABLE; /* assert ip_tg_enable signal */ intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE); intel_de_posting_read(dev_priv, port_ctrl); } } static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); /* de-assert ip_tg_enable signal */ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); intel_de_posting_read(dev_priv, port_ctrl); } } static void intel_dsi_prepare(struct intel_encoder *intel_encoder, const struct intel_crtc_state *pipe_config); static void intel_dsi_unprepare(struct intel_encoder *encoder); /* * Panel enable/disable sequences from the VBT spec. * * Note the spec has AssertReset / DeassertReset swapped from their * usual naming. We use the normal names to avoid confusion (so below * they are swapped compared to the spec). * * Steps starting with MIPI refer to VBT sequences, note that for v2 * VBTs several steps which have a VBT in v2 are expected to be handled * directly by the driver, by directly driving gpios for example. 
* * v2 video mode seq v3 video mode seq command mode seq * - power on - MIPIPanelPowerOn - power on * - wait t1+t2 - wait t1+t2 * - MIPIDeassertResetPin - MIPIDeassertResetPin - MIPIDeassertResetPin * - io lines to lp-11 - io lines to lp-11 - io lines to lp-11 * - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds * - MIPITearOn * - MIPIDisplayOn * - turn on DPI - turn on DPI - set pipe to dsr mode * - MIPIDisplayOn - MIPIDisplayOn * - wait t5 - wait t5 * - backlight on - MIPIBacklightOn - backlight on * ... ... ... issue mem cmds ... * - backlight off - MIPIBacklightOff - backlight off * - wait t6 - wait t6 * - MIPIDisplayOff * - turn off DPI - turn off DPI - disable pipe dsr mode * - MIPITearOff * - MIPIDisplayOff - MIPIDisplayOff * - io lines to lp-00 - io lines to lp-00 - io lines to lp-00 * - MIPIAssertResetPin - MIPIAssertResetPin - MIPIAssertResetPin * - wait t3 - wait t3 * - power off - MIPIPanelPowerOff - power off * - wait t4 - wait t4 */ /* * DSI port enable has to be done before pipe and plane enable, so we do it in * the pre_enable hook instead of the enable hook. */ static void intel_dsi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum port port; bool glk_cold_boot = false; drm_dbg_kms(&dev_priv->drm, "\n"); intel_dsi_wait_panel_power_cycle(intel_dsi); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); /* * The BIOS may leave the PLL in a wonky state where it doesn't * lock. It needs to be fully powered down to fix it. 
*/ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { bxt_dsi_pll_disable(encoder); bxt_dsi_pll_enable(encoder, pipe_config); } else { vlv_dsi_pll_disable(encoder); vlv_dsi_pll_enable(encoder, pipe_config); } if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); /* Power up DSI regulator */ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0); } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Disable DPOunit clock gating, can stall pipe */ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), 0, DPOUNIT_CLOCK_GATE_DISABLE); } if (!IS_GEMINILAKE(dev_priv)) intel_dsi_prepare(encoder, pipe_config); /* Give the panel time to power-on and then deassert its reset */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); msleep(intel_dsi->panel_on_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); if (IS_GEMINILAKE(dev_priv)) { glk_cold_boot = glk_dsi_enable_io(encoder); /* Prepare port in cold boot(s3/s4) scenario */ if (glk_cold_boot) intel_dsi_prepare(encoder, pipe_config); } /* Put device in ready state (LP-11) */ intel_dsi_device_ready(encoder); /* Prepare port in normal boot scenario */ if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot) intel_dsi_prepare(encoder, pipe_config); /* Send initialization commands in LP mode */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); /* * Enable port in pre-enable phase itself because as per hw team * recommendation, port should be enabled before plane & pipe */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); } else { msleep(20); /* XXX */ for_each_dsi_port(port, intel_dsi->ports) dpi_send_cmd(intel_dsi, TURN_ON, false, port); msleep(100); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); intel_dsi_port_enable(encoder, pipe_config); } intel_backlight_enable(pipe_config, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); } static void bxt_dsi_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { intel_crtc_vblank_on(crtc_state); } /* * DSI port disable has to be done after pipe and plane disable, so we do it in * the post_disable hook. 
*/ static void intel_dsi_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(&i915->drm, "\n"); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_backlight_disable(old_conn_state); /* * According to the spec we should send SHUTDOWN before * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing * has shown that the v3 sequence works for v2 VBTs too */ if (is_vid_mode(intel_dsi)) { /* Send Shutdown command to the panel in LP mode */ for_each_dsi_port(port, intel_dsi->ports) dpi_send_cmd(intel_dsi, SHUTDOWN, false, port); msleep(10); } } static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (IS_GEMINILAKE(dev_priv)) glk_dsi_clear_device_ready(encoder); else vlv_dsi_clear_device_ready(encoder); } static void intel_dsi_post_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { intel_crtc_vblank_off(old_crtc_state); skl_scaler_disable(old_crtc_state); } if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); intel_dsi_port_disable(encoder); usleep_range(2000, 5000); } intel_dsi_unprepare(encoder); /* * if disable packets are sent before sending shutdown packet then in * some next enable sequence send turn on packet error is observed */ if (is_cmd_mode(intel_dsi)) intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); /* Transition to LP-00 */ intel_dsi_clear_device_ready(encoder); if (IS_BROXTON(dev_priv)) { /* Power down DSI regulator to save power */ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT); /* Add MIPI IO reset programming for modeset */ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { bxt_dsi_pll_disable(encoder); } else { vlv_dsi_pll_disable(encoder); intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), DPOUNIT_CLOCK_GATE_DISABLE, 0); } /* Assert reset */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); msleep(intel_dsi->panel_off_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); intel_dsi->panel_power_off_time = ktime_get_boottime(); } static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; bool active = false; drm_dbg_kms(&dev_priv->drm, "\n"); wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; /* * On Broxton the PLL needs to be enabled with a valid divider * configuration, otherwise accessing DSI registers will hang the * machine. See BSpec North Display Engine registers/MIPI[BXT]. 
*/ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && !bxt_dsi_pll_is_enabled(dev_priv)) goto out_put_power; /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE; /* * Due to some hardware limitations on VLV/CHV, the DPI enable * bit in port C control register does not get set. As a * workaround, check pipe B conf instead. */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port == PORT_C) enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { u32 tmp = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)); enabled = tmp & CMD_MODE_DATA_WIDTH_MASK; } if (!enabled) continue; if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) continue; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C)) continue; *pipe = tmp; } else { *pipe = port == PORT_A ? PIPE_A : PIPE_B; } active = true; break; } out_put_power: intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return active; } static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; enum port port; u16 hactive, hfp, hsync, hbp, vfp, vsync; u16 hfp_sw, hsync_sw, hbp_sw; u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, crtc_hblank_start_sw, crtc_hblank_end_sw; /* FIXME: hw readout should not depend on SW state */ adjusted_mode_sw = &crtc->config->hw.adjusted_mode; /* * Atleast one port is active as encoder->get_config called only if * encoder->get_hw_state() returns true. 
*/ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) break; } fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); /* Enable Frame time stamo based scanline reporting */ pipe_config->mode_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; /* In terms of pixels */ adjusted_mode->crtc_hdisplay = intel_de_read(dev_priv, BXT_MIPI_TRANS_HACTIVE(port)); adjusted_mode->crtc_vdisplay = intel_de_read(dev_priv, BXT_MIPI_TRANS_VACTIVE(port)); adjusted_mode->crtc_vtotal = intel_de_read(dev_priv, BXT_MIPI_TRANS_VTOTAL(port)); hactive = adjusted_mode->crtc_hdisplay; hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port)); /* * Meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port)); hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port)); /* harizontal values are in terms of high speed byte clock */ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, intel_dsi->burst_mode_ratio); hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); if (intel_dsi->dual_link) { hfp *= 2; hsync *= 2; hbp *= 2; } /* vertical values are in terms of lines */ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port)); vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port)); adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start; adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay; adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start; adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; /* * In BXT DSI there is no regs programmed with few horizontal timings * in Pixels but txbyteclkhs.. So retrieval process adds some * ROUND_UP ERRORS in the process of PIXELS<==>txbyteclkhs. * Actually here for the given adjusted_mode, we are calculating the * value programmed to the port and then back to the horizontal timing * param in pixels. This is the expected value, including roundup errors * And if that is same as retrieved value from port, then * (HW state) adjusted_mode's horizontal timings are corrected to * match with SW state to nullify the errors. 
*/ /* Calculating the value programmed to the Port register */ hfp_sw = adjusted_mode_sw->crtc_hsync_start - adjusted_mode_sw->crtc_hdisplay; hsync_sw = adjusted_mode_sw->crtc_hsync_end - adjusted_mode_sw->crtc_hsync_start; hbp_sw = adjusted_mode_sw->crtc_htotal - adjusted_mode_sw->crtc_hsync_end; if (intel_dsi->dual_link) { hfp_sw /= 2; hsync_sw /= 2; hbp_sw /= 2; } hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); /* Reverse calculating the adjusted mode parameters from port reg vals*/ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count, intel_dsi->burst_mode_ratio); if (intel_dsi->dual_link) { hfp_sw *= 2; hsync_sw *= 2; hbp_sw *= 2; } crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw + hsync_sw + hbp_sw; crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay; crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw; crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay; crtc_hblank_end_sw = crtc_htotal_sw; if (adjusted_mode->crtc_htotal == crtc_htotal_sw) adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal; if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw) adjusted_mode->crtc_hsync_start = adjusted_mode_sw->crtc_hsync_start; if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw) adjusted_mode->crtc_hsync_end = adjusted_mode_sw->crtc_hsync_end; if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw) adjusted_mode->crtc_hblank_start = adjusted_mode_sw->crtc_hblank_start; if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw) adjusted_mode->crtc_hblank_end = adjusted_mode_sw->crtc_hblank_end; } static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { bxt_dsi_get_pipe_config(encoder, pipe_config); pclk = bxt_dsi_get_pclk(encoder, pipe_config); } else { pclk = vlv_dsi_get_pclk(encoder, pipe_config); } pipe_config->port_clock = pclk; /* FIXME definitely not right for burst/cmd mode/pixel overlap */ pipe_config->hw.adjusted_mode.crtc_clock = pclk; if (intel_dsi->dual_link) pipe_config->hw.adjusted_mode.crtc_clock *= 2; } /* return txclkesc cycles in terms of divider and duration in us */ static u16 txclkesc(u32 divider, unsigned int us) { switch (divider) { case ESCAPE_CLOCK_DIVIDER_1: default: return 20 * us; case ESCAPE_CLOCK_DIVIDER_2: return 10 * us; case ESCAPE_CLOCK_DIVIDER_4: return 5 * us; } } static void set_dsi_timings(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; hactive = adjusted_mode->crtc_hdisplay; hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay; hsync = 
adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end; if (intel_dsi->dual_link) { hactive /= 2; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) hactive += intel_dsi->pixel_overlap; hfp /= 2; hsync /= 2; hbp /= 2; } vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay; vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end; /* horizontal values are in terms of high speed byte clock */ hactive = txbyteclkhs(hactive, bpp, lane_count, intel_dsi->burst_mode_ratio); hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio); hsync = txbyteclkhs(hsync, bpp, lane_count, intel_dsi->burst_mode_ratio); hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); for_each_dsi_port(port, intel_dsi->ports) { if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { /* * Program hdisplay and vdisplay on MIPI transcoder. * This is different from calculated hactive and * vactive, as they are calculated per channel basis, * whereas these values should be based on resolution. */ intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port), adjusted_mode->crtc_hdisplay); intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port), adjusted_mode->crtc_vdisplay); intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port), adjusted_mode->crtc_vtotal); } intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port), hactive); intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp); /* meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port), hsync); intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp); /* vertical values are in terms of lines */ intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp); intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port), vsync); intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp); } } static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt) { switch (fmt) { case MIPI_DSI_FMT_RGB888: return VID_MODE_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666: return VID_MODE_FORMAT_RGB666; case MIPI_DSI_FMT_RGB666_PACKED: return VID_MODE_FORMAT_RGB666_PACKED; case MIPI_DSI_FMT_RGB565: return VID_MODE_FORMAT_RGB565; default: MISSING_CASE(fmt); return VID_MODE_FORMAT_RGB666; } } static void intel_dsi_prepare(struct intel_encoder *intel_encoder, const struct intel_crtc_state *pipe_config) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 val, tmp; u16 mode_hdisplay; drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe)); mode_hdisplay = adjusted_mode->crtc_hdisplay; if (intel_dsi->dual_link) { mode_hdisplay /= 2; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) mode_hdisplay += intel_dsi->pixel_overlap; } for_each_dsi_port(port, intel_dsi->ports) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? 
*/ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1); /* read request priority is per pipe */ tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= ~READ_REQUEST_PRIORITY_MASK; intel_de_write(dev_priv, MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { enum pipe pipe = crtc->pipe; intel_de_rmw(dev_priv, MIPI_CTRL(port), BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe)); } /* XXX: why here, why like this? handling in irq handler?! */ intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff); intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff); intel_de_write(dev_priv, MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg); intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port), adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); } set_dsi_timings(encoder, adjusted_mode); val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT; if (is_cmd_mode(intel_dsi)) { val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT; val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */ } else { val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT; val |= pixel_format_to_reg(intel_dsi->pixel_format); } tmp = 0; if (intel_dsi->eotp_pkt == 0) tmp |= EOT_DISABLE; if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { tmp |= BXT_DPHY_DEFEATURE_EN; if (!is_cmd_mode(intel_dsi)) tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; } for_each_dsi_port(port, intel_dsi->ports) { intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); /* timeouts for recovery. one frame IIUC. if counter expires, * EOT and stop state. */ /* * In burst mode, value greater than one DPI line Time in byte * clock (txbyteclkhs) To timeout this timer 1+ of the above * said value is recommended. * * In non-burst mode, Value greater than one DPI frame time in * byte clock(txbyteclkhs) To timeout this timer 1+ of the above * said value is recommended. * * In DBI only mode, value greater than one DBI frame time in * byte clock(txbyteclkhs) To timeout this timer 1+ of the above * said value is recommended. */ if (is_vid_mode(intel_dsi) && intel_dsi->video_mode == BURST_MODE) { intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } else { intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout); intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port), intel_dsi->turn_arnd_val); intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port), intel_dsi->rst_timer_val); /* dphy stuff */ /* in terms of low power clock */ intel_de_write(dev_priv, MIPI_INIT_COUNT(port), txclkesc(intel_dsi->escape_clk_div, 100)); if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && !intel_dsi->dual_link) { /* * BXT spec says write MIPI_INIT_COUNT for * both the ports, even if only one is * getting used. So write the other port * if not in dual link mode. */ intel_de_write(dev_priv, MIPI_INIT_COUNT(port == PORT_A ? 
PORT_C : PORT_A), intel_dsi->init_count); } /* recovery disables */ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp); /* in terms of low power clock */ intel_de_write(dev_priv, MIPI_INIT_COUNT(port), intel_dsi->init_count); /* in terms of txbyteclkhs. actual high to low switch + * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK. * * XXX: write MIPI_STOP_STATE_STALL? */ intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port), intel_dsi->hs_to_lp_count); /* XXX: low power clock equivalence in terms of byte clock. * the number of byte clocks occupied in one low power clock. * based on txbyteclkhs and txclkesc. * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL * ) / 105.??? */ intel_de_write(dev_priv, MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk); if (IS_GEMINILAKE(dev_priv)) { intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port), intel_dsi->lp_byte_clk); /* Shadow of DPHY reg */ intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port), intel_dsi->dphy_reg); } /* the bw essential for transmitting 16 long packets containing * 252 bytes meant for dcs write memory command is programmed in * this register in terms of byte clocks. based on dsi transfer * rate and the number of lanes configured the time taken to * transmit 16 long packets in a dsi stream varies. */ intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer); intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port), intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); if (is_vid_mode(intel_dsi)) { u32 fmt = intel_dsi->video_frmt_cfg_bits | IP_TG_CONFIG; /* * Some panels might have resolution which is not a * multiple of 64 like 1366 x 768. Enable RANDOM * resolution support for such panels by default. */ fmt |= RANDOM_DPI_DISPLAY_RESOLUTION; switch (intel_dsi->video_mode) { default: MISSING_CASE(intel_dsi->video_mode); fallthrough; case NON_BURST_SYNC_EVENTS: fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS; break; case NON_BURST_SYNC_PULSE: fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE; break; case BURST_MODE: fmt |= VIDEO_MODE_BURST; break; } intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt); } } } static void intel_dsi_unprepare(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; if (IS_GEMINILAKE(dev_priv)) return; for_each_dsi_port(port, intel_dsi->ports) { /* Panel commands can be sent when clock is in LP11 */ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_dsi_reset_clocks(encoder, port); else vlv_dsi_reset_clocks(encoder, port); intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); } } static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); intel_dsi_vbt_gpio_cleanup(intel_dsi); intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_dsi_funcs = { .destroy = intel_dsi_encoder_destroy, }; static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, .mode_valid = intel_dsi_mode_valid, .atomic_check = intel_digital_connector_atomic_check, }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { .detect = intel_panel_detect, .late_register = 
intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static void vlv_dsi_add_properties(struct intel_connector *connector) { const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); intel_attach_scaling_mode_property(&connector->base); drm_connector_set_panel_orientation_with_quirk(&connector->base, intel_dsi_get_panel_orientation(connector), fixed_mode->hdisplay, fixed_mode->vdisplay); } #define NS_KHZ_RATIO 1000000 #define PREPARE_CNT_MAX 0x3F #define EXIT_ZERO_CNT_MAX 0x3F #define CLK_ZERO_CNT_MAX 0xFF #define TRAIL_CNT_MAX 0x1F static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns, extra_byte_count, tlpx_ui; u32 ui_num, ui_den; u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; u32 ths_prepare_ns, tclk_trail_ns; u32 tclk_prepare_clkzero, ths_prepare_hszero; u32 lp_to_hs_switch, hs_to_lp_switch; u32 mul; tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); switch (intel_dsi->lane_count) { case 1: case 2: extra_byte_count = 2; break; case 3: extra_byte_count = 4; break; case 4: default: extra_byte_count = 3; break; } /* in Kbps */ ui_num = NS_KHZ_RATIO; ui_den = intel_dsi_bitrate(intel_dsi); tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; ths_prepare_hszero = mipi_config->ths_prepare_hszero; /* * B060 * LP byte clock = TLPX/ (8UI) */ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num); /* DDR clock period = 2 * UI * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ) * UI(nsec) = 10^6 / bitrate * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate * DDR clock count = ns_value / DDR clock period * * For GEMINILAKE dphy_param_reg will be programmed in terms of * HS byte clock count for other platform in HS ddr clock count */ mul = IS_GEMINILAKE(dev_priv) ? 
8 : 2; ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); /* prepare count */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); if (prepare_cnt > PREPARE_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n", prepare_cnt); prepare_cnt = PREPARE_CNT_MAX; } /* exit zero count */ exit_zero_cnt = DIV_ROUND_UP( (ths_prepare_hszero - ths_prepare_ns) * ui_den, ui_num * mul ); /* * Exit zero is unified val ths_zero and ths_exit * minimum value for ths_exit = 110ns * min (exit_zero_cnt * 2) = 110/UI * exit_zero_cnt = 55/UI */ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num) exit_zero_cnt += 1; if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n", exit_zero_cnt); exit_zero_cnt = EXIT_ZERO_CNT_MAX; } /* clk zero count */ clk_zero_cnt = DIV_ROUND_UP( (tclk_prepare_clkzero - ths_prepare_ns) * ui_den, ui_num * mul); if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n", clk_zero_cnt); clk_zero_cnt = CLK_ZERO_CNT_MAX; } /* trail count */ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); if (trail_cnt > TRAIL_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n", trail_cnt); trail_cnt = TRAIL_CNT_MAX; } /* B080 */ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 | clk_zero_cnt << 8 | prepare_cnt; /* * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT * * mul + 10UI + Extra Byte Count * * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count * Extra Byte Count is calculated according to number of lanes. * High Low Switch Count is the Max of LP to HS and * HS to LP switch count * */ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num); /* B044 */ /* FIXME: * The comment above does not match with the code */ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul + exit_zero_cnt * mul + 10, 8); hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8); intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch); intel_dsi->hs_to_lp_count += extra_byte_count; /* B088 */ /* LP -> HS for clock lanes * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero + * extra byte count * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt * * 2(in UI) + extra byte count * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) / * 8 + extra byte count */ intel_dsi->clk_lp_to_hs_count = DIV_ROUND_UP( 4 * tlpx_ui + prepare_cnt * 2 + clk_zero_cnt * 2, 8); intel_dsi->clk_lp_to_hs_count += extra_byte_count; /* HS->LP for Clock Lanes * Low Power clock synchronisations + 1Tx byteclk + tclk_trail + * Extra byte count * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 + * Extra byte count */ intel_dsi->clk_hs_to_lp_count = DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, 8); intel_dsi->clk_hs_to_lp_count += extra_byte_count; intel_dsi_log_params(intel_dsi); } void vlv_dsi_init(struct drm_i915_private *dev_priv) { struct intel_dsi *intel_dsi; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_display_mode *current_mode; enum port port; enum pipe pipe; drm_dbg_kms(&dev_priv->drm, "\n"); /* There is no detection method for MIPI so rely on VBT */ if (!intel_bios_is_dsi_present(dev_priv, &port)) return; if (IS_GEMINILAKE(dev_priv) || 
IS_BROXTON(dev_priv)) dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE; else dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) return; intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(intel_dsi); return; } intel_encoder = &intel_dsi->base; encoder = &intel_encoder->base; intel_dsi->attached_connector = intel_connector; connector = &intel_connector->base; drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); intel_encoder->compute_config = intel_dsi_compute_config; intel_encoder->pre_enable = intel_dsi_pre_enable; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) intel_encoder->enable = bxt_dsi_enable; intel_encoder->disable = intel_dsi_disable; intel_encoder->post_disable = intel_dsi_post_disable; intel_encoder->get_hw_state = intel_dsi_get_hw_state; intel_encoder->get_config = intel_dsi_get_config; intel_encoder->update_pipe = intel_backlight_update; intel_encoder->shutdown = intel_dsi_shutdown; intel_connector->get_hw_state = intel_connector_get_hw_state; intel_encoder->port = port; intel_encoder->type = INTEL_OUTPUT_DSI; intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI; intel_encoder->cloneable = 0; /* * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI * port C. BXT isn't limited like this. */ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) intel_encoder->pipe_mask = ~0; else if (port == PORT_A) intel_encoder->pipe_mask = BIT(PIPE_A); else intel_encoder->pipe_mask = BIT(PIPE_B); intel_dsi->panel_power_off_time = ktime_get_boottime(); intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL); if (intel_connector->panel.vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); else intel_dsi->ports = BIT(port); if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. 
*/ for_each_dsi_port(port, intel_dsi->ports) { struct intel_dsi_host *host; host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops, port); if (!host) goto err; intel_dsi->dsi_hosts[port] = host; } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { drm_dbg_kms(&dev_priv->drm, "no device found\n"); goto err; } /* Use clock read-back from current hw-state for fastboot */ current_mode = intel_encoder_current_mode(intel_encoder); if (current_mode) { drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n", intel_dsi->pclk, current_mode->clock); if (intel_fuzzy_clock_check(intel_dsi->pclk, current_mode->clock)) { drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n"); intel_dsi->pclk = current_mode->clock; } kfree(current_mode); } vlv_dphy_param_init(intel_dsi); intel_dsi_vbt_gpio_init(intel_dsi, intel_dsi_get_hw_state(intel_encoder, &pipe)); drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/ intel_connector_attach_encoder(intel_connector, intel_encoder); mutex_lock(&dev_priv->drm.mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); mutex_unlock(&dev_priv->drm.mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { drm_dbg_kms(&dev_priv->drm, "no fixed mode\n"); goto err_cleanup_connector; } intel_panel_init(intel_connector, NULL); intel_backlight_setup(intel_connector, INVALID_PIPE); vlv_dsi_add_properties(intel_connector); return; err_cleanup_connector: drm_connector_cleanup(&intel_connector->base); err: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dsi); kfree(intel_connector); }
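The vlv_dphy_param_init() arithmetic above converts VBT timings given in nanoseconds into counts of UI (one high-speed bit time) and HS byte clocks, using DIV_ROUND_UP with ui_num = 10^6 and ui_den = bitrate in kHz. Below is a minimal standalone sketch of that conversion; the 500 Mbps lane rate and the TLPX/THS-PREPARE values are invented example numbers, not taken from any VBT.

/* dphy_math_sketch.c - illustrative only, not kernel code */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define NS_KHZ_RATIO            1000000 /* UI(ns) = NS_KHZ_RATIO / bitrate(kHz) */

int main(void)
{
        unsigned int bitrate_khz = 500000;      /* assumed 500 Mbps per lane */
        unsigned int tlpx_ns = 50;              /* assumed TLPX */
        unsigned int ths_prepare_ns = 40;       /* assumed max(ths_prepare, tclk_prepare) */
        unsigned int ui_num = NS_KHZ_RATIO;
        unsigned int ui_den = bitrate_khz;
        unsigned int mul = 2;                   /* DDR clock count (VLV/CHV); GLK uses 8 */

        /* LP byte clock: TLPX expressed in byte clocks (1 byte clock = 8 UI) */
        unsigned int lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);

        /* prepare count: THS-PREPARE expressed in mul-UI clock ticks */
        unsigned int prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);

        printf("UI = %.2f ns\n", (double)ui_num / ui_den);     /* 2.00 ns */
        printf("lp_byte_clk = %u\n", lp_byte_clk);              /* 4 */
        printf("prepare_cnt = %u\n", prepare_cnt);              /* 10 */
        return 0;
}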
linux-master
drivers/gpu/drm/i915/display/vlv_dsi.c
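A note on the round-trip comparison in bxt_dsi_get_pipe_config() above: the horizontal timings are programmed in txbyteclkhs units, and both the pixels-to-byteclks and byteclks-to-pixels conversions round up, so a raw hardware readout can drift by a pixel. The driver therefore runs the software timings through the same round trip and, when the readout matches that expected value, reports the software timings instead. The sketch below uses simplified stand-ins for txbyteclkhs()/pixels_from_txbyteclkhs() (burst-mode ratio and dual link ignored), so the numbers are illustrative only.

/* dsi_roundtrip_sketch.c - illustrative only */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* simplified: pixels -> HS byte clocks shared across lanes, and back */
static unsigned int to_byteclks(unsigned int pixels, unsigned int bpp, unsigned int lanes)
{
        return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lanes);
}

static unsigned int to_pixels(unsigned int byteclks, unsigned int bpp, unsigned int lanes)
{
        return DIV_ROUND_UP(byteclks * lanes * 8, bpp);
}

int main(void)
{
        unsigned int hfp_sw = 46, bpp = 24, lanes = 4;  /* assumed front porch and link config */

        unsigned int reg = to_byteclks(hfp_sw, bpp, lanes);     /* value that would be programmed */
        unsigned int hfp_hw = to_pixels(reg, bpp, lanes);       /* value a naive readout reconstructs */

        /* 46 pixels -> 35 byteclks -> 47 pixels: one pixel of round-up drift */
        printf("sw = %u, reg = %u, hw readout = %u\n", hfp_sw, reg, hfp_hw);

        /* the fixup pattern: round-trip the SW value the same way and, if the HW
         * readout equals that expected value, report the SW timing instead */
        unsigned int expected = to_pixels(to_byteclks(hfp_sw, bpp, lanes), bpp, lanes);
        if (hfp_hw == expected)
                printf("readout matches expected round trip -> keep sw value %u\n", hfp_sw);
        return 0;
}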
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include <drm/drm_edid.h> #include "i915_drv.h" #include "intel_crtc_state_dump.h" #include "intel_display_types.h" #include "intel_hdmi.h" #include "intel_vrr.h" static void intel_dump_crtc_timings(struct drm_i915_private *i915, const struct drm_display_mode *mode) { drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, " "hd=%d hb=%d-%d hs=%d-%d ht=%d, " "vd=%d vb=%d-%d vs=%d-%d vt=%d, " "flags=0x%x\n", mode->crtc_clock, mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, mode->flags); } static void intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, const char *id, unsigned int lane_count, const struct intel_link_m_n *m_n) { struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); drm_dbg_kms(&i915->drm, "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", id, lane_count, m_n->data_m, m_n->data_n, m_n->link_m, m_n->link_n, m_n->tu); } static void intel_dump_infoframe(struct drm_i915_private *i915, const union hdmi_infoframe *frame) { if (!drm_debug_enabled(DRM_UT_KMS)) return; hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame); } static void intel_dump_dp_vsc_sdp(struct drm_i915_private *i915, const struct drm_dp_vsc_sdp *vsc) { if (!drm_debug_enabled(DRM_UT_KMS)) return; drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc); } static void intel_dump_buffer(struct drm_i915_private *i915, const char *prefix, const u8 *buf, size_t len) { if (!drm_debug_enabled(DRM_UT_KMS)) return; print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, 16, 0, buf, len, false); } #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x static const char * const output_type_str[] = { OUTPUT_TYPE(UNUSED), OUTPUT_TYPE(ANALOG), OUTPUT_TYPE(DVO), OUTPUT_TYPE(SDVO), OUTPUT_TYPE(LVDS), OUTPUT_TYPE(TVOUT), OUTPUT_TYPE(HDMI), OUTPUT_TYPE(DP), OUTPUT_TYPE(EDP), OUTPUT_TYPE(DSI), OUTPUT_TYPE(DDI), OUTPUT_TYPE(DP_MST), }; #undef OUTPUT_TYPE static void snprintf_output_types(char *buf, size_t len, unsigned int output_types) { char *str = buf; int i; str[0] = '\0'; for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { int r; if ((output_types & BIT(i)) == 0) continue; r = snprintf(str, len, "%s%s", str != buf ? 
"," : "", output_type_str[i]); if (r >= len) break; str += r; len -= r; output_types &= ~BIT(i); } WARN_ON_ONCE(output_types != 0); } static const char * const output_format_str[] = { [INTEL_OUTPUT_FORMAT_RGB] = "RGB", [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", }; const char *intel_output_format_name(enum intel_output_format format) { if (format >= ARRAY_SIZE(output_format_str)) return "invalid"; return output_format_str[format]; } static void intel_dump_plane_state(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; if (!fb) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", plane->base.base.id, plane->base.name, str_yes_no(plane_state->uapi.visible)); return; } drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n", plane->base.base.id, plane->base.name, fb->base.id, fb->width, fb->height, &fb->format->format, fb->modifier, str_yes_no(plane_state->uapi.visible)); drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n", plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter); if (plane_state->uapi.visible) drm_dbg_kms(&i915->drm, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", DRM_RECT_FP_ARG(&plane_state->uapi.src), DRM_RECT_ARG(&plane_state->uapi.dst)); } static void ilk_dump_csc(struct drm_i915_private *i915, const char *name, const struct intel_csc_matrix *csc) { int i; drm_dbg_kms(&i915->drm, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name, csc->preoff[0], csc->preoff[1], csc->preoff[2]); for (i = 0; i < 3; i++) drm_dbg_kms(&i915->drm, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, csc->coeff[3 * i + 0], csc->coeff[3 * i + 1], csc->coeff[3 * i + 2]); if (DISPLAY_VER(i915) < 7) return; drm_dbg_kms(&i915->drm, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name, csc->postoff[0], csc->postoff[1], csc->postoff[2]); } static void vlv_dump_csc(struct drm_i915_private *i915, const char *name, const struct intel_csc_matrix *csc) { int i; for (i = 0; i < 3; i++) drm_dbg_kms(&i915->drm, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name, csc->coeff[3 * i + 0], csc->coeff[3 * i + 1], csc->coeff[3 * i + 2]); } void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, struct intel_atomic_state *state, const char *context) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_plane_state *plane_state; struct intel_plane *plane; char buf[64]; int i; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n", crtc->base.base.id, crtc->base.name, str_yes_no(pipe_config->hw.enable), context); if (!pipe_config->hw.enable) goto dump_planes; snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); drm_dbg_kms(&i915->drm, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n", str_yes_no(pipe_config->hw.active), buf, pipe_config->output_types, intel_output_format_name(pipe_config->output_format), intel_output_format_name(pipe_config->sink_format)); drm_dbg_kms(&i915->drm, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n", transcoder_name(pipe_config->mst_master_transcoder)); 
drm_dbg_kms(&i915->drm, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", transcoder_name(pipe_config->master_transcoder), pipe_config->sync_mode_slaves_mask); drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n", intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" : intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no", pipe_config->bigjoiner_pipes); drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n", str_enabled_disabled(pipe_config->splitter.enable), pipe_config->splitter.link_count, pipe_config->splitter.pixel_overlap); if (pipe_config->has_pch_encoder) intel_dump_m_n_config(pipe_config, "fdi", pipe_config->fdi_lanes, &pipe_config->fdi_m_n); if (intel_crtc_has_dp_encoder(pipe_config)) { intel_dump_m_n_config(pipe_config, "dp m_n", pipe_config->lane_count, &pipe_config->dp_m_n); intel_dump_m_n_config(pipe_config, "dp m2_n2", pipe_config->lane_count, &pipe_config->dp_m2_n2); } drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n", pipe_config->framestart_delay, pipe_config->msa_timing_delay); drm_dbg_kms(&i915->drm, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", pipe_config->has_audio, pipe_config->has_infoframe, pipe_config->infoframes.enable); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) drm_dbg_kms(&i915->drm, "GCP: 0x%x\n", pipe_config->infoframes.gcp); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) intel_dump_infoframe(i915, &pipe_config->infoframes.avi); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) intel_dump_infoframe(i915, &pipe_config->infoframes.spd); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) intel_dump_infoframe(i915, &pipe_config->infoframes.drm); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) intel_dump_infoframe(i915, &pipe_config->infoframes.drm); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_VSC)) intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc); if (pipe_config->has_audio) intel_dump_buffer(i915, "ELD: ", pipe_config->eld, drm_eld_size(pipe_config->eld)); drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n", str_yes_no(pipe_config->vrr.enable), pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, pipe_config->vrr.flipline, intel_vrr_vmin_vblank_start(pipe_config), intel_vrr_vmax_vblank_start(pipe_config)); drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&pipe_config->hw.mode)); drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&pipe_config->hw.adjusted_mode)); intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode); drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&pipe_config->hw.pipe_mode)); intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode); drm_dbg_kms(&i915->drm, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n", pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src), pipe_config->pixel_rate); drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n", pipe_config->linetime, pipe_config->ips_linetime); if (DISPLAY_VER(i915) >= 9) 
drm_dbg_kms(&i915->drm, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", crtc->num_scalers, pipe_config->scaler_state.scaler_users, pipe_config->scaler_state.scaler_id, pipe_config->hw.scaling_filter); if (HAS_GMCH(i915)) drm_dbg_kms(&i915->drm, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", pipe_config->gmch_pfit.control, pipe_config->gmch_pfit.pgm_ratios, pipe_config->gmch_pfit.lvds_border_bits); else drm_dbg_kms(&i915->drm, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n", DRM_RECT_ARG(&pipe_config->pch_pfit.dst), str_enabled_disabled(pipe_config->pch_pfit.enabled), str_yes_no(pipe_config->pch_pfit.force_thru)); drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n", pipe_config->ips_enabled, pipe_config->double_wide, pipe_config->has_drrs); intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state); if (IS_CHERRYVIEW(i915)) drm_dbg_kms(&i915->drm, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", pipe_config->cgm_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); else drm_dbg_kms(&i915->drm, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", pipe_config->csc_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n", pipe_config->pre_csc_lut && pipe_config->pre_csc_lut == i915->display.color.glk_linear_degamma_lut ? "(linear) " : "", pipe_config->pre_csc_lut ? drm_color_lut_size(pipe_config->pre_csc_lut) : 0, pipe_config->post_csc_lut ? drm_color_lut_size(pipe_config->post_csc_lut) : 0); if (DISPLAY_VER(i915) >= 11) ilk_dump_csc(i915, "output csc", &pipe_config->output_csc); if (!HAS_GMCH(i915)) ilk_dump_csc(i915, "pipe csc", &pipe_config->csc); else if (IS_CHERRYVIEW(i915)) vlv_dump_csc(i915, "cgm csc", &pipe_config->csc); else if (IS_VALLEYVIEW(i915)) vlv_dump_csc(i915, "wgc csc", &pipe_config->csc); dump_planes: if (!state) return; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe == crtc->pipe) intel_dump_plane_state(plane_state); } }
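The snprintf_output_types() helper above builds a comma-separated list from a bitmask by advancing the destination pointer by each snprintf() return value. Here is a standalone sketch of the same pattern with a shortened, purely illustrative name table:

/* bitmask_names_sketch.c - illustrative only */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static const char * const names[] = { "UNUSED", "ANALOG", "DVO", "SDVO", "LVDS" };

static void snprintf_bits(char *buf, size_t len, unsigned int mask)
{
        char *str = buf;
        size_t i;

        str[0] = '\0';

        for (i = 0; i < ARRAY_SIZE(names); i++) {
                int r;

                if (!(mask & (1u << i)))
                        continue;

                /* prepend a comma for every entry after the first */
                r = snprintf(str, len, "%s%s", str != buf ? "," : "", names[i]);
                if (r < 0 || (size_t)r >= len)
                        break;  /* output truncated: stop appending */

                str += r;
                len -= r;
        }
}

int main(void)
{
        char buf[64];

        snprintf_bits(buf, sizeof(buf), (1u << 1) | (1u << 4));
        printf("%s\n", buf);    /* "ANALOG,LVDS" */
        return 0;
}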
linux-master
drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
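In ilk_dump_csc()/vlv_dump_csc() above, the coeff[3 * i + j] indexing means the color-space-conversion coefficients form a row-major 3x3 matrix, one output channel per printed row, bracketed by pre/post offsets. A small illustration with a locally defined struct; the real struct intel_csc_matrix is declared elsewhere in the display code, so this layout is only an assumption for the sketch.

/* csc_dump_sketch.c - illustrative only */
#include <stdio.h>
#include <stdint.h>

struct csc_matrix {
        uint16_t preoff[3];
        uint16_t coeff[9];      /* 3 rows x 3 columns, row-major */
        uint16_t postoff[3];
};

static void dump_csc(const char *name, const struct csc_matrix *csc)
{
        int i;

        printf("%s: pre offsets: 0x%04x 0x%04x 0x%04x\n",
               name, csc->preoff[0], csc->preoff[1], csc->preoff[2]);

        for (i = 0; i < 3; i++) /* one output channel (row) per line */
                printf("%s: coefficients: 0x%04x 0x%04x 0x%04x\n",
                       name, csc->coeff[3 * i + 0], csc->coeff[3 * i + 1],
                       csc->coeff[3 * i + 2]);

        printf("%s: post offsets: 0x%04x 0x%04x 0x%04x\n",
               name, csc->postoff[0], csc->postoff[1], csc->postoff[2]);
}

int main(void)
{
        /* identity-like example values, purely illustrative */
        struct csc_matrix csc = {
                .coeff = { 0x1000, 0, 0,  0, 0x1000, 0,  0, 0, 0x1000 },
        };

        dump_csc("pipe csc", &csc);
        return 0;
}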
/* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * */ #include "intel_display_types.h" #include "intel_dvo_dev.h" #define CH7017_TV_DISPLAY_MODE 0x00 #define CH7017_FLICKER_FILTER 0x01 #define CH7017_VIDEO_BANDWIDTH 0x02 #define CH7017_TEXT_ENHANCEMENT 0x03 #define CH7017_START_ACTIVE_VIDEO 0x04 #define CH7017_HORIZONTAL_POSITION 0x05 #define CH7017_VERTICAL_POSITION 0x06 #define CH7017_BLACK_LEVEL 0x07 #define CH7017_CONTRAST_ENHANCEMENT 0x08 #define CH7017_TV_PLL 0x09 #define CH7017_TV_PLL_M 0x0a #define CH7017_TV_PLL_N 0x0b #define CH7017_SUB_CARRIER_0 0x0c #define CH7017_CIV_CONTROL 0x10 #define CH7017_CIV_0 0x11 #define CH7017_CHROMA_BOOST 0x14 #define CH7017_CLOCK_MODE 0x1c #define CH7017_INPUT_CLOCK 0x1d #define CH7017_GPIO_CONTROL 0x1e #define CH7017_INPUT_DATA_FORMAT 0x1f #define CH7017_CONNECTION_DETECT 0x20 #define CH7017_DAC_CONTROL 0x21 #define CH7017_BUFFERED_CLOCK_OUTPUT 0x22 #define CH7017_DEFEAT_VSYNC 0x47 #define CH7017_TEST_PATTERN 0x48 #define CH7017_POWER_MANAGEMENT 0x49 /** Enables the TV output path. 
*/ #define CH7017_TV_EN (1 << 0) #define CH7017_DAC0_POWER_DOWN (1 << 1) #define CH7017_DAC1_POWER_DOWN (1 << 2) #define CH7017_DAC2_POWER_DOWN (1 << 3) #define CH7017_DAC3_POWER_DOWN (1 << 4) /** Powers down the TV out block, and DAC0-3 */ #define CH7017_TV_POWER_DOWN_EN (1 << 5) #define CH7017_VERSION_ID 0x4a #define CH7017_DEVICE_ID 0x4b #define CH7017_DEVICE_ID_VALUE 0x1b #define CH7018_DEVICE_ID_VALUE 0x1a #define CH7019_DEVICE_ID_VALUE 0x19 #define CH7017_XCLK_D2_ADJUST 0x53 #define CH7017_UP_SCALER_COEFF_0 0x55 #define CH7017_UP_SCALER_COEFF_1 0x56 #define CH7017_UP_SCALER_COEFF_2 0x57 #define CH7017_UP_SCALER_COEFF_3 0x58 #define CH7017_UP_SCALER_COEFF_4 0x59 #define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a #define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b #define CH7017_GPIO_INVERT 0x5c #define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d #define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e #define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f /**< Low bits of horizontal active pixel input */ #define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60 /** High bits of horizontal active pixel input */ #define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0) /** High bits of vertical active line output */ #define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3) #define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61 /**< Low bits of vertical active line output */ #define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62 /**< Low bits of horizontal active pixel output */ #define CH7017_LVDS_POWER_DOWN 0x63 /** High bits of horizontal active pixel output */ #define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0) /** Enables the LVDS power down state transition */ #define CH7017_LVDS_POWER_DOWN_EN (1 << 6) /** Enables the LVDS upscaler */ #define CH7017_LVDS_UPSCALER_EN (1 << 7) #define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08 #define CH7017_LVDS_ENCODING 0x64 #define CH7017_LVDS_DITHER_2D (1 << 2) #define CH7017_LVDS_DITHER_DIS (1 << 3) #define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4) #define CH7017_LVDS_24_BIT (1 << 5) #define CH7017_LVDS_ENCODING_2 0x65 #define CH7017_LVDS_PLL_CONTROL 0x66 /** Enables the LVDS panel output path */ #define CH7017_LVDS_PANEN (1 << 0) /** Enables the LVDS panel backlight */ #define CH7017_LVDS_BKLEN (1 << 3) #define CH7017_POWER_SEQUENCING_T1 0x67 #define CH7017_POWER_SEQUENCING_T2 0x68 #define CH7017_POWER_SEQUENCING_T3 0x69 #define CH7017_POWER_SEQUENCING_T4 0x6a #define CH7017_POWER_SEQUENCING_T5 0x6b #define CH7017_GPIO_DRIVER_TYPE 0x6c #define CH7017_GPIO_DATA 0x6d #define CH7017_GPIO_DIRECTION_CONTROL 0x6e #define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71 # define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4 # define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0 # define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80 #define CH7017_LVDS_PLL_VCO_CONTROL 0x72 # define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80 # define CH7017_LVDS_PLL_VCO_SHIFT 4 # define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0 #define CH7017_OUTPUTS_ENABLE 0x73 # define CH7017_CHARGE_PUMP_LOW 0x0 # define CH7017_CHARGE_PUMP_HIGH 0x3 # define CH7017_LVDS_CHANNEL_A (1 << 3) # define CH7017_LVDS_CHANNEL_B (1 << 4) # define CH7017_TV_DAC_A (1 << 5) # define CH7017_TV_DAC_B (1 << 6) # define CH7017_DDC_SELECT_DC2 (1 << 7) #define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74 #define CH7017_LVDS_PLL_EMI_REDUCTION 0x75 #define CH7017_LVDS_POWER_DOWN_FLICKER 0x76 #define CH7017_LVDS_CONTROL_2 0x78 # define CH7017_LOOP_FILTER_SHIFT 5 # define CH7017_PHASE_DETECTOR_SHIFT 0 #define CH7017_BANG_LIMIT_CONTROL 0x7f struct ch7017_priv { u8 dummy; }; static void ch7017_dump_regs(struct intel_dvo_device *dvo); 
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = val, } }; return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2; } static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) { u8 buf[2] = { addr, val }; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = buf, }; return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; } /** Probes for a CH7017 on the given bus and slave address. */ static bool ch7017_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { struct ch7017_priv *priv; const char *str; u8 val; priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); if (priv == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = priv; if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) goto fail; switch (val) { case CH7017_DEVICE_ID_VALUE: str = "ch7017"; break; case CH7018_DEVICE_ID_VALUE: str = "ch7018"; break; case CH7019_DEVICE_ID_VALUE: str = "ch7019"; break; default: DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " "slave %d.\n", val, adapter->name, dvo->slave_addr); goto fail; } DRM_DEBUG_KMS("%s detected on %s, addr %d\n", str, adapter->name, dvo->slave_addr); return true; fail: kfree(priv); return false; } static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) { return connector_status_connected; } static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { if (mode->clock > 160000) return MODE_CLOCK_HIGH; return MODE_OK; } static void ch7017_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { u8 lvds_pll_feedback_div, lvds_pll_vco_control; u8 outputs_enable, lvds_control_2, lvds_power_down; u8 horizontal_active_pixel_input; u8 horizontal_active_pixel_output, vertical_active_line_output; u8 active_input_line_output; DRM_DEBUG_KMS("Registers before mode setting\n"); ch7017_dump_regs(dvo); /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ if (mode->clock < 100000) { outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW; lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | (2 << CH7017_LVDS_PLL_VCO_SHIFT) | (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) | (0 << CH7017_PHASE_DETECTOR_SHIFT); } else { outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH; lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) | (0 << CH7017_PHASE_DETECTOR_SHIFT); if (1) { /* XXX: dual channel panel detection. Assume yes for now. 
*/ outputs_enable |= CH7017_LVDS_CHANNEL_B; lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | (2 << CH7017_LVDS_PLL_VCO_SHIFT) | (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); } else { lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | (1 << CH7017_LVDS_PLL_VCO_SHIFT) | (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); } } horizontal_active_pixel_input = mode->hdisplay & 0x00ff; vertical_active_line_output = mode->vdisplay & 0x00ff; horizontal_active_pixel_output = mode->hdisplay & 0x00ff; active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) | (((mode->vdisplay & 0x0700) >> 8) << 3); lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | (mode->hdisplay & 0x0700) >> 8; ch7017_dpms(dvo, false); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, horizontal_active_pixel_input); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, horizontal_active_pixel_output); ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, vertical_active_line_output); ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, active_input_line_output); ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control); ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div); ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2); ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable); /* Turn the LVDS back on with new settings. */ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); DRM_DEBUG_KMS("Registers after mode setting\n"); ch7017_dump_regs(dvo); } /* set the CH7017 power state */ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) { u8 val; ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); /* Turn off TV/VGA, and never turn it on since we don't support it. */ ch7017_write(dvo, CH7017_POWER_MANAGEMENT, CH7017_DAC0_POWER_DOWN | CH7017_DAC1_POWER_DOWN | CH7017_DAC2_POWER_DOWN | CH7017_DAC3_POWER_DOWN | CH7017_TV_POWER_DOWN_EN); if (enable) { /* Turn on the LVDS */ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, val & ~CH7017_LVDS_POWER_DOWN_EN); } else { /* Turn off the LVDS */ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, val | CH7017_LVDS_POWER_DOWN_EN); } /* XXX: Should actually wait for update power status somehow */ msleep(20); } static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) { u8 val; ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); if (val & CH7017_LVDS_POWER_DOWN_EN) return false; else return true; } static void ch7017_dump_regs(struct intel_dvo_device *dvo) { u8 val; #define DUMP(reg) \ do { \ ch7017_read(dvo, reg, &val); \ DRM_DEBUG_KMS(#reg ": %02x\n", val); \ } while (0) DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT); DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT); DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT); DUMP(CH7017_LVDS_PLL_VCO_CONTROL); DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV); DUMP(CH7017_LVDS_CONTROL_2); DUMP(CH7017_OUTPUTS_ENABLE); DUMP(CH7017_LVDS_POWER_DOWN); } static void ch7017_destroy(struct intel_dvo_device *dvo) { struct ch7017_priv *priv = dvo->dev_priv; if (priv) { kfree(priv); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops ch7017_ops = { .init = ch7017_init, .detect = ch7017_detect, .mode_valid = ch7017_mode_valid, .mode_set = ch7017_mode_set, .dpms = ch7017_dpms, .get_hw_state = ch7017_get_hw_state, .dump_regs = ch7017_dump_regs, .destroy = ch7017_destroy, };
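ch7017_read()/ch7017_write() above use the usual two-message register access: one write message carrying the register offset, then one read message for the value, in a single transfer. From user space the same transaction can be tried through /dev/i2c-* with the I2C_RDWR ioctl, as sketched below; the bus number and the 0x75 slave address are placeholders, not values taken from the driver.

/* ch7017_id_sketch.c - userspace illustration only, not part of the driver */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define CH7017_DEVICE_ID        0x4b    /* register offset, as in the table above */

static int reg_read(int fd, uint8_t slave, uint8_t reg, uint8_t *val)
{
        /* message 1: write the register offset; message 2: read one byte back */
        struct i2c_msg msgs[2] = {
                { .addr = slave, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
        uint8_t id;
        int fd = open("/dev/i2c-1", O_RDWR);    /* bus number is an assumption */

        if (fd < 0 || reg_read(fd, 0x75, CH7017_DEVICE_ID, &id) < 0) {
                perror("i2c");
                return 1;
        }

        printf("device id register: 0x%02x\n", id);
        close(fd);
        return 0;
}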
linux-master
drivers/gpu/drm/i915/display/dvo_ch7017.c
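In ch7017_mode_set() above, the 11-bit active pixel/line counts are split: the low 8 bits of hdisplay and vdisplay go into dedicated registers, while bits 10:8 of both are packed together into CH7017_ACTIVE_INPUT_LINE_OUTPUT (with hdisplay's high output bits also folded into the LVDS power-down register). A short sketch of that packing for an assumed 1366x768 panel mode:

/* ch7017_pack_sketch.c - illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int hdisplay = 1366, vdisplay = 768;   /* assumed panel mode */

        uint8_t hap_low = hdisplay & 0x00ff;            /* HORIZONTAL_ACTIVE_PIXEL_INPUT/OUTPUT */
        uint8_t val_low = vdisplay & 0x00ff;            /* VERTICAL_ACTIVE_LINE_OUTPUT */
        uint8_t high_bits = ((hdisplay & 0x0700) >> 8) |        /* ACTIVE_INPUT_LINE_OUTPUT */
                            (((vdisplay & 0x0700) >> 8) << 3);

        printf("h = %u -> low 0x%02x, high 0x%x\n", hdisplay, hap_low, (hdisplay & 0x0700) >> 8);
        printf("v = %u -> low 0x%02x, high 0x%x\n", vdisplay, val_low, (vdisplay & 0x0700) >> 8);
        printf("packed high-bits register: 0x%02x\n", high_bits);

        /* reading the mode back from the registers reverses the split */
        unsigned int h_back = hap_low | ((high_bits & 0x7) << 8);
        unsigned int v_back = val_low | (((high_bits >> 3) & 0x7) << 8);
        printf("reconstructed: %ux%u\n", h_back, v_back);       /* 1366x768 */
        return 0;
}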
/* * Copyright © 2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Pierre-Louis Bossart <[email protected]> * Jerome Anand <[email protected]> * based on VED patches * */ /** * DOC: LPE Audio integration for HDMI or DP playback * * Motivation: * Atom platforms (e.g. valleyview and cherryTrail) integrates a DMA-based * interface as an alternative to the traditional HDaudio path. While this * mode is unrelated to the LPE aka SST audio engine, the documentation refers * to this mode as LPE so we keep this notation for the sake of consistency. * * The interface is handled by a separate standalone driver maintained in the * ALSA subsystem for simplicity. To minimize the interaction between the two * subsystems, a bridge is setup between the hdmi-lpe-audio and i915: * 1. Create a platform device to share MMIO/IRQ resources * 2. Make the platform device child of i915 device for runtime PM. * 3. Create IRQ chip to forward the LPE audio irqs. * the hdmi-lpe-audio driver probes the lpe audio device and creates a new * sound card * * Threats: * Due to the restriction in Linux platform device model, user need manually * uninstall the hdmi-lpe-audio driver before uninstalling i915 module, * otherwise we might run into use-after-free issues after i915 removes the * platform device: even though hdmi-lpe-audio driver is released, the modules * is still in "installed" status. * * Implementation: * The MMIO/REG platform resources are created according to the registers * specification. * When forwarding LPE audio irqs, the flow control handler selection depends * on the platform, for example on valleyview handle_simple_irq is enough. 
* */ #include <linux/acpi.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <drm/intel_lpe_audio.h> #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_lpe_audio.h" #include "intel_pci_config.h" #define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct platform_device_info pinfo = {}; struct resource *rsc; struct platform_device *platdev; struct intel_hdmi_lpe_audio_pdata *pdata; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); rsc = kcalloc(2, sizeof(*rsc), GFP_KERNEL); if (!rsc) { kfree(pdata); return ERR_PTR(-ENOMEM); } rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; rsc[1].start = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE; rsc[1].end = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1; rsc[1].flags = IORESOURCE_MEM; rsc[1].name = "hdmi-lpe-audio-mmio"; pinfo.parent = dev_priv->drm.dev; pinfo.name = "hdmi-lpe-audio"; pinfo.id = -1; pinfo.res = rsc; pinfo.num_res = 2; pinfo.data = pdata; pinfo.size_data = sizeof(*pdata); pinfo.dma_mask = DMA_BIT_MASK(32); pdata->num_pipes = INTEL_NUM_PIPES(dev_priv); pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */ pdata->port[0].pipe = -1; pdata->port[1].pipe = -1; pdata->port[2].pipe = -1; spin_lock_init(&pdata->lpe_audio_slock); platdev = platform_device_register_full(&pinfo); kfree(rsc); kfree(pdata); if (IS_ERR(platdev)) { drm_err(&dev_priv->drm, "Failed to allocate LPE audio platform device\n"); return platdev; } pm_runtime_no_callbacks(&platdev->dev); return platdev; } static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) { /* XXX Note that platform_device_register_full() allocates a dma_mask * and never frees it. We can't free it here as we cannot guarantee * this is the last reference (i.e. that the dma_mask will not be * used after our unregister). So ee choose to leak the sizeof(u64) * allocation here - it should be fixed in the platform_device rather * than us fiddle with its internals. 
*/ platform_device_unregister(dev_priv->display.audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) { } static void lpe_audio_irq_mask(struct irq_data *d) { } static struct irq_chip lpe_audio_irqchip = { .name = "hdmi_lpe_audio_irqchip", .irq_mask = lpe_audio_irq_mask, .irq_unmask = lpe_audio_irq_unmask, }; static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { int irq = dev_priv->display.audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip, handle_simple_irq, "hdmi_lpe_audio_irq_handler"); return irq_set_chip_data(irq, dev_priv); } static bool lpe_audio_detect(struct drm_i915_private *dev_priv) { int lpe_present = false; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { static const struct pci_device_id atom_hdaudio_ids[] = { /* Baytrail */ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)}, /* Braswell */ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2284)}, {} }; if (!pci_dev_present(atom_hdaudio_ids)) { drm_info(&dev_priv->drm, "HDaudio controller not detected, using LPE audio instead\n"); lpe_present = true; } } return lpe_present; } static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; dev_priv->display.audio.lpe.irq = irq_alloc_desc(0); if (dev_priv->display.audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", dev_priv->display.audio.lpe.irq); ret = dev_priv->display.audio.lpe.irq; goto err; } drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); if (ret) { drm_err(&dev_priv->drm, "Failed to initialize irqchip for lpe audio: %d\n", ret); goto err_free_irq; } dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); if (IS_ERR(dev_priv->display.audio.lpe.platdev)) { ret = PTR_ERR(dev_priv->display.audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); goto err_free_irq; } /* enable chicken bit; at least this is required for Dell Wyse 3040 * with DP outputs (but only sometimes by some reason!) */ intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE); return 0; err_free_irq: irq_free_desc(dev_priv->display.audio.lpe.irq); err: dev_priv->display.audio.lpe.irq = -1; dev_priv->display.audio.lpe.platdev = NULL; return ret; } /** * intel_lpe_audio_irq_handler() - forwards the LPE audio irq * @dev_priv: the i915 drm device private data * * the LPE Audio irq is forwarded to the irq handler registered by LPE audio * driver. */ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) { int ret; if (!HAS_LPE_AUDIO(dev_priv)) return; ret = generic_handle_irq(dev_priv->display.audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); } /** * intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio * driver and i915 * @dev_priv: the i915 drm device private data * * Return: 0 if successful. non-zero if detection or * llocation/initialization fails */ int intel_lpe_audio_init(struct drm_i915_private *dev_priv) { int ret = -ENODEV; if (lpe_audio_detect(dev_priv)) { ret = lpe_audio_setup(dev_priv); if (ret < 0) drm_err(&dev_priv->drm, "failed to setup LPE Audio bridge\n"); } return ret; } /** * intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE * audio driver and i915 * @dev_priv: the i915 drm device private data * * release all the resources for LPE audio <-> i915 bridge. 
*/ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) { if (!HAS_LPE_AUDIO(dev_priv)) return; lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->display.audio.lpe.irq); dev_priv->display.audio.lpe.irq = -1; dev_priv->display.audio.lpe.platdev = NULL; } /** * intel_lpe_audio_notify() - notify the LPE audio driver of an ELD change * @dev_priv: the i915 drm device private data * @cpu_transcoder: CPU transcoder * @port: port * @eld: ELD data * @ls_clock: Link symbol clock in kHz * @dp_output: Driving a DP output? * * Notify the LPE audio driver of an ELD change. */ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output) { unsigned long irqflags; struct intel_hdmi_lpe_audio_pdata *pdata; struct intel_hdmi_lpe_audio_port_pdata *ppdata; u32 audio_enable; if (!HAS_LPE_AUDIO(dev_priv)) return; pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port)); if (eld != NULL) { memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES); ppdata->pipe = cpu_transcoder; ppdata->ls_clock = ls_clock; ppdata->dp_output = dp_output; /* Unmute the amp for both DP and HDMI */ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port), audio_enable & ~VLV_AMP_MUTE); } else { memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES); ppdata->pipe = -1; ppdata->ls_clock = 0; ppdata->dp_output = false; /* Mute the amp for both DP and HDMI */ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port), audio_enable | VLV_AMP_MUTE); } if (pdata->notify_audio_lpe) pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); }
linux-master
drivers/gpu/drm/i915/display/intel_lpe_audio.c
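The bridge in the file above only publishes the MMIO/IRQ resources and the shared platform data; the actual audio driver lives in ALSA (the hdmi-lpe-audio driver under sound/x86/). The following is a rough, hypothetical sketch of the consuming side, not the real probe: the function names example_lpe_probe()/example_lpe_notify() are made up for illustration, and only the intel_hdmi_lpe_audio_pdata layout comes from <drm/intel_lpe_audio.h> as used above.

/* Hypothetical consumer-side sketch; not the real hdmi-lpe-audio probe. */
static void example_lpe_notify(struct platform_device *pdev, int port)
{
	struct intel_hdmi_lpe_audio_pdata *pdata = dev_get_platdata(&pdev->dev);

	/* i915 updated pdata->port[port] under lpe_audio_slock before calling us. */
	dev_dbg(&pdev->dev, "port %d: ls_clock %d dp %d\n",
		port, pdata->port[port].ls_clock, pdata->port[port].dp_output);
}

static int example_lpe_probe(struct platform_device *pdev)
{
	struct intel_hdmi_lpe_audio_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct resource *mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!pdata || !mmio || irq < 0)
		return -ENODEV;

	/* intel_lpe_audio_notify() will invoke this on every ELD/clock change. */
	pdata->notify_audio_lpe = example_lpe_notify;

	return 0;
}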
/* * Copyright © 2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include "i915_reg.h" #include "intel_color.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsb.h" struct intel_color_funcs { int (*color_check)(struct intel_crtc_state *crtc_state); /* * Program non-arming double buffered color management registers * before vblank evasion. The registers should then latch after * the arming register is written (by color_commit_arm()) during * the next vblank start, alongside any other double buffered * registers involved with the same commit. This hook is optional. */ void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state); /* * Program arming double buffered color management registers * during vblank evasion. The registers (and whatever other registers * they arm that were written by color_commit_noarm) should then latch * during the next vblank start, alongside any other double buffered * registers involved with the same commit. */ void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); /* * Perform any extra tasks needed after all the * double buffered registers have been latched. */ void (*color_post_update)(const struct intel_crtc_state *crtc_state); /* * Load LUTs (and other single buffered color management * registers). Will (hopefully) be called during the vblank * following the latching of any double buffered registers * involved with the same commit. */ void (*load_luts)(const struct intel_crtc_state *crtc_state); /* * Read out the LUTs from the hardware into the software state. * Used by eg. the hardware state checker. */ void (*read_luts)(struct intel_crtc_state *crtc_state); /* * Compare the LUTs */ bool (*lut_equal)(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut); /* * Read out the CSCs (if any) from the hardware into the * software state. Used by eg. the hardware state checker. 
*/ void (*read_csc)(struct intel_crtc_state *crtc_state); }; #define CTM_COEFF_SIGN (1ULL << 63) #define CTM_COEFF_1_0 (1ULL << 32) #define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1) #define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1) #define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1) #define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1) #define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1) #define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1) #define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255) #define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0) #define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1)) #define LEGACY_LUT_LENGTH 256 /* * ILK+ csc matrix: * * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0| * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1| * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2| * * ILK/SNB don't have explicit post offsets, and instead * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used: * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2 * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2 * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0 * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16 */ /* * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point * format). This macro takes the coefficient we want transformed and the * number of fractional bits. * * We only have a 9 bits precision window which slides depending on the value * of the CTM coefficient and we write the value from bit 3. We also round the * value. */ #define ILK_CSC_COEFF_FP(coeff, fbits) \ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) #define ILK_CSC_COEFF_1_0 0x7800 #define ILK_CSC_COEFF_LIMITED_RANGE ((235 - 16) << (12 - 8)) /* exponent 0 */ #define ILK_CSC_POSTOFF_LIMITED_RANGE (16 << (12 - 8)) static const struct intel_csc_matrix ilk_csc_matrix_identity = { .preoff = {}, .coeff = { ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, }, .postoff = {}, }; /* Full range RGB -> limited range RGB matrix */ static const struct intel_csc_matrix ilk_csc_matrix_limited_range = { .preoff = {}, .coeff = { ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, }, .postoff = { ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, }, }; /* BT.709 full range RGB -> limited range YCbCr matrix */ static const struct intel_csc_matrix ilk_csc_matrix_rgb_to_ycbcr = { .preoff = {}, .coeff = { 0x1e08, 0x9cc0, 0xb528, 0x2ba8, 0x09d8, 0x37e8, 0xbce8, 0x9ad8, 0x1e08, }, .postoff = { 0x0800, 0x0100, 0x0800, }, }; static void intel_csc_clear(struct intel_csc_matrix *csc) { memset(csc, 0, sizeof(*csc)); } static bool lut_is_legacy(const struct drm_property_blob *lut) { return lut && drm_color_lut_size(lut) == LEGACY_LUT_LENGTH; } /* * When using limited range, multiply the matrix given by userspace by * the matrix that we would use for the limited range. */ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) { int i; for (i = 0; i < 9; i++) { u64 user_coeff = input[i]; u32 limited_coeff = CTM_COEFF_LIMITED_RANGE; u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0, CTM_COEFF_4_0 - 1) >> 2; /* * By scaling every co-efficient with limited range (16-235) * vs full range (0-255) the final o/p will be scaled down to * fit in the limited range supported by the panel. 
*/ result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30; result[i] |= user_coeff & CTM_COEFF_SIGN; } return result; } static void ilk_update_pipe_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]); intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]); intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]); intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe), csc->coeff[0] << 16 | csc->coeff[1]); intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), csc->coeff[2] << 16); intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe), csc->coeff[3] << 16 | csc->coeff[4]); intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), csc->coeff[5] << 16); intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe), csc->coeff[6] << 16 | csc->coeff[7]); intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), csc->coeff[8] << 16); if (DISPLAY_VER(i915) < 7) return; intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]); intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]); intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]); } static void ilk_read_pipe_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tmp; csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe)); csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe)); csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe)); tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; if (DISPLAY_VER(i915) < 7) return; csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe)); csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe)); csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe)); } static void ilk_read_csc(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_enable) ilk_read_pipe_csc(crtc, &crtc_state->csc); } static void skl_read_csc(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* * Display WA #1184: skl,glk * Wa_1406463849: icl * * Danger! On SKL-ICL *reads* from the CSC coeff/offset registers * will disarm an already armed CSC double buffer update. * So this must not be called while armed. Fortunately the state checker * readout happens only after the update has been already been latched. * * On earlier and later platforms only writes to said registers will * disarm the update. This is considered normal behavior and also * happens with various other hardware units. 
*/ if (crtc_state->csc_enable) ilk_read_pipe_csc(crtc, &crtc_state->csc); } static void icl_update_output_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), csc->coeff[0] << 16 | csc->coeff[1]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe), csc->coeff[2] << 16); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), csc->coeff[3] << 16 | csc->coeff[4]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe), csc->coeff[5] << 16); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), csc->coeff[6] << 16 | csc->coeff[7]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe), csc->coeff[8] << 16); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]); intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]); } static void icl_read_output_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tmp; csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe)); csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe)); csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe)); tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe)); csc->coeff[0] = tmp >> 16; csc->coeff[1] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe)); csc->coeff[2] = tmp >> 16; tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe)); csc->coeff[3] = tmp >> 16; csc->coeff[4] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe)); csc->coeff[5] = tmp >> 16; tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe)); csc->coeff[6] = tmp >> 16; csc->coeff[7] = tmp & 0xffff; tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe)); csc->coeff[8] = tmp >> 16; csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe)); csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe)); csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe)); } static void icl_read_csc(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* * Wa_1406463849: icl * * See skl_read_csc() */ if (crtc_state->csc_mode & ICL_CSC_ENABLE) ilk_read_pipe_csc(crtc, &crtc_state->csc); if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) icl_read_output_csc(crtc, &crtc_state->output_csc); } static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* icl+ have dedicated output CSC */ if (DISPLAY_VER(i915) >= 11) return false; /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) return false; return crtc_state->limited_color_range; } static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (!ilk_limited_range(crtc_state)) return false; if (crtc_state->c8_planes) return false; if (DISPLAY_VER(i915) == 10) return crtc_state->hw.gamma_lut; 
else return crtc_state->hw.gamma_lut && (crtc_state->hw.degamma_lut || crtc_state->hw.ctm); } static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) { if (!ilk_limited_range(crtc_state)) return false; return !ilk_lut_limited_range(crtc_state); } static void ilk_csc_copy(struct drm_i915_private *i915, struct intel_csc_matrix *dst, const struct intel_csc_matrix *src) { *dst = *src; if (DISPLAY_VER(i915) < 7) memset(dst->postoff, 0, sizeof(dst->postoff)); } static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, struct intel_csc_matrix *csc, bool limited_color_range) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; const u64 *input; u64 temp[9]; int i; /* for preoff/postoff */ if (limited_color_range) ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range); else ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity); if (limited_color_range) input = ctm_mult_by_limited(temp, ctm->matrix); else input = ctm->matrix; /* * Convert fixed point S31.32 input to format supported by the * hardware. */ for (i = 0; i < 9; i++) { u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; /* * Clamp input value to min/max supported by * hardware. */ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); csc->coeff[i] = 0; /* sign bit */ if (CTM_COEFF_NEGATIVE(input[i])) csc->coeff[i] |= 1 << 15; if (abs_coeff < CTM_COEFF_0_125) csc->coeff[i] |= (3 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 12); else if (abs_coeff < CTM_COEFF_0_25) csc->coeff[i] |= (2 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 11); else if (abs_coeff < CTM_COEFF_0_5) csc->coeff[i] |= (1 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 10); else if (abs_coeff < CTM_COEFF_1_0) csc->coeff[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); else if (abs_coeff < CTM_COEFF_2_0) csc->coeff[i] |= (7 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 8); else csc->coeff[i] |= (6 << 12) | ILK_CSC_COEFF_FP(abs_coeff, 7); } } static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range); } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (limited_color_range) { drm_WARN_ON(&i915->drm, !crtc_state->csc_enable); ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range); } else if (crtc_state->csc_enable) { /* * On GLK both pipe CSC and degamma LUT are controlled * by csc_enable. Hence for the cases where the degama * LUT is needed but CSC is not we need to load an * identity matrix. 
*/ drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915)); ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity); } else { intel_csc_clear(&crtc_state->csc); } } static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_enable) ilk_update_pipe_csc(crtc, &crtc_state->csc); } static void icl_assign_csc(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (crtc_state->hw.ctm) { drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0); ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false); } else { drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->csc); } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr); } else if (crtc_state->limited_color_range) { drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0); ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range); } else { drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0); intel_csc_clear(&crtc_state->output_csc); } } static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_mode & ICL_CSC_ENABLE) ilk_update_pipe_csc(crtc, &crtc_state->csc); if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) icl_update_output_csc(crtc, &crtc_state->output_csc); } static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits) { s64 c = CTM_COEFF_ABS(coeff); /* leave an extra bit for rounding */ c >>= 32 - frac_bits - 1; /* round and drop the extra bit */ c = (c + 1) >> 1; if (CTM_COEFF_NEGATIVE(coeff)) c = -c; c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1), (s64)(BIT(int_bits + frac_bits - 1) - 1)); return c & (BIT(int_bits + frac_bits) - 1); } /* * VLV/CHV Wide Gamut Color Correction (WGC) CSC * |r| | c0 c1 c2 | |r| * |g| = | c3 c4 c5 | x |g| * |b| | c6 c7 c8 | |b| * * Coefficients are two's complement s2.10. 
*/ static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state, struct intel_csc_matrix *csc) { const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; int i; for (i = 0; i < 9; i++) csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 2, 10); } static void vlv_load_wgc_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(pipe), csc->coeff[1] << 16 | csc->coeff[0]); intel_de_write_fw(dev_priv, PIPE_WGC_C02(pipe), csc->coeff[2]); intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(pipe), csc->coeff[4] << 16 | csc->coeff[3]); intel_de_write_fw(dev_priv, PIPE_WGC_C12(pipe), csc->coeff[5]); intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(pipe), csc->coeff[7] << 16 | csc->coeff[6]); intel_de_write_fw(dev_priv, PIPE_WGC_C22(pipe), csc->coeff[8]); } static void vlv_read_wgc_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tmp; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(pipe)); csc->coeff[2] = tmp & 0xffff; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(pipe)); csc->coeff[3] = tmp & 0xffff; csc->coeff[4] = tmp >> 16; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(pipe)); csc->coeff[5] = tmp & 0xffff; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(pipe)); csc->coeff[8] = tmp & 0xffff; } static void vlv_read_csc(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->wgc_enable) vlv_read_wgc_csc(crtc, &crtc_state->csc); } static void vlv_assign_csc(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (crtc_state->hw.ctm) { drm_WARN_ON(&i915->drm, !crtc_state->wgc_enable); vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); intel_csc_clear(&crtc_state->csc); } } /* * CHV Color Gamut Mapping (CGM) CSC * |r| | c0 c1 c2 | |r| * |g| = | c3 c4 c5 | x |g| * |b| | c6 c7 c8 | |b| * * Coefficients are two's complement s4.12. 
*/ static void chv_cgm_csc_convert_ctm(const struct intel_crtc_state *crtc_state, struct intel_csc_matrix *csc) { const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; int i; for (i = 0; i < 9; i++) csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 4, 12); } #define CHV_CGM_CSC_COEFF_1_0 (1 << 12) static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = { .coeff = { CHV_CGM_CSC_COEFF_1_0, 0, 0, 0, CHV_CGM_CSC_COEFF_1_0, 0, 0, 0, CHV_CGM_CSC_COEFF_1_0, }, }; static void chv_load_cgm_csc(struct intel_crtc *crtc, const struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe), csc->coeff[1] << 16 | csc->coeff[0]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF23(pipe), csc->coeff[3] << 16 | csc->coeff[2]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe), csc->coeff[5] << 16 | csc->coeff[4]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe), csc->coeff[7] << 16 | csc->coeff[6]); intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe), csc->coeff[8]); } static void chv_read_cgm_csc(struct intel_crtc *crtc, struct intel_csc_matrix *csc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tmp; tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe)); csc->coeff[0] = tmp & 0xffff; csc->coeff[1] = tmp >> 16; tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe)); csc->coeff[2] = tmp & 0xffff; csc->coeff[3] = tmp >> 16; tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe)); csc->coeff[4] = tmp & 0xffff; csc->coeff[5] = tmp >> 16; tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe)); csc->coeff[6] = tmp & 0xffff; csc->coeff[7] = tmp >> 16; tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe)); csc->coeff[8] = tmp & 0xffff; } static void chv_read_csc(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) chv_read_cgm_csc(crtc, &crtc_state->csc); } static void chv_assign_csc(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); drm_WARN_ON(&i915->drm, crtc_state->wgc_enable); if (crtc_state->hw.ctm) { drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc); } else { drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0); crtc_state->csc = chv_cgm_csc_matrix_identity; } } /* convert hw value with given bit_precision to lut property val */ static u32 intel_color_lut_pack(u32 val, int bit_precision) { u32 max = 0xffff >> (16 - bit_precision); val = clamp_val(val, 0, max); if (bit_precision < 16) val <<= 16 - bit_precision; return val; } static u32 i9xx_lut_8(const struct drm_color_lut *color) { return REG_FIELD_PREP(PALETTE_RED_MASK, drm_color_lut_extract(color->red, 8)) | REG_FIELD_PREP(PALETTE_GREEN_MASK, drm_color_lut_extract(color->green, 8)) | REG_FIELD_PREP(PALETTE_BLUE_MASK, drm_color_lut_extract(color->blue, 8)); } static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val) { entry->red = intel_color_lut_pack(REG_FIELD_GET(PALETTE_RED_MASK, val), 8); entry->green = intel_color_lut_pack(REG_FIELD_GET(PALETTE_GREEN_MASK, val), 8); entry->blue = intel_color_lut_pack(REG_FIELD_GET(PALETTE_BLUE_MASK, val), 8); } /* i8xx/i9xx+ 10bit slope format "even DW" (low 8 bits) */ static u32 _i9xx_lut_10_ldw(u16 a) { return drm_color_lut_extract(a, 10) & 0xff; } static u32 i9xx_lut_10_ldw(const 
struct drm_color_lut *color) { return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_ldw(color[0].red)) | REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_ldw(color[0].green)) | REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_ldw(color[0].blue)); } /* i8xx/i9xx+ 10bit slope format "odd DW" (high 2 bits + slope) */ static u32 _i9xx_lut_10_udw(u16 a, u16 b) { unsigned int mantissa, exponent; a = drm_color_lut_extract(a, 10); b = drm_color_lut_extract(b, 10); /* b = a + 8 * m * 2 ^ -e */ mantissa = clamp(b - a, 0, 0x7f); exponent = 3; while (mantissa > 0xf) { mantissa >>= 1; exponent--; } return (exponent << 6) | (mantissa << 2) | (a >> 8); } static u32 i9xx_lut_10_udw(const struct drm_color_lut *color) { return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_udw(color[0].red, color[1].red)) | REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_udw(color[0].green, color[1].green)) | REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_udw(color[0].blue, color[1].blue)); } static void i9xx_lut_10_pack(struct drm_color_lut *color, u32 ldw, u32 udw) { u16 red = REG_FIELD_GET(PALETTE_10BIT_RED_LDW_MASK, ldw) | REG_FIELD_GET(PALETTE_10BIT_RED_UDW_MASK, udw) << 8; u16 green = REG_FIELD_GET(PALETTE_10BIT_GREEN_LDW_MASK, ldw) | REG_FIELD_GET(PALETTE_10BIT_GREEN_UDW_MASK, udw) << 8; u16 blue = REG_FIELD_GET(PALETTE_10BIT_BLUE_LDW_MASK, ldw) | REG_FIELD_GET(PALETTE_10BIT_BLUE_UDW_MASK, udw) << 8; color->red = intel_color_lut_pack(red, 10); color->green = intel_color_lut_pack(green, 10); color->blue = intel_color_lut_pack(blue, 10); } static void i9xx_lut_10_pack_slope(struct drm_color_lut *color, u32 ldw, u32 udw) { int r_exp = REG_FIELD_GET(PALETTE_10BIT_RED_EXP_MASK, udw); int r_mant = REG_FIELD_GET(PALETTE_10BIT_RED_MANT_MASK, udw); int g_exp = REG_FIELD_GET(PALETTE_10BIT_GREEN_EXP_MASK, udw); int g_mant = REG_FIELD_GET(PALETTE_10BIT_GREEN_MANT_MASK, udw); int b_exp = REG_FIELD_GET(PALETTE_10BIT_BLUE_EXP_MASK, udw); int b_mant = REG_FIELD_GET(PALETTE_10BIT_BLUE_MANT_MASK, udw); i9xx_lut_10_pack(color, ldw, udw); color->red += r_mant << (3 - r_exp); color->green += g_mant << (3 - g_exp); color->blue += b_mant << (3 - b_exp); } /* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */ static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color) { return REG_FIELD_PREP(PALETTE_RED_MASK, color->red & 0xff) | REG_FIELD_PREP(PALETTE_GREEN_MASK, color->green & 0xff) | REG_FIELD_PREP(PALETTE_BLUE_MASK, color->blue & 0xff); } /* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color) { return REG_FIELD_PREP(PALETTE_RED_MASK, color->red >> 8) | REG_FIELD_PREP(PALETTE_GREEN_MASK, color->green >> 8) | REG_FIELD_PREP(PALETTE_BLUE_MASK, color->blue >> 8); } static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 | REG_FIELD_GET(PALETTE_RED_MASK, ldw); entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 | REG_FIELD_GET(PALETTE_GREEN_MASK, ldw); entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 | REG_FIELD_GET(PALETTE_BLUE_MASK, ldw); } static u16 i965_lut_11p6_max_pack(u32 val) { /* PIPEGCMAX is 11.6, clamp to 10.6 */ return clamp_val(val, 0, 0xffff); } static u32 ilk_lut_10(const struct drm_color_lut *color) { return REG_FIELD_PREP(PREC_PALETTE_10_RED_MASK, drm_color_lut_extract(color->red, 10)) | REG_FIELD_PREP(PREC_PALETTE_10_GREEN_MASK, drm_color_lut_extract(color->green, 10)) | REG_FIELD_PREP(PREC_PALETTE_10_BLUE_MASK, drm_color_lut_extract(color->blue, 
10)); } static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val) { entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_RED_MASK, val), 10); entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_GREEN_MASK, val), 10); entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_BLUE_MASK, val), 10); } /* ilk+ "12.4" interpolated format (low 6 bits) */ static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color) { return REG_FIELD_PREP(PREC_PALETTE_12P4_RED_LDW_MASK, color->red & 0x3f) | REG_FIELD_PREP(PREC_PALETTE_12P4_GREEN_LDW_MASK, color->green & 0x3f) | REG_FIELD_PREP(PREC_PALETTE_12P4_BLUE_LDW_MASK, color->blue & 0x3f); } /* ilk+ "12.4" interpolated format (high 10 bits) */ static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color) { return REG_FIELD_PREP(PREC_PALETTE_12P4_RED_UDW_MASK, color->red >> 6) | REG_FIELD_PREP(PREC_PALETTE_12P4_GREEN_UDW_MASK, color->green >> 6) | REG_FIELD_PREP(PREC_PALETTE_12P4_BLUE_UDW_MASK, color->blue >> 6); } static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { entry->red = REG_FIELD_GET(PREC_PALETTE_12P4_RED_UDW_MASK, udw) << 6 | REG_FIELD_GET(PREC_PALETTE_12P4_RED_LDW_MASK, ldw); entry->green = REG_FIELD_GET(PREC_PALETTE_12P4_GREEN_UDW_MASK, udw) << 6 | REG_FIELD_GET(PREC_PALETTE_12P4_GREEN_LDW_MASK, ldw); entry->blue = REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_UDW_MASK, udw) << 6 | REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_LDW_MASK, ldw); } static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) { /* * Despite Wa_1406463849, ICL no longer suffers from the SKL * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()). * Possibly due to the extra sticky CSC arming * (see icl_color_post_update()). * * On TGL+ all CSC arming issues have been properly fixed. */ icl_load_csc_matrix(crtc_state); } static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) { /* * Possibly related to display WA #1184, SKL CSC loses the latched * CSC coeff/offset register values if the CSC registers are disarmed * between DC5 exit and PSR exit. This will cause the plane(s) to * output all black (until CSC_MODE is rearmed and properly latched). * Once PSR exit (and proper register latching) has occurred the * danger is over. Thus when PSR is enabled the CSC coeff/offset * register programming will be peformed from skl_color_commit_arm() * which is called after PSR exit. 
*/ if (!crtc_state->has_psr) ilk_load_csc_matrix(crtc_state); } static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) { ilk_load_csc_matrix(crtc_state); } static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) { /* update TRANSCONF GAMMA_MODE */ i9xx_set_pipeconf(crtc_state); } static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* update TRANSCONF GAMMA_MODE */ ilk_set_pipeconf(crtc_state); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_de_write(i915, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val = 0; if (crtc_state->has_psr) ilk_load_csc_matrix(crtc_state); /* * We don't (yet) allow userspace to control the pipe background color, * so force it to black, but apply pipe gamma and CSC appropriately * so that its handling will match how we program our planes. */ if (crtc_state->gamma_enable) val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; if (crtc_state->csc_enable) val |= SKL_BOTTOM_COLOR_CSC_ENABLE; intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), val); intel_de_write(i915, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * We don't (yet) allow userspace to control the pipe background color, * so force it to black. */ intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0); intel_de_write(i915, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } static void icl_color_post_update(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * Despite Wa_1406463849, ICL CSC is no longer disarmed by * coeff/offset register *writes*. Instead, once CSC_MODE * is armed it stays armed, even after it has been latched. * Afterwards the coeff/offset registers become effectively * self-arming. That self-arming must be disabled before the * next icl_color_commit_noarm() tries to write the next set * of coeff/offset registers. Fortunately register *reads* * do still disarm the CSC. Naturally this must not be done * until the previously written CSC registers have actually * been latched. * * TGL+ no longer need this workaround. 
*/ intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe)); } static struct drm_property_blob * create_linear_lut(struct drm_i915_private *i915, int lut_size) { struct drm_property_blob *blob; struct drm_color_lut *lut; int i; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return blob; lut = blob->data; for (i = 0; i < lut_size; i++) { u16 val = 0xffff * i / (lut_size - 1); lut[i].red = val; lut[i].green = val; lut[i].blue = val; } return blob; } static u16 lut_limited_range(unsigned int value) { unsigned int min = 16 << 8; unsigned int max = 235 << 8; return value * (max - min) / 0xffff + min; } static struct drm_property_blob * create_resized_lut(struct drm_i915_private *i915, const struct drm_property_blob *blob_in, int lut_out_size, bool limited_color_range) { int i, lut_in_size = drm_color_lut_size(blob_in); struct drm_property_blob *blob_out; const struct drm_color_lut *lut_in; struct drm_color_lut *lut_out; blob_out = drm_property_create_blob(&i915->drm, sizeof(lut_out[0]) * lut_out_size, NULL); if (IS_ERR(blob_out)) return blob_out; lut_in = blob_in->data; lut_out = blob_out->data; for (i = 0; i < lut_out_size; i++) { const struct drm_color_lut *entry = &lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)]; if (limited_color_range) { lut_out[i].red = lut_limited_range(entry->red); lut_out[i].green = lut_limited_range(entry->green); lut_out[i].blue = lut_limited_range(entry->blue); } else { lut_out[i] = *entry; } } return blob_out; } static void i9xx_load_lut_8(struct intel_crtc *crtc, const struct drm_property_blob *blob) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_color_lut *lut; enum pipe pipe = crtc->pipe; int i; if (!blob) return; lut = blob->data; for (i = 0; i < 256; i++) intel_de_write_fw(dev_priv, PALETTE(pipe, i), i9xx_lut_8(&lut[i])); } static void i9xx_load_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), i9xx_lut_10_ldw(&lut[i])); intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), i9xx_lut_10_udw(&lut[i])); } } static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: i9xx_load_lut_8(crtc, post_csc_lut); break; case GAMMA_MODE_MODE_10BIT: i9xx_load_lut_10(crtc, post_csc_lut); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static void i965_load_lut_10p6(struct intel_crtc *crtc, const struct drm_property_blob *blob) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), i965_lut_10p6_ldw(&lut[i])); intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), i965_lut_10p6_udw(&lut[i])); } intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc 
= to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: i9xx_load_lut_8(crtc, post_csc_lut); break; case GAMMA_MODE_MODE_10BIT: i965_load_lut_10p6(crtc, post_csc_lut); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static void ilk_lut_write(const struct intel_crtc_state *crtc_state, i915_reg_t reg, u32 val) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (crtc_state->dsb) intel_dsb_reg_write(crtc_state->dsb, reg, val); else intel_de_write_fw(i915, reg, val); } static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_color_lut *lut; enum pipe pipe = crtc->pipe; int i; if (!blob) return; lut = blob->data; for (i = 0; i < 256; i++) ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), i9xx_lut_8(&lut[i])); } static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) ilk_lut_write(crtc_state, PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc_state, blob); break; case GAMMA_MODE_MODE_10BIT: ilk_load_lut_10(crtc_state, blob); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static int ivb_lut_10_size(u32 prec_index) { if (prec_index & PAL_PREC_SPLIT_MODE) return 512; else return 1024; } /* * IVB/HSW Bspec / PAL_PREC_INDEX: * "Restriction : Index auto increment mode is not * supported and must not be enabled." */ static void ivb_load_lut_10(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob, u32 prec_index) { const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), prec_index + i); ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_10(&lut[i])); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. */ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); } /* On BDW+ the index auto increment mode actually works */ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob, u32 prec_index) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), prec_index); ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT | prec_index); for (i = 0; i < lut_size; i++) ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_10(&lut[i])); /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. 
*/ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); } static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; /* Program the max register to clamp values > 1.0. */ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); } static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; /* Program the max register to clamp values > 1.0. */ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16); ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16); ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16); } static void ivb_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc_state, blob); break; case GAMMA_MODE_MODE_SPLIT: ivb_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); ivb_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); break; case GAMMA_MODE_MODE_10BIT: ivb_load_lut_10(crtc_state, blob, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static void bdw_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc_state, blob); break; case GAMMA_MODE_MODE_SPLIT: bdw_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); break; case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc_state, blob, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static int glk_degamma_lut_size(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 13) return 131; else return 35; } /* * change_lut_val_precision: helper function to upscale or downscale lut values. * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient * as currently there are no lut values exceeding 32 bit. */ static u32 change_lut_val_precision(u32 lut_val, int to, int from) { return mul_u32_u32(lut_val, (1 << to)) / (1 << from); } static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; /* * When setting the auto-increment bit, the hardware seems to * ignore the index bits, so we need to reset it to index 0 * separately. 
*/ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_INDEX_VALUE(0)); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT | PRE_CSC_GAMC_INDEX_VALUE(0)); for (i = 0; i < lut_size; i++) { /* * First lut_size entries represent range from 0 to 1.0 * 3 additional lut entries will represent extended range * inputs 3.0 and 7.0 respectively, currently clamped * at 1.0. Since the precision is 16bit, the user * value can be directly filled to register. * The pipe degamma table in GLK+ onwards doesn't * support different values per channel, so this just * programs green value which will be equal to Red and * Blue into the lut registers. * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ u32 lut_val; if (DISPLAY_VER(i915) >= 14) lut_val = change_lut_val_precision(lut[i].green, 24, 16); else lut_val = lut[i].green; ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), lut_val); } /* Clamp values > 1.0. */ while (i++ < glk_degamma_lut_size(i915)) ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; if (pre_csc_lut) glk_load_degamma_lut(crtc_state, pre_csc_lut); switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc_state, post_csc_lut); break; case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); glk_load_lut_ext2_max(crtc_state); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static void ivb_load_lut_max(const struct intel_crtc_state *crtc_state, const struct drm_color_lut *color) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red); ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green); ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue); } static void icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *blob = crtc_state->post_csc_lut; const struct drm_color_lut *lut = blob->data; enum pipe pipe = crtc->pipe; int i; /* * Program Super Fine segment (let's call it seg1)... * * Super Fine segment's step is 1/(8 * 128 * 256) and it has * 9 entries, corresponding to values 0, 1/(8 * 128 * 256), * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256). 
*/ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT | PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); for (i = 0; i < 9; i++) { const struct drm_color_lut *entry = &lut[i]; ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), ilk_lut_12p4_ldw(entry)); ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), ilk_lut_12p4_udw(entry)); } ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); } static void icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *blob = crtc_state->post_csc_lut; const struct drm_color_lut *lut = blob->data; const struct drm_color_lut *entry; enum pipe pipe = crtc->pipe; int i; /* * Program Fine segment (let's call it seg2)... * * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256) * ... 256/(128 * 256). So in order to program fine segment of LUT we * need to pick every 8th entry in the LUT, and program 256 indexes. * * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1], * seg2[0] being unused by the hardware. */ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT | PAL_PREC_INDEX_VALUE(0)); for (i = 1; i < 257; i++) { entry = &lut[i * 8]; ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); } /* * Program Coarse segment (let's call it seg3)... * * Coarse segment starts from index 0 and it's step is 1/256 ie 0, * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT * above, we need to pick every (8 * 128)th entry in LUT, and * program 256 of those. * * Spec is not very clear about if entries seg3[0] and seg3[1] are * being used or not, but we still need to program these to advance * the index. 
*/ for (i = 0; i < 256; i++) { entry = &lut[i * 8 * 128]; ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); } ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); /* The last entry in the LUT is to be programmed in GCMAX */ entry = &lut[256 * 8 * 128]; ivb_load_lut_max(crtc_state, entry); } static void icl_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; if (pre_csc_lut) glk_load_degamma_lut(crtc_state, pre_csc_lut); switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc_state, post_csc_lut); break; case GAMMA_MODE_MODE_12BIT_MULTI_SEG: icl_program_gamma_superfine_segment(crtc_state); icl_program_gamma_multi_segment(crtc_state); ivb_load_lut_ext_max(crtc_state); glk_load_lut_ext2_max(crtc_state); break; case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); glk_load_lut_ext2_max(crtc_state); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } if (crtc_state->dsb) { intel_dsb_finish(crtc_state->dsb); intel_dsb_commit(crtc_state->dsb, false); intel_dsb_wait(crtc_state->dsb); } } static void vlv_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->wgc_enable) vlv_load_wgc_csc(crtc, &crtc_state->csc); i965_load_luts(crtc_state); } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) { return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, drm_color_lut_extract(color->green, 14)) | REG_FIELD_PREP(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, drm_color_lut_extract(color->blue, 14)); } static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color) { return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 14)); } static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, ldw), 14); entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, ldw), 14); entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_RED_UDW_MASK, udw), 14); } static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 0), chv_cgm_degamma_ldw(&lut[i])); intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 1), chv_cgm_degamma_udw(&lut[i])); } } static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color) { return REG_FIELD_PREP(CGM_PIPE_GAMMA_GREEN_LDW_MASK, drm_color_lut_extract(color->green, 10)) | REG_FIELD_PREP(CGM_PIPE_GAMMA_BLUE_LDW_MASK, drm_color_lut_extract(color->blue, 10)); } static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color) { return REG_FIELD_PREP(CGM_PIPE_GAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 10)); } static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_LDW_MASK, ldw), 10); entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_LDW_MASK, ldw), 
10); entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_UDW_MASK, udw), 10); } static void chv_load_cgm_gamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_color_lut *lut = blob->data; int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0), chv_cgm_gamma_ldw(&lut[i])); intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1), chv_cgm_gamma_udw(&lut[i])); } } static void chv_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut; const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut; if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) chv_load_cgm_csc(crtc, &crtc_state->csc); if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) chv_load_cgm_degamma(crtc, pre_csc_lut); if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) chv_load_cgm_gamma(crtc, post_csc_lut); else i965_load_luts(crtc_state); intel_de_write_fw(i915, CGM_PIPE_MODE(crtc->pipe), crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); i915->display.funcs.color->load_luts(crtc_state); } void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (i915->display.funcs.color->color_commit_noarm) i915->display.funcs.color->color_commit_noarm(crtc_state); } void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); i915->display.funcs.color->color_commit_arm(crtc_state); } void intel_color_post_update(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (i915->display.funcs.color->color_post_update) i915->display.funcs.color->color_post_update(crtc_state); } void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* FIXME DSB has issues loading LUTs, disable it for now */ return; if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut) return; crtc_state->dsb = intel_dsb_prepare(crtc, 1024); } void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state) { if (!crtc_state->dsb) return; intel_dsb_cleanup(crtc_state->dsb); crtc_state->dsb = NULL; } static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); return !old_crtc_state->post_csc_lut && !old_crtc_state->pre_csc_lut; } static bool vlv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); return !old_crtc_state->wgc_enable && !old_crtc_state->post_csc_lut; } static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) { 
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); /* * CGM_PIPE_MODE is itself single buffered. We'd have to * somehow split it out from chv_load_luts() if we wanted * the ability to preload the CGM LUTs/CSC without tearing. */ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode) return false; return vlv_can_preload_luts(new_crtc_state); } int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); return i915->display.funcs.color->color_check(crtc_state); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); i915->display.funcs.color->read_luts(crtc_state); if (i915->display.funcs.color->read_csc) i915->display.funcs.color->read_csc(crtc_state); } bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* * FIXME c8_planes readout missing thus * .read_luts() doesn't read out post_csc_lut. */ if (!is_pre_csc_lut && crtc_state->c8_planes) return true; return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2, is_pre_csc_lut); } static bool need_plane_update(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); /* * On pre-SKL the pipe gamma enable and pipe csc enable for * the pipe bottom color are configured via the primary plane. * We have to reconfigure that even if the plane is inactive. 
*/ return crtc_state->active_planes & BIT(plane->id) || (DISPLAY_VER(i915) < 9 && plane->id == PLANE_PRIMARY); } static int intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_plane *plane; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state)) return 0; if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable && new_crtc_state->csc_enable == old_crtc_state->csc_enable) return 0; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { struct intel_plane_state *plane_state; if (!need_plane_update(plane, new_crtc_state)) continue; plane_state = intel_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane->id); new_crtc_state->async_flip_planes = 0; new_crtc_state->do_async_flip = false; /* plane control register changes blocked by CxSR */ if (HAS_GMCH(i915)) new_crtc_state->disable_cxsr = true; } return 0; } static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return 0; return DISPLAY_INFO(i915)->color.gamma_lut_tests; } static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); return DISPLAY_INFO(i915)->color.degamma_lut_tests; } static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (lut_is_legacy(gamma_lut)) return LEGACY_LUT_LENGTH; return DISPLAY_INFO(i915)->color.gamma_lut_size; } static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); return DISPLAY_INFO(i915)->color.degamma_lut_size; } static int check_lut_size(const struct drm_property_blob *lut, int expected) { int len; if (!lut) return 0; len = drm_color_lut_size(lut); if (len != expected) { DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", len, expected); return -EINVAL; } return 0; } static int _check_luts(const struct intel_crtc_state *crtc_state, u32 degamma_tests, u32 gamma_tests) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; int gamma_length, degamma_length; /* C8 relies on its palette being stored in the legacy LUT */ if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) { drm_dbg_kms(&i915->drm, "C8 pixelformat requires the legacy LUT\n"); return -EINVAL; } degamma_length = intel_degamma_lut_size(crtc_state); gamma_length = intel_gamma_lut_size(crtc_state); if (check_lut_size(degamma_lut, degamma_length) || check_lut_size(gamma_lut, gamma_length)) return -EINVAL; if (drm_color_lut_check(degamma_lut, degamma_tests) || drm_color_lut_check(gamma_lut, gamma_tests)) return -EINVAL; return 0; } static int check_luts(const struct intel_crtc_state *crtc_state) { return 
_check_luts(crtc_state, intel_degamma_lut_tests(crtc_state), intel_gamma_lut_tests(crtc_state)); } static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable || lut_is_legacy(crtc_state->hw.gamma_lut)) return GAMMA_MODE_MODE_8BIT; else return GAMMA_MODE_MODE_10BIT; } static int i9xx_lut_10_diff(u16 a, u16 b) { return drm_color_lut_extract(a, 10) - drm_color_lut_extract(b, 10); } static int i9xx_check_lut_10(struct drm_i915_private *dev_priv, const struct drm_property_blob *blob) { const struct drm_color_lut *lut = blob->data; int lut_size = drm_color_lut_size(blob); const struct drm_color_lut *a = &lut[lut_size - 2]; const struct drm_color_lut *b = &lut[lut_size - 1]; if (i9xx_lut_10_diff(b->red, a->red) > 0x7f || i9xx_lut_10_diff(b->green, a->green) > 0x7f || i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) { drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n"); return -EINVAL; } return 0; } void intel_color_assert_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* make sure {pre,post}_csc_lut were correctly assigned */ if (DISPLAY_VER(i915) >= 11 || HAS_GMCH(i915)) { drm_WARN_ON(&i915->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut); drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); } else if (DISPLAY_VER(i915) == 10) { drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut == crtc_state->hw.gamma_lut && crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut); drm_WARN_ON(&i915->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != NULL && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); } else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) { drm_WARN_ON(&i915->drm, crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut && crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut); drm_WARN_ON(&i915->drm, !ilk_lut_limited_range(crtc_state) && crtc_state->post_csc_lut != crtc_state->hw.degamma_lut && crtc_state->post_csc_lut != crtc_state->hw.gamma_lut); } } static void intel_assign_luts(struct intel_crtc_state *crtc_state) { drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, crtc_state->hw.gamma_lut); } static int i9xx_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int ret; ret = check_luts(crtc_state); if (ret) return ret; crtc_state->gamma_enable = crtc_state->hw.gamma_lut && !crtc_state->c8_planes; crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); if (DISPLAY_VER(i915) < 4 && crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) { ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut); if (ret) return ret; } ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; intel_assign_luts(crtc_state); crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; } /* * VLV color pipeline: * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10 */ static int vlv_color_check(struct intel_crtc_state *crtc_state) { int ret; ret = check_luts(crtc_state); if (ret) return ret; crtc_state->gamma_enable = crtc_state->hw.gamma_lut && !crtc_state->c8_planes; crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); crtc_state->wgc_enable = crtc_state->hw.ctm; ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; intel_assign_luts(crtc_state); vlv_assign_csc(crtc_state); 
crtc_state->preload_luts = vlv_can_preload_luts(crtc_state); return 0; } static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) { u32 cgm_mode = 0; if (crtc_state->hw.degamma_lut) cgm_mode |= CGM_PIPE_MODE_DEGAMMA; if (crtc_state->hw.ctm) cgm_mode |= CGM_PIPE_MODE_CSC; if (crtc_state->hw.gamma_lut && !lut_is_legacy(crtc_state->hw.gamma_lut)) cgm_mode |= CGM_PIPE_MODE_GAMMA; /* * Toggling the CGM CSC on/off outside of the tiny window * between start of vblank and frame start causes underruns. * Always enable the CGM CSC as a workaround. */ cgm_mode |= CGM_PIPE_MODE_CSC; return cgm_mode; } /* * CHV color pipeline: * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma -> * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10 * * We always bypass the WGC csc and use the CGM csc * instead since it has degamma and better precision. */ static int chv_color_check(struct intel_crtc_state *crtc_state) { int ret; ret = check_luts(crtc_state); if (ret) return ret; /* * Pipe gamma will be used only for the legacy LUT. * Otherwise we bypass it and use the CGM gamma instead. */ crtc_state->gamma_enable = lut_is_legacy(crtc_state->hw.gamma_lut) && !crtc_state->c8_planes; crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; crtc_state->cgm_mode = chv_cgm_mode(crtc_state); /* * We always bypass the WGC CSC and use the CGM CSC * instead since it has degamma and better precision. */ crtc_state->wgc_enable = false; ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; intel_assign_luts(crtc_state); chv_assign_csc(crtc_state); crtc_state->preload_luts = chv_can_preload_luts(crtc_state); return 0; } static bool ilk_gamma_enable(const struct intel_crtc_state *crtc_state) { return (crtc_state->hw.gamma_lut || crtc_state->hw.degamma_lut) && !crtc_state->c8_planes; } static bool ilk_csc_enable(const struct intel_crtc_state *crtc_state) { return crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || ilk_csc_limited_range(crtc_state) || crtc_state->hw.ctm; } static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable || lut_is_legacy(crtc_state->hw.gamma_lut)) return GAMMA_MODE_MODE_8BIT; else return GAMMA_MODE_MODE_10BIT; } static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state) { /* * CSC comes after the LUT in RGB->YCbCr mode. * RGB->YCbCr needs the limited range offsets added to * the output. RGB limited range output is handled by * the hw automagically elsewhere. 
*/ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) return CSC_BLACK_SCREEN_OFFSET; if (crtc_state->hw.degamma_lut) return CSC_MODE_YUV_TO_RGB; return CSC_MODE_YUV_TO_RGB | CSC_POSITION_BEFORE_GAMMA; } static int ilk_assign_luts(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut); drm_property_blob_put(gamma_lut); drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut); return 0; } if (crtc_state->hw.degamma_lut || crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) { drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, crtc_state->hw.gamma_lut); } else { drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.gamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, NULL); } return 0; } static int ilk_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int ret; ret = check_luts(crtc_state); if (ret) return ret; if (crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { drm_dbg_kms(&i915->drm, "Degamma and gamma together are not possible\n"); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { drm_dbg_kms(&i915->drm, "YCbCr and CTM together are not possible\n"); return -EINVAL; } crtc_state->gamma_enable = ilk_gamma_enable(crtc_state); crtc_state->csc_enable = ilk_csc_enable(crtc_state); crtc_state->gamma_mode = ilk_gamma_mode(crtc_state); crtc_state->csc_mode = ilk_csc_mode(crtc_state); ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; ret = ilk_assign_luts(crtc_state); if (ret) return ret; ilk_assign_csc(crtc_state); crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; } static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state) { if (crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) return GAMMA_MODE_MODE_SPLIT; return ilk_gamma_mode(crtc_state); } static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state) { bool limited_color_range = ilk_csc_limited_range(crtc_state); /* * CSC comes after the LUT in degamma, RGB->YCbCr, * and RGB full->limited range mode. 
*/ if (crtc_state->hw.degamma_lut || crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || limited_color_range) return 0; return CSC_POSITION_BEFORE_GAMMA; } static int ivb_assign_luts(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct drm_property_blob *degamma_lut, *gamma_lut; if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) return ilk_assign_luts(crtc_state); drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024); drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024); degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512, false); if (IS_ERR(degamma_lut)) return PTR_ERR(degamma_lut); gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512, ilk_lut_limited_range(crtc_state)); if (IS_ERR(gamma_lut)) { drm_property_blob_put(degamma_lut); return PTR_ERR(gamma_lut); } drm_property_replace_blob(&crtc_state->pre_csc_lut, degamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut); drm_property_blob_put(degamma_lut); drm_property_blob_put(gamma_lut); return 0; } static int ivb_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int ret; ret = check_luts(crtc_state); if (ret) return ret; if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) { drm_dbg_kms(&i915->drm, "C8 pixelformat and degamma together are not possible\n"); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { drm_dbg_kms(&i915->drm, "YCbCr and CTM together are not possible\n"); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { drm_dbg_kms(&i915->drm, "YCbCr and degamma+gamma together are not possible\n"); return -EINVAL; } crtc_state->gamma_enable = ilk_gamma_enable(crtc_state); crtc_state->csc_enable = ilk_csc_enable(crtc_state); crtc_state->gamma_mode = ivb_gamma_mode(crtc_state); crtc_state->csc_mode = ivb_csc_mode(crtc_state); ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; ret = ivb_assign_luts(crtc_state); if (ret) return ret; ilk_assign_csc(crtc_state); crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; } static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable || lut_is_legacy(crtc_state->hw.gamma_lut)) return GAMMA_MODE_MODE_8BIT; else return GAMMA_MODE_MODE_10BIT; } static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_state) { return crtc_state->hw.gamma_lut && !crtc_state->c8_planes && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB; } static int glk_assign_luts(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (glk_use_pre_csc_lut_for_gamma(crtc_state)) { struct drm_property_blob *gamma_lut; gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, DISPLAY_INFO(i915)->color.degamma_lut_size, false); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); drm_property_replace_blob(&crtc_state->pre_csc_lut, gamma_lut); drm_property_replace_blob(&crtc_state->post_csc_lut, NULL); drm_property_blob_put(gamma_lut); return 0; } if (ilk_lut_limited_range(crtc_state)) { struct drm_property_blob *gamma_lut; gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, drm_color_lut_size(crtc_state->hw.gamma_lut), true); if (IS_ERR(gamma_lut)) return PTR_ERR(gamma_lut); 
drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut); drm_property_blob_put(gamma_lut); } else { drm_property_replace_blob(&crtc_state->post_csc_lut, crtc_state->hw.gamma_lut); } drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut); /* * On GLK+ both pipe CSC and degamma LUT are controlled * by csc_enable. Hence for the cases where the CSC is * needed but degamma LUT is not we need to load a * linear degamma LUT. */ if (crtc_state->csc_enable && !crtc_state->pre_csc_lut) drm_property_replace_blob(&crtc_state->pre_csc_lut, i915->display.color.glk_linear_degamma_lut); return 0; } static int glk_check_luts(const struct intel_crtc_state *crtc_state) { u32 degamma_tests = intel_degamma_lut_tests(crtc_state); u32 gamma_tests = intel_gamma_lut_tests(crtc_state); if (glk_use_pre_csc_lut_for_gamma(crtc_state)) gamma_tests |= degamma_tests; return _check_luts(crtc_state, degamma_tests, gamma_tests); } static int glk_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int ret; ret = glk_check_luts(crtc_state); if (ret) return ret; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.ctm) { drm_dbg_kms(&i915->drm, "YCbCr and CTM together are not possible\n"); return -EINVAL; } if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB && crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) { drm_dbg_kms(&i915->drm, "YCbCr and degamma+gamma together are not possible\n"); return -EINVAL; } crtc_state->gamma_enable = !glk_use_pre_csc_lut_for_gamma(crtc_state) && crtc_state->hw.gamma_lut && !crtc_state->c8_planes; /* On GLK+ degamma LUT is controlled by csc_enable */ crtc_state->csc_enable = glk_use_pre_csc_lut_for_gamma(crtc_state) || crtc_state->hw.degamma_lut || crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || crtc_state->hw.ctm || ilk_csc_limited_range(crtc_state); crtc_state->gamma_mode = glk_gamma_mode(crtc_state); crtc_state->csc_mode = 0; ret = intel_color_add_affected_planes(crtc_state); if (ret) return ret; ret = glk_assign_luts(crtc_state); if (ret) return ret; ilk_assign_csc(crtc_state); crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; } static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) gamma_mode |= PRE_CSC_GAMMA_ENABLE; if (crtc_state->hw.gamma_lut && !crtc_state->c8_planes) gamma_mode |= POST_CSC_GAMMA_ENABLE; if (!crtc_state->hw.gamma_lut || lut_is_legacy(crtc_state->hw.gamma_lut)) gamma_mode |= GAMMA_MODE_MODE_8BIT; /* * Enable 10bit gamma for D13 * ToDo: Extend to Logarithmic Gamma once the new UAPI * is accepted and implemented by a userspace consumer */ else if (DISPLAY_VER(i915) >= 13) gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG; return gamma_mode; } static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state) { u32 csc_mode = 0; if (crtc_state->hw.ctm) csc_mode |= ICL_CSC_ENABLE; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || crtc_state->limited_color_range) csc_mode |= ICL_OUTPUT_CSC_ENABLE; return csc_mode; } static int icl_color_check(struct intel_crtc_state *crtc_state) { int ret; ret = check_luts(crtc_state); if (ret) return ret; crtc_state->gamma_mode = icl_gamma_mode(crtc_state); crtc_state->csc_mode = icl_csc_mode(crtc_state); intel_assign_luts(crtc_state); 
icl_assign_csc(crtc_state); crtc_state->preload_luts = intel_can_preload_luts(crtc_state); return 0; } static int i9xx_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return 0; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; case GAMMA_MODE_MODE_10BIT: return 10; default: MISSING_CASE(crtc_state->gamma_mode); return 0; } } static int i9xx_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { return 0; } static int i965_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return 0; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; case GAMMA_MODE_MODE_10BIT: return 16; default: MISSING_CASE(crtc_state->gamma_mode); return 0; } } static int ilk_gamma_mode_precision(u32 gamma_mode) { switch (gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; case GAMMA_MODE_MODE_10BIT: return 10; default: MISSING_CASE(gamma_mode); return 0; } } static bool ilk_has_post_csc_lut(const struct intel_crtc_state *crtc_state) { if (crtc_state->c8_planes) return true; return crtc_state->gamma_enable && (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) != 0; } static bool ilk_has_pre_csc_lut(const struct intel_crtc_state *crtc_state) { return crtc_state->gamma_enable && (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0; } static int ilk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!ilk_has_post_csc_lut(crtc_state)) return 0; return ilk_gamma_mode_precision(crtc_state->gamma_mode); } static int ilk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!ilk_has_pre_csc_lut(crtc_state)) return 0; return ilk_gamma_mode_precision(crtc_state->gamma_mode); } static int ivb_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (crtc_state->gamma_enable && crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return 10; return ilk_post_csc_lut_precision(crtc_state); } static int ivb_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (crtc_state->gamma_enable && crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return 10; return ilk_pre_csc_lut_precision(crtc_state); } static int chv_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) return 10; return i965_post_csc_lut_precision(crtc_state); } static int chv_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) return 14; return 0; } static int glk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return 0; return ilk_gamma_mode_precision(crtc_state->gamma_mode); } static int glk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!crtc_state->csc_enable) return 0; return 16; } static bool icl_has_post_csc_lut(const struct intel_crtc_state *crtc_state) { if (crtc_state->c8_planes) return true; return crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE; } static bool icl_has_pre_csc_lut(const struct intel_crtc_state *crtc_state) { return crtc_state->gamma_mode & PRE_CSC_GAMMA_ENABLE; } static int icl_post_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!icl_has_post_csc_lut(crtc_state)) return 0; switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: return 8; case GAMMA_MODE_MODE_10BIT: return 10; case GAMMA_MODE_MODE_12BIT_MULTI_SEG: 
return 16; default: MISSING_CASE(crtc_state->gamma_mode); return 0; } } static int icl_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state) { if (!icl_has_pre_csc_lut(crtc_state)) return 0; return 16; } static bool err_check(struct drm_color_lut *lut1, struct drm_color_lut *lut2, u32 err) { return ((abs((long)lut2->red - lut1->red)) <= err) && ((abs((long)lut2->blue - lut1->blue)) <= err) && ((abs((long)lut2->green - lut1->green)) <= err); } static bool intel_lut_entries_equal(struct drm_color_lut *lut1, struct drm_color_lut *lut2, int lut_size, u32 err) { int i; for (i = 0; i < lut_size; i++) { if (!err_check(&lut1[i], &lut2[i], err)) return false; } return true; } static bool intel_lut_equal(const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, int check_size, int precision) { struct drm_color_lut *lut1, *lut2; int lut_size1, lut_size2; u32 err; if (!blob1 != !blob2) return false; if (!blob1 != !precision) return false; if (!blob1) return true; lut_size1 = drm_color_lut_size(blob1); lut_size2 = drm_color_lut_size(blob2); if (lut_size1 != lut_size2) return false; if (check_size > lut_size1) return false; lut1 = blob1->data; lut2 = blob2->data; err = 0xffff >> precision; if (!check_size) check_size = lut_size1; return intel_lut_entries_equal(lut1, lut2, check_size, err); } static bool i9xx_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { int check_size = 0; if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, i9xx_pre_csc_lut_precision(crtc_state)); /* 10bit mode last entry is implicit, just skip it */ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) check_size = 128; return intel_lut_equal(blob1, blob2, check_size, i9xx_post_csc_lut_precision(crtc_state)); } static bool i965_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, i9xx_pre_csc_lut_precision(crtc_state)); else return intel_lut_equal(blob1, blob2, 0, i965_post_csc_lut_precision(crtc_state)); } static bool chv_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, chv_pre_csc_lut_precision(crtc_state)); else return intel_lut_equal(blob1, blob2, 0, chv_post_csc_lut_precision(crtc_state)); } static bool ilk_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, ilk_pre_csc_lut_precision(crtc_state)); else return intel_lut_equal(blob1, blob2, 0, ilk_post_csc_lut_precision(crtc_state)); } static bool ivb_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, ivb_pre_csc_lut_precision(crtc_state)); else return intel_lut_equal(blob1, blob2, 0, ivb_post_csc_lut_precision(crtc_state)); } static bool glk_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, glk_pre_csc_lut_precision(crtc_state)); else 
return intel_lut_equal(blob1, blob2, 0, glk_post_csc_lut_precision(crtc_state)); } static bool icl_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, const struct drm_property_blob *blob2, bool is_pre_csc_lut) { int check_size = 0; if (is_pre_csc_lut) return intel_lut_equal(blob1, blob2, 0, icl_pre_csc_lut_precision(crtc_state)); /* hw readout broken except for the super fine segment :( */ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == GAMMA_MODE_MODE_12BIT_MULTI_SEG) check_size = 9; return intel_lut_equal(blob1, blob2, check_size, icl_post_csc_lut_precision(crtc_state)); } static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; int i; blob = drm_property_create_blob(&dev_priv->drm, sizeof(lut[0]) * LEGACY_LUT_LENGTH, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } return blob; } static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; u32 ldw, udw; int i; blob = drm_property_create_blob(&dev_priv->drm, lut_size * sizeof(lut[0]), NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size - 1; i++) { ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); i9xx_lut_10_pack(&lut[i], ldw, udw); } i9xx_lut_10_pack_slope(&lut[i], ldw, udw); return blob; } static void i9xx_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: crtc_state->post_csc_lut = i9xx_read_lut_8(crtc); break; case GAMMA_MODE_MODE_10BIT: crtc_state->post_csc_lut = i9xx_read_lut_10(crtc); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size - 1; i++) { u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0))); lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1))); lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } static void i965_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: crtc_state->post_csc_lut = i9xx_read_lut_8(crtc); break; case GAMMA_MODE_MODE_10BIT: 
crtc_state->post_csc_lut = i965_read_lut_10p6(crtc); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size; i++) { u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0)); u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1)); chv_cgm_degamma_pack(&lut[i], ldw, udw); } return blob; } static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size; i++) { u32 ldw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0)); u32 udw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1)); chv_cgm_gamma_pack(&lut[i], ldw, udw); } return blob; } static void chv_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) crtc_state->pre_csc_lut = chv_read_cgm_degamma(crtc); if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) crtc_state->post_csc_lut = chv_read_cgm_gamma(crtc); else i965_read_luts(crtc_state); } static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; int i; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * LEGACY_LUT_LENGTH, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { u32 val = intel_de_read_fw(i915, LGC_PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } return blob; } static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size; i++) { u32 val = intel_de_read_fw(i915, PREC_PALETTE(pipe, i)); ilk_lut_10_pack(&lut[i], val); } return blob; } static void ilk_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_property_blob **blob = ilk_has_post_csc_lut(crtc_state) ? &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut; if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: *blob = ilk_read_lut_8(crtc); break; case GAMMA_MODE_MODE_10BIT: *blob = ilk_read_lut_10(crtc); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } /* * IVB/HSW Bspec / PAL_PREC_INDEX: * "Restriction : Index auto increment mode is not * supported and must not be enabled." 
*/ static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc, u32 prec_index) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; for (i = 0; i < lut_size; i++) { u32 val; intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index + i); val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); return blob; } static void ivb_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_property_blob **blob = ilk_has_post_csc_lut(crtc_state) ? &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut; if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: *blob = ilk_read_lut_8(crtc); break; case GAMMA_MODE_MODE_SPLIT: crtc_state->pre_csc_lut = ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); crtc_state->post_csc_lut = ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); break; case GAMMA_MODE_MODE_10BIT: *blob = ivb_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } /* On BDW+ the index auto increment mode actually works */ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, u32 prec_index) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int i, lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), prec_index); intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT | prec_index); for (i = 0; i < lut_size; i++) { u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), PAL_PREC_INDEX_VALUE(0)); return blob; } static void bdw_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_property_blob **blob = ilk_has_post_csc_lut(crtc_state) ? 
&crtc_state->post_csc_lut : &crtc_state->pre_csc_lut; if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: *blob = ilk_read_lut_8(crtc); break; case GAMMA_MODE_MODE_SPLIT: crtc_state->pre_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); break; case GAMMA_MODE_MODE_10BIT: *blob = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; /* * When setting the auto-increment bit, the hardware seems to * ignore the index bits, so we need to reset it to index 0 * separately. */ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_INDEX_VALUE(0)); intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT | PRE_CSC_GAMC_INDEX_VALUE(0)); for (i = 0; i < lut_size; i++) { u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe)); /* * For MTL and beyond, convert back the 24 bit lut values * read from HW to 16 bit values to maintain parity with * userspace values */ if (DISPLAY_VER(dev_priv) >= 14) val = change_lut_val_precision(val, 16, 24); lut[i].red = val; lut[i].green = val; lut[i].blue = val; } intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_INDEX_VALUE(0)); return blob; } static void glk_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->csc_enable) crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc); if (!crtc_state->gamma_enable && !crtc_state->c8_planes) return; switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: crtc_state->post_csc_lut = ilk_read_lut_8(crtc); break; case GAMMA_MODE_MODE_10BIT: crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static struct drm_property_blob * icl_read_lut_multi_segment(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; blob = drm_property_create_blob(&i915->drm, sizeof(lut[0]) * lut_size, NULL); if (IS_ERR(blob)) return NULL; lut = blob->data; intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_AUTO_INCREMENT | PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); for (i = 0; i < 9; i++) { u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe)); u32 udw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe)); ilk_lut_12p4_pack(&lut[i], ldw, udw); } intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_MULTI_SEG_INDEX_VALUE(0)); /* * FIXME readouts from PAL_PREC_DATA register aren't giving * correct values in the case of fine and coarse segments. * Restricting readouts only for super fine segment as of now. 
*/ return blob; } static void icl_read_luts(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (icl_has_pre_csc_lut(crtc_state)) crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc); if (!icl_has_post_csc_lut(crtc_state)) return; switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: crtc_state->post_csc_lut = ilk_read_lut_8(crtc); break; case GAMMA_MODE_MODE_10BIT: crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); break; case GAMMA_MODE_MODE_12BIT_MULTI_SEG: crtc_state->post_csc_lut = icl_read_lut_multi_segment(crtc); break; default: MISSING_CASE(crtc_state->gamma_mode); break; } } static const struct intel_color_funcs chv_color_funcs = { .color_check = chv_color_check, .color_commit_arm = i9xx_color_commit_arm, .load_luts = chv_load_luts, .read_luts = chv_read_luts, .lut_equal = chv_lut_equal, .read_csc = chv_read_csc, }; static const struct intel_color_funcs vlv_color_funcs = { .color_check = vlv_color_check, .color_commit_arm = i9xx_color_commit_arm, .load_luts = vlv_load_luts, .read_luts = i965_read_luts, .lut_equal = i965_lut_equal, .read_csc = vlv_read_csc, }; static const struct intel_color_funcs i965_color_funcs = { .color_check = i9xx_color_check, .color_commit_arm = i9xx_color_commit_arm, .load_luts = i965_load_luts, .read_luts = i965_read_luts, .lut_equal = i965_lut_equal, }; static const struct intel_color_funcs i9xx_color_funcs = { .color_check = i9xx_color_check, .color_commit_arm = i9xx_color_commit_arm, .load_luts = i9xx_load_luts, .read_luts = i9xx_read_luts, .lut_equal = i9xx_lut_equal, }; static const struct intel_color_funcs tgl_color_funcs = { .color_check = icl_color_check, .color_commit_noarm = icl_color_commit_noarm, .color_commit_arm = icl_color_commit_arm, .load_luts = icl_load_luts, .read_luts = icl_read_luts, .lut_equal = icl_lut_equal, .read_csc = icl_read_csc, }; static const struct intel_color_funcs icl_color_funcs = { .color_check = icl_color_check, .color_commit_noarm = icl_color_commit_noarm, .color_commit_arm = icl_color_commit_arm, .color_post_update = icl_color_post_update, .load_luts = icl_load_luts, .read_luts = icl_read_luts, .lut_equal = icl_lut_equal, .read_csc = icl_read_csc, }; static const struct intel_color_funcs glk_color_funcs = { .color_check = glk_color_check, .color_commit_noarm = skl_color_commit_noarm, .color_commit_arm = skl_color_commit_arm, .load_luts = glk_load_luts, .read_luts = glk_read_luts, .lut_equal = glk_lut_equal, .read_csc = skl_read_csc, }; static const struct intel_color_funcs skl_color_funcs = { .color_check = ivb_color_check, .color_commit_noarm = skl_color_commit_noarm, .color_commit_arm = skl_color_commit_arm, .load_luts = bdw_load_luts, .read_luts = bdw_read_luts, .lut_equal = ivb_lut_equal, .read_csc = skl_read_csc, }; static const struct intel_color_funcs bdw_color_funcs = { .color_check = ivb_color_check, .color_commit_noarm = ilk_color_commit_noarm, .color_commit_arm = hsw_color_commit_arm, .load_luts = bdw_load_luts, .read_luts = bdw_read_luts, .lut_equal = ivb_lut_equal, .read_csc = ilk_read_csc, }; static const struct intel_color_funcs hsw_color_funcs = { .color_check = ivb_color_check, .color_commit_noarm = ilk_color_commit_noarm, .color_commit_arm = hsw_color_commit_arm, .load_luts = ivb_load_luts, .read_luts = ivb_read_luts, .lut_equal = ivb_lut_equal, .read_csc = ilk_read_csc, }; static const struct intel_color_funcs ivb_color_funcs = { .color_check = ivb_color_check, .color_commit_noarm = 
ilk_color_commit_noarm, .color_commit_arm = ilk_color_commit_arm, .load_luts = ivb_load_luts, .read_luts = ivb_read_luts, .lut_equal = ivb_lut_equal, .read_csc = ilk_read_csc, }; static const struct intel_color_funcs ilk_color_funcs = { .color_check = ilk_color_check, .color_commit_noarm = ilk_color_commit_noarm, .color_commit_arm = ilk_color_commit_arm, .load_luts = ilk_load_luts, .read_luts = ilk_read_luts, .lut_equal = ilk_lut_equal, .read_csc = ilk_read_csc, }; void intel_color_crtc_init(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int degamma_lut_size, gamma_lut_size; bool has_ctm; drm_mode_crtc_set_gamma_size(&crtc->base, 256); gamma_lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size; degamma_lut_size = DISPLAY_INFO(i915)->color.degamma_lut_size; has_ctm = DISPLAY_VER(i915) >= 5; /* * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the * only mode supported by Alviso and Grantsdale." * * Actually looks like this affects all of gen3. * Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm) * are confirmed not to suffer from this restriction. */ if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A) gamma_lut_size = 256; drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size, has_ctm, gamma_lut_size); } int intel_color_init(struct drm_i915_private *i915) { struct drm_property_blob *blob; if (DISPLAY_VER(i915) != 10) return 0; blob = create_linear_lut(i915, DISPLAY_INFO(i915)->color.degamma_lut_size); if (IS_ERR(blob)) return PTR_ERR(blob); i915->display.color.glk_linear_degamma_lut = blob; return 0; } void intel_color_init_hooks(struct drm_i915_private *i915) { if (HAS_GMCH(i915)) { if (IS_CHERRYVIEW(i915)) i915->display.funcs.color = &chv_color_funcs; else if (IS_VALLEYVIEW(i915)) i915->display.funcs.color = &vlv_color_funcs; else if (DISPLAY_VER(i915) >= 4) i915->display.funcs.color = &i965_color_funcs; else i915->display.funcs.color = &i9xx_color_funcs; } else { if (DISPLAY_VER(i915) >= 12) i915->display.funcs.color = &tgl_color_funcs; else if (DISPLAY_VER(i915) == 11) i915->display.funcs.color = &icl_color_funcs; else if (DISPLAY_VER(i915) == 10) i915->display.funcs.color = &glk_color_funcs; else if (DISPLAY_VER(i915) == 9) i915->display.funcs.color = &skl_color_funcs; else if (DISPLAY_VER(i915) == 8) i915->display.funcs.color = &bdw_color_funcs; else if (IS_HASWELL(i915)) i915->display.funcs.color = &hsw_color_funcs; else if (DISPLAY_VER(i915) == 7) i915->display.funcs.color = &ivb_color_funcs; else i915->display.funcs.color = &ilk_color_funcs; } }
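/*
 * Illustrative sketch, not part of the driver: the per-platform hooks chosen
 * above are ultimately exercised through the standard DRM color management
 * properties that intel_color_crtc_init() exposes via
 * drm_crtc_enable_color_mgmt(). A userspace client would typically hand the
 * driver a post-CSC gamma ramp roughly as below, assuming libdrm; the helper
 * name, the previously looked-up "GAMMA_LUT" property id and the atomic
 * request are hypothetical stand-ins.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_set_gamma_lut(int fd, drmModeAtomicReq *req,
				 uint32_t crtc_id, uint32_t gamma_lut_prop_id,
				 int lut_size)
{
	struct drm_color_lut *lut;
	uint32_t blob_id = 0;
	int i, ret;

	lut = calloc(lut_size, sizeof(*lut));
	if (!lut)
		return -ENOMEM;

	/*
	 * Identity ramp; entries are 16 bit, the hardware reduces them to
	 * the precision of whatever gamma mode the driver selects.
	 */
	for (i = 0; i < lut_size; i++) {
		uint16_t v = (uint16_t)((uint32_t)i * 0xffff / (lut_size - 1));

		lut[i].red = lut[i].green = lut[i].blue = v;
	}

	ret = drmModeCreatePropertyBlob(fd, lut, lut_size * sizeof(*lut), &blob_id);
	if (!ret && drmModeAtomicAddProperty(req, crtc_id, gamma_lut_prop_id, blob_id) < 0)
		ret = -EINVAL;

	free(lut);
	return ret;
}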
linux-master
drivers/gpu/drm/i915/display/intel_color.c
/* * Copyright (c) 2006 Dave Airlie <[email protected]> * Copyright © 2006-2008,2010 Intel Corporation * Jesse Barnes <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * Chris Wilson <[email protected]> */ #include <linux/export.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c.h> #include <drm/display/drm_hdcp_helper.h> #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_gmbus.h" #include "intel_gmbus_regs.h" struct intel_gmbus { struct i2c_adapter adapter; #define GMBUS_FORCE_BIT_RETRY (1U << 31) u32 force_bit; u32 reg0; i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; struct drm_i915_private *i915; }; enum gmbus_gpio { GPIOA, GPIOB, GPIOC, GPIOD, GPIOE, GPIOF, GPIOG, GPIOH, __GPIOI_UNUSED, GPIOJ, GPIOK, GPIOL, GPIOM, GPION, GPIOO, }; struct gmbus_pin { const char *name; enum gmbus_gpio gpio; }; /* Map gmbus pin pairs to names and registers. 
*/ static const struct gmbus_pin gmbus_pins[] = { [GMBUS_PIN_SSC] = { "ssc", GPIOB }, [GMBUS_PIN_VGADDC] = { "vga", GPIOA }, [GMBUS_PIN_PANEL] = { "panel", GPIOC }, [GMBUS_PIN_DPC] = { "dpc", GPIOD }, [GMBUS_PIN_DPB] = { "dpb", GPIOE }, [GMBUS_PIN_DPD] = { "dpd", GPIOF }, }; static const struct gmbus_pin gmbus_pins_bdw[] = { [GMBUS_PIN_VGADDC] = { "vga", GPIOA }, [GMBUS_PIN_DPC] = { "dpc", GPIOD }, [GMBUS_PIN_DPB] = { "dpb", GPIOE }, [GMBUS_PIN_DPD] = { "dpd", GPIOF }, }; static const struct gmbus_pin gmbus_pins_skl[] = { [GMBUS_PIN_DPC] = { "dpc", GPIOD }, [GMBUS_PIN_DPB] = { "dpb", GPIOE }, [GMBUS_PIN_DPD] = { "dpd", GPIOF }, }; static const struct gmbus_pin gmbus_pins_bxt[] = { [GMBUS_PIN_1_BXT] = { "dpb", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpc", GPIOC }, [GMBUS_PIN_3_BXT] = { "misc", GPIOD }, }; static const struct gmbus_pin gmbus_pins_cnp[] = { [GMBUS_PIN_1_BXT] = { "dpb", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpc", GPIOC }, [GMBUS_PIN_3_BXT] = { "misc", GPIOD }, [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, }; static const struct gmbus_pin gmbus_pins_icp[] = { [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION }, [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO }, }; static const struct gmbus_pin gmbus_pins_dg1[] = { [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, }; static const struct gmbus_pin gmbus_pins_dg2[] = { [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, }; static const struct gmbus_pin gmbus_pins_mtp[] = { [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, [GMBUS_PIN_5_MTP] = { "dpe", GPIOF }, [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, }; static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, unsigned int pin) { const struct gmbus_pin *pins; size_t size; if (INTEL_PCH_TYPE(i915) >= PCH_DG2) { pins = gmbus_pins_dg2; size = ARRAY_SIZE(gmbus_pins_dg2); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { pins = gmbus_pins_dg1; size = ARRAY_SIZE(gmbus_pins_dg1); } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) { pins = gmbus_pins_mtp; size = ARRAY_SIZE(gmbus_pins_mtp); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { pins = gmbus_pins_icp; size = ARRAY_SIZE(gmbus_pins_icp); } else if (HAS_PCH_CNP(i915)) { pins = gmbus_pins_cnp; size = ARRAY_SIZE(gmbus_pins_cnp); } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { pins = gmbus_pins_bxt; size = ARRAY_SIZE(gmbus_pins_bxt); } else if (DISPLAY_VER(i915) == 9) { pins = gmbus_pins_skl; size = ARRAY_SIZE(gmbus_pins_skl); } else if (IS_BROADWELL(i915)) { pins = gmbus_pins_bdw; size = ARRAY_SIZE(gmbus_pins_bdw); } else { pins = gmbus_pins; size = ARRAY_SIZE(gmbus_pins); } if (pin >= size || !pins[pin].name) return NULL; return &pins[pin]; } bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin) { return get_gmbus_pin(i915, pin); } /* Intel GPIO access functions */ #define I2C_RISEFALL_TIME 10 
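/*
 * Illustrative sketch, not part of the upstream file: the sparse pin tables
 * above leave holes for pins a given PCH does not route, so callers are
 * expected to validate a pin (for example one read from the VBT) before
 * asking for its adapter. intel_gmbus_is_valid_pin() is defined above and
 * intel_gmbus_get_adapter() is the real getter declared in intel_gmbus.h;
 * the wrapper name below is hypothetical.
 */
static struct i2c_adapter *
example_get_ddc_adapter(struct drm_i915_private *i915, unsigned int vbt_pin)
{
	if (!intel_gmbus_is_valid_pin(i915, vbt_pin)) {
		drm_dbg_kms(&i915->drm,
			    "Ignoring invalid DDC pin %u from VBT\n", vbt_pin);
		return NULL;
	}

	return intel_gmbus_get_adapter(i915, vbt_pin);
}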
static inline struct intel_gmbus * to_intel_gmbus(struct i2c_adapter *i2c) { return container_of(i2c, struct intel_gmbus, adapter); } void intel_gmbus_reset(struct drm_i915_private *i915) { intel_de_write(i915, GMBUS0(i915), 0); intel_de_write(i915, GMBUS4(i915), 0); } static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { /* When using bit bashing for I2C, this bit needs to be set to 1 */ intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void pch_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, !enable ? BXT_GMBUS_GATING_DIS : 0); } static u32 get_reserved(struct intel_gmbus *bus) { struct drm_i915_private *i915 = bus->i915; u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ if (!IS_I830(i915) && !IS_I845G(i915)) reserved = intel_de_read_notrace(i915, bus->gpio_reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); return reserved; } static int get_clock(void *data) { struct intel_gmbus *bus = data; struct drm_i915_private *i915 = bus->i915; u32 reserved = get_reserved(bus); intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); intel_de_write_notrace(i915, bus->gpio_reg, reserved); return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_gmbus *bus = data; struct drm_i915_private *i915 = bus->i915; u32 reserved = get_reserved(bus); intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); intel_de_write_notrace(i915, bus->gpio_reg, reserved); return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; struct drm_i915_private *i915 = bus->i915; u32 reserved = get_reserved(bus); u32 clock_bits; if (state_high) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; else clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits); intel_de_posting_read(i915, bus->gpio_reg); } static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; struct drm_i915_private *i915 = bus->i915; u32 reserved = get_reserved(bus); u32 data_bits; if (state_high) data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; else data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits); intel_de_posting_read(i915, bus->gpio_reg); } static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; intel_gmbus_reset(i915); if (IS_PINEVIEW(i915)) pnv_gmbus_clock_gating(i915, false); set_data(bus, 1); set_clock(bus, 1); udelay(I2C_RISEFALL_TIME); return 0; } static void intel_gpio_post_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; set_data(bus, 1); set_clock(bus, 1); if (IS_PINEVIEW(i915)) pnv_gmbus_clock_gating(i915, true); } static void intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg) { struct i2c_algo_bit_data *algo; algo = 
&bus->bit_algo; bus->gpio_reg = gpio_reg; bus->adapter.algo_data = algo; algo->setsda = set_data; algo->setscl = set_clock; algo->getsda = get_data; algo->getscl = get_clock; algo->pre_xfer = intel_gpio_pre_xfer; algo->post_xfer = intel_gpio_post_xfer; algo->udelay = I2C_RISEFALL_TIME; algo->timeout = usecs_to_jiffies(2200); algo->data = bus; } static bool has_gmbus_irq(struct drm_i915_private *i915) { /* * encoder->shutdown() may want to use GMBUS * after irqs have already been disabled. */ return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); } static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) { DEFINE_WAIT(wait); u32 gmbus2; int ret; /* Important: The hw handles only the first bit, so set only one! Since * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ if (!has_gmbus_irq(i915)) irq_en = 0; add_wait_queue(&i915->display.gmbus.wait_queue, &wait); intel_de_write_fw(i915, GMBUS4(i915), irq_en); status |= GMBUS_SATOER; ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 2); if (ret) ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 50); intel_de_write_fw(i915, GMBUS4(i915), 0); remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) return -ENXIO; return ret; } static int gmbus_wait_idle(struct drm_i915_private *i915) { DEFINE_WAIT(wait); u32 irq_enable; int ret; /* Important: The hw handles only the first bit, so set only one! */ irq_enable = 0; if (has_gmbus_irq(i915)) irq_enable = GMBUS_IDLE_EN; add_wait_queue(&i915->display.gmbus.wait_queue, &wait); intel_de_write_fw(i915, GMBUS4(i915), irq_enable); ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10); intel_de_write_fw(i915, GMBUS4(i915), 0); remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); return ret; } static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; } static int gmbus_xfer_read_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus0_reg, u32 gmbus1_index) { unsigned int size = len; bool burst_read = len > gmbus_max_xfer_size(i915); bool extra_byte_added = false; if (burst_read) { /* * As per HW Spec, for 512Bytes need to read extra Byte and * Ignore the extra byte read. */ if (len == 512) { extra_byte_added = true; len++; } size = len % 256 + 256; intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; val = intel_de_read_fw(i915, GMBUS3(i915)); do { if (extra_byte_added && len == 1) break; *buf++ = val & 0xff; val >>= 8; } while (--len && ++loop < 4); if (burst_read && len == size - 4) /* Reset the override bit */ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg); } return 0; } /* * HW spec says that 512Bytes in Burst read need special treatment. * But it doesn't talk about other multiple of 256Bytes. And couldn't locate * an I2C slave, which supports such a lengthy burst read too for experiments. * * So until things get clarified on HW support, to avoid the burst read length * in fold of 256Bytes except 512, max burst read length is fixed at 767Bytes. 
*/ #define INTEL_GMBUS_BURST_READ_MAX_LEN 767U static int gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus0_reg, u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int rx_size = msg->len; unsigned int len; int ret; do { if (HAS_GMBUS_BURST_READ(i915)) len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); else len = min(rx_size, gmbus_max_xfer_size(i915)); ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len, gmbus0_reg, gmbus1_index); if (ret) return ret; rx_size -= len; buf += len; } while (rx_size != 0); return 0; } static int gmbus_xfer_write_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus1_index) { unsigned int chunk_size = len; u32 val, loop; val = loop = 0; while (len && loop < 4) { val |= *buf++ << (8 * loop++); len -= 1; } intel_de_write_fw(i915, GMBUS3(i915), val); intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; val = loop = 0; do { val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); intel_de_write_fw(i915, GMBUS3(i915), val); ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; } return 0; } static int gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int tx_size = msg->len; unsigned int len; int ret; do { len = min(tx_size, gmbus_max_xfer_size(i915)); ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len, gmbus1_index); if (ret) return ret; buf += len; tx_size -= len; } while (tx_size != 0); return 0; } /* * The gmbus controller can combine a 1 or 2 byte write with another read/write * that immediately follows it by using an "INDEX" cycle. 
*/ static bool gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && msgs[i].addr == msgs[i + 1].addr && !(msgs[i].flags & I2C_M_RD) && (msgs[i].len == 1 || msgs[i].len == 2) && msgs[i + 1].len > 0); } static int gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, u32 gmbus0_reg) { u32 gmbus1_index = 0; u32 gmbus5 = 0; int ret; if (msgs[0].len == 2) gmbus5 = GMBUS_2BYTE_INDEX_EN | msgs[0].buf[1] | (msgs[0].buf[0] << 8); if (msgs[0].len == 1) gmbus1_index = GMBUS_CYCLE_INDEX | (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT); /* GMBUS5 holds 16-bit index */ if (gmbus5) intel_de_write_fw(i915, GMBUS5(i915), gmbus5); if (msgs[1].flags & I2C_M_RD) ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg, gmbus1_index); else ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) intel_de_write_fw(i915, GMBUS5(i915), 0); return ret; } static int do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, u32 gmbus0_source) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; int i = 0, inc, try = 0; int ret = 0; /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) bxt_gmbus_clock_gating(i915, false); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) pch_gmbus_clock_gating(i915, false); retry: intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { ret = gmbus_index_xfer(i915, &msgs[i], gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(i915, &msgs[i], gmbus0_source | bus->reg0, 0); } else { ret = gmbus_xfer_write(i915, &msgs[i], 0); } if (!ret) ret = gmbus_wait(i915, GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; else if (ret) goto clear_err; } /* Generate a STOP condition on the bus. Note that gmbus can't generate * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ if (gmbus_wait_idle(i915)) { drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } intel_de_write_fw(i915, GMBUS0(i915), 0); ret = ret ?: i; goto out; clear_err: /* * Wait for bus to IDLE before clearing NAK. * If we clear the NAK while bus is still active, then it will stay * active and the next transaction may fail. * * If no ACK is received during the address phase of a transaction, the * adapter must report -ENXIO. It is not clear what to return if no ACK * is received at other times. But we have to be careful to not return * spurious -ENXIO because that will prevent i2c and drm edid functions * from retrying. So return -ENXIO only when gmbus properly quiesces - * timing out seems to happen when there _is_ a ddc chip present, but * it's slow responding and only answers on the 2nd retry. */ ret = -ENXIO; if (gmbus_wait_idle(i915)) { drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out after NAK\n", adapter->name); ret = -ETIMEDOUT; } /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK.
*/ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); intel_de_write_fw(i915, GMBUS1(i915), 0); intel_de_write_fw(i915, GMBUS0(i915), 0); drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); /* * Passive adapters sometimes NAK the first probe. Retry the first * message once on -ENXIO for GMBUS transfers; the bit banging algorithm * has retries internally. See also the retry loop in * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK on first message, retry\n", adapter->name); goto retry; } goto out; timeout: drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", bus->adapter.name, bus->reg0 & 0xff); intel_de_write_fw(i915, GMBUS0(i915), 0); /* * Hardware may not support GMBUS over these pins? Try GPIO bitbanging * instead. Use EAGAIN to have i2c core retry. */ ret = -EAGAIN; out: /* Display WA #0868: skl,bxt,kbl,cfl,glk */ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) bxt_gmbus_clock_gating(i915, true); else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) pch_gmbus_clock_gating(i915, true); return ret; } static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; intel_wakeref_t wakeref; int ret; wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); if (ret < 0) bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY; } else { ret = do_gmbus_xfer(adapter, msgs, num, 0); if (ret == -EAGAIN) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; struct i2c_msg msgs[] = { { .addr = DRM_HDCP_DDC_ADDR, .flags = 0, .len = sizeof(cmd), .buf = &cmd, }, { .addr = DRM_HDCP_DDC_ADDR, .flags = 0, .len = sizeof(buf), .buf = buf, } }; intel_wakeref_t wakeref; int ret; wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); mutex_lock(&i915->display.gmbus.mutex); /* * In order to output Aksv to the receiver, use an indexed write to * pass the i2c command, and tell GMBUS to use the HW-provided value * instead of sourcing GMBUS3 for the data. 
*/ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); mutex_unlock(&i915->display.gmbus.mutex); intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } static u32 gmbus_func(struct i2c_adapter *adapter) { return i2c_bit_algo.functionality(adapter) & (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | /* I2C_FUNC_10BIT_ADDR | */ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL); } static const struct i2c_algorithm gmbus_algorithm = { .master_xfer = gmbus_xfer, .functionality = gmbus_func }; static void gmbus_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; mutex_lock(&i915->display.gmbus.mutex); } static int gmbus_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; return mutex_trylock(&i915->display.gmbus.mutex); } static void gmbus_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; mutex_unlock(&i915->display.gmbus.mutex); } static const struct i2c_lock_operations gmbus_lock_ops = { .lock_bus = gmbus_lock_bus, .trylock_bus = gmbus_trylock_bus, .unlock_bus = gmbus_unlock_bus, }; /** * intel_gmbus_setup - instantiate all Intel i2c GMBuses * @i915: i915 device private */ int intel_gmbus_setup(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); unsigned int pin; int ret; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE; else if (!HAS_GMCH(i915)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it doesn't have a PCH. */ i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE; mutex_init(&i915->display.gmbus.mutex); init_waitqueue_head(&i915->display.gmbus.wait_queue); for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { const struct gmbus_pin *gmbus_pin; struct intel_gmbus *bus; gmbus_pin = get_gmbus_pin(i915, pin); if (!gmbus_pin) continue; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { ret = -ENOMEM; goto err; } bus->adapter.owner = THIS_MODULE; bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "i915 gmbus %s", gmbus_pin->name); bus->adapter.dev.parent = &pdev->dev; bus->i915 = i915; bus->adapter.algo = &gmbus_algorithm; bus->adapter.lock_ops = &gmbus_lock_ops; /* * We wish to retry with bit banging * after a timed out GMBUS attempt. */ bus->adapter.retries = 1; /* By default use a conservative clock rate */ bus->reg0 = pin | GMBUS_RATE_100KHZ; /* gmbus seems to be broken on i830 */ if (IS_I830(i915)) bus->force_bit = 1; intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio)); ret = i2c_add_adapter(&bus->adapter); if (ret) { kfree(bus); goto err; } i915->display.gmbus.bus[pin] = bus; } intel_gmbus_reset(i915); return 0; err: intel_gmbus_teardown(i915); return ret; } struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915, unsigned int pin) { if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) || !i915->display.gmbus.bus[pin])) return NULL; return &i915->display.gmbus.bus[pin]->adapter; } void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); struct drm_i915_private *i915 = bus->i915; mutex_lock(&i915->display.gmbus.mutex); bus->force_bit += force_bit ? 
1 : -1; drm_dbg_kms(&i915->drm, "%sabling bit-banging on %s. force bit now %d\n", force_bit ? "en" : "dis", adapter->name, bus->force_bit); mutex_unlock(&i915->display.gmbus.mutex); } bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); return bus->force_bit; } void intel_gmbus_teardown(struct drm_i915_private *i915) { unsigned int pin; for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { struct intel_gmbus *bus; bus = i915->display.gmbus.bus[pin]; if (!bus) continue; i2c_del_adapter(&bus->adapter); kfree(bus); i915->display.gmbus.bus[pin] = NULL; } } void intel_gmbus_irq_handler(struct drm_i915_private *i915) { wake_up_all(&i915->display.gmbus.wait_queue); }
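/*
 * Illustrative sketch, not part of the original file: a DDC-style read as a
 * GMBUS consumer would issue it through the adapter returned by
 * intel_gmbus_get_adapter(). The two-message pattern below (a one byte offset
 * write immediately followed by a read to the same address) is exactly what
 * gmbus_is_index_xfer() collapses into a single INDEX cycle. The constant and
 * helper name are hypothetical; i2c_transfer() and I2C_M_RD are the standard
 * kernel I2C core interfaces.
 */
#define EXAMPLE_DDC_ADDR 0x50

static int example_ddc_read(struct i2c_adapter *adapter, u8 offset,
			    u8 *buf, u16 len)
{
	struct i2c_msg msgs[] = {
		{
			.addr = EXAMPLE_DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &offset,
		},
		{
			.addr = EXAMPLE_DDC_ADDR,
			.flags = I2C_M_RD,
			.len = len,
			.buf = buf,
		},
	};
	int ret;

	/* i2c_transfer() returns the number of messages transferred on success. */
	ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}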
linux-master
drivers/gpu/drm/i915/display/intel_gmbus.c
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Shobhit Kumar <[email protected]> * */ #include <linux/gpio/consumer.h> #include <linux/gpio/machine.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/machine.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <asm/unaligned.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <video/mipi_display.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_gmbus_regs.h" #include "intel_pps_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_regs.h" #include "vlv_sideband.h" #define MIPI_TRANSFER_MODE_SHIFT 0 #define MIPI_VIRTUAL_CHANNEL_SHIFT 1 #define MIPI_PORT_SHIFT 3 /* base offsets for gpio pads */ #define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130 #define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 #define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110 #define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140 #define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150 #define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160 #define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180 #define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190 #define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170 #define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100 #define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0 #define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0 #define VLV_GPIO_PCONF0(base_offset) (base_offset) #define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8) struct gpio_map { u16 base_offset; bool init; }; static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_0_HV_DDI0_HPD }, { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA }, { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL }, { VLV_GPIO_NC_3_PANEL0_VDDEN }, { VLV_GPIO_NC_4_PANEL0_BKLTEN }, { VLV_GPIO_NC_5_PANEL0_BKLTCTL }, { VLV_GPIO_NC_6_HV_DDI1_HPD }, { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA }, { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL }, { VLV_GPIO_NC_9_PANEL1_VDDEN }, { VLV_GPIO_NC_10_PANEL1_BKLTEN }, { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; struct i2c_adapter_lookup { u16 slave_addr; struct intel_dsi *intel_dsi; acpi_handle dev_handle; }; #define CHV_GPIO_IDX_START_N 0 #define CHV_GPIO_IDX_START_E 73 #define CHV_GPIO_IDX_START_SW 100 #define CHV_GPIO_IDX_START_SE 198 #define CHV_VBT_MAX_PINS_PER_FMLY 15 #define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8) #define CHV_GPIO_GPIOEN (1 << 15) #define CHV_GPIO_GPIOCFG_GPIO (0 << 8) #define CHV_GPIO_GPIOCFG_GPO (1 << 8) #define CHV_GPIO_GPIOCFG_GPI (2 << 8) #define 
CHV_GPIO_GPIOCFG_HIZ (3 << 8) #define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1) #define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) #define CHV_GPIO_CFGLOCK (1 << 31) /* ICL DSI Display GPIO Pins */ #define ICL_GPIO_DDSP_HPD_A 0 #define ICL_GPIO_L_VDDEN_1 1 #define ICL_GPIO_L_BKLTEN_1 2 #define ICL_GPIO_DDPA_CTRLCLK_1 3 #define ICL_GPIO_DDPA_CTRLDATA_1 4 #define ICL_GPIO_DDSP_HPD_B 5 #define ICL_GPIO_L_VDDEN_2 6 #define ICL_GPIO_L_BKLTEN_2 7 #define ICL_GPIO_DDPA_CTRLCLK_2 8 #define ICL_GPIO_DDPA_CTRLDATA_2 9 static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, u8 seq_port) { /* * If single link DSI is being used on any port, the VBT sequence block * send packet apparently always has 0 for the port. Just use the port * we have configured, and ignore the sequence block port. */ if (hweight8(intel_dsi->ports) == 1) return ffs(intel_dsi->ports) - 1; if (seq_port) { if (intel_dsi->ports & BIT(PORT_B)) return PORT_B; else if (intel_dsi->ports & BIT(PORT_C)) return PORT_C; } return PORT_A; } static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct mipi_dsi_device *dsi_device; u8 type, flags, seq_port; u16 len; enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); flags = *data++; type = *data++; len = *((u16 *) data); data += 2; seq_port = (flags >> MIPI_PORT_SHIFT) & 3; port = intel_dsi_seq_port_to_port(intel_dsi, seq_port); if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port])) goto out; dsi_device = intel_dsi->dsi_hosts[port]->device; if (!dsi_device) { drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n", port_name(port)); goto out; } if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1) dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; else dsi_device->mode_flags |= MIPI_DSI_MODE_LPM; dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3; switch (type) { case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: mipi_dsi_generic_write(dsi_device, NULL, 0); break; case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: mipi_dsi_generic_write(dsi_device, data, 1); break; case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: mipi_dsi_generic_write(dsi_device, data, 2); break; case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: drm_dbg(&dev_priv->drm, "Generic Read not yet implemented or used\n"); break; case MIPI_DSI_GENERIC_LONG_WRITE: mipi_dsi_generic_write(dsi_device, data, len); break; case MIPI_DSI_DCS_SHORT_WRITE: mipi_dsi_dcs_write_buffer(dsi_device, data, 1); break; case MIPI_DSI_DCS_SHORT_WRITE_PARAM: mipi_dsi_dcs_write_buffer(dsi_device, data, 2); break; case MIPI_DSI_DCS_READ: drm_dbg(&dev_priv->drm, "DCS Read not yet implemented or used\n"); break; case MIPI_DSI_DCS_LONG_WRITE: mipi_dsi_dcs_write_buffer(dsi_device, data, len); break; } if (DISPLAY_VER(dev_priv) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: data += len; return data; } static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); u32 delay = *((const u32 *) data); drm_dbg_kms(&i915->drm, "%d usecs\n", delay); usleep_range(delay, delay + 10); data += 4; return data; } static void vlv_exec_gpio(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct gpio_map *map; u16 pconf0, padval; u32 tmp; u8 port; if (gpio_index >= 
ARRAY_SIZE(vlv_gpio_table)) { drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n", gpio_index); return; } map = &vlv_gpio_table[gpio_index]; if (connector->panel.vbt.dsi.seq_version >= 3) { /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ port = IOSF_PORT_GPIO_NC; } else { if (gpio_source == 0) { port = IOSF_PORT_GPIO_NC; } else if (gpio_source == 1) { drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n"); return; } else { drm_dbg_kms(&dev_priv->drm, "unknown gpio source %u\n", gpio_source); return; } } pconf0 = VLV_GPIO_PCONF0(map->base_offset); padval = VLV_GPIO_PAD_VAL(map->base_offset); vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO)); if (!map->init) { /* FIXME: remove constant below */ vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00); map->init = true; } tmp = 0x4 | value; vlv_iosf_sb_write(dev_priv, port, padval, tmp); vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); } static void chv_exec_gpio(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u16 cfg0, cfg1; u16 family_num; u8 port; if (connector->panel.vbt.dsi.seq_version >= 3) { if (gpio_index >= CHV_GPIO_IDX_START_SE) { /* XXX: it's unclear whether 255->57 is part of SE. */ gpio_index -= CHV_GPIO_IDX_START_SE; port = CHV_IOSF_PORT_GPIO_SE; } else if (gpio_index >= CHV_GPIO_IDX_START_SW) { gpio_index -= CHV_GPIO_IDX_START_SW; port = CHV_IOSF_PORT_GPIO_SW; } else if (gpio_index >= CHV_GPIO_IDX_START_E) { gpio_index -= CHV_GPIO_IDX_START_E; port = CHV_IOSF_PORT_GPIO_E; } else { port = CHV_IOSF_PORT_GPIO_N; } } else { /* XXX: The spec is unclear about CHV GPIO on seq v2 */ if (gpio_source != 0) { drm_dbg_kms(&dev_priv->drm, "unknown gpio source %u\n", gpio_source); return; } if (gpio_index >= CHV_GPIO_IDX_START_E) { drm_dbg_kms(&dev_priv->drm, "invalid gpio index %u for GPIO N\n", gpio_index); return; } port = CHV_IOSF_PORT_GPIO_N; } family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY; cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index); cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index); vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO)); vlv_iosf_sb_write(dev_priv, port, cfg1, 0); vlv_iosf_sb_write(dev_priv, port, cfg0, CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); } static void bxt_exec_gpio(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); /* XXX: this table is a quick ugly hack. */ static struct gpio_desc *bxt_gpio_table[U8_MAX + 1]; struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index]; if (!gpio_desc) { gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, NULL, gpio_index, value ? 
GPIOD_OUT_LOW : GPIOD_OUT_HIGH); if (IS_ERR_OR_NULL(gpio_desc)) { drm_err(&dev_priv->drm, "GPIO index %u request failed (%ld)\n", gpio_index, PTR_ERR(gpio_desc)); return; } bxt_gpio_table[gpio_index] = gpio_desc; } gpiod_set_value(gpio_desc, value); } static void icl_exec_gpio(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); } enum { MIPI_RESET_1 = 0, MIPI_AVDD_EN_1, MIPI_BKLT_EN_1, MIPI_AVEE_EN_1, MIPI_VIO_EN_1, MIPI_RESET_2, MIPI_AVDD_EN_2, MIPI_BKLT_EN_2, MIPI_AVEE_EN_2, MIPI_VIO_EN_2, }; static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, int gpio, bool value) { int index; if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) return; switch (gpio) { case MIPI_RESET_1: case MIPI_RESET_2: index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B; /* * Disable HPD to set the pin to output, and set output * value. The HPD pin should not be enabled for DSI anyway, * assuming the board design and VBT are sane, and the pin isn't * used by a non-DSI encoder. * * The locking protects against concurrent SHOTPLUG_CTL_DDI * modifications in irq setup and handling. */ spin_lock_irq(&dev_priv->irq_lock); intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI, SHOTPLUG_CTL_DDI_HPD_ENABLE(index) | SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index), value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0); spin_unlock_irq(&dev_priv->irq_lock); break; case MIPI_AVDD_EN_1: case MIPI_AVDD_EN_2: index = gpio == MIPI_AVDD_EN_1 ? 0 : 1; intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON, value ? PANEL_POWER_ON : 0); break; case MIPI_BKLT_EN_1: case MIPI_BKLT_EN_2: index = gpio == MIPI_BKLT_EN_1 ? 0 : 1; intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE, value ? EDP_BLC_ENABLE : 0); break; case MIPI_AVEE_EN_1: case MIPI_AVEE_EN_2: index = gpio == MIPI_AVEE_EN_1 ? 1 : 2; intel_de_rmw(dev_priv, GPIO(dev_priv, index), GPIO_CLOCK_VAL_OUT, GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0)); break; case MIPI_VIO_EN_1: case MIPI_VIO_EN_2: index = gpio == MIPI_VIO_EN_1 ? 1 : 2; intel_de_rmw(dev_priv, GPIO(dev_priv, index), GPIO_DATA_VAL_OUT, GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | GPIO_DATA_VAL_MASK | (value ? 
GPIO_DATA_VAL_OUT : 0)); break; default: MISSING_CASE(gpio); } } static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; u8 gpio_source, gpio_index = 0, gpio_number; bool value; bool native = DISPLAY_VER(dev_priv) >= 11; if (connector->panel.vbt.dsi.seq_version >= 3) gpio_index = *data++; gpio_number = *data++; /* gpio source in sequence v2 only */ if (connector->panel.vbt.dsi.seq_version == 2) gpio_source = (*data >> 1) & 3; else gpio_source = 0; if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1)) native = false; /* pull up/down */ value = *data++ & 1; drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); if (native) icl_native_gpio_set_value(dev_priv, gpio_number, value); else if (DISPLAY_VER(dev_priv) >= 11) icl_exec_gpio(connector, gpio_source, gpio_index, value); else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(connector, gpio_source, gpio_number, value); else if (IS_CHERRYVIEW(dev_priv)) chv_exec_gpio(connector, gpio_source, gpio_number, value); else bxt_exec_gpio(connector, gpio_source, gpio_index, value); return data; } #ifdef CONFIG_ACPI static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) { struct i2c_adapter_lookup *lookup = data; struct intel_dsi *intel_dsi = lookup->intel_dsi; struct acpi_resource_i2c_serialbus *sb; struct i2c_adapter *adapter; acpi_handle adapter_handle; acpi_status status; if (!i2c_acpi_get_i2c_resource(ares, &sb)) return 1; if (lookup->slave_addr != sb->slave_address) return 1; status = acpi_get_handle(lookup->dev_handle, sb->resource_source.string_ptr, &adapter_handle); if (ACPI_FAILURE(status)) return 1; adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); if (adapter) intel_dsi->i2c_bus_num = adapter->nr; return 1; } static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 slave_addr) { struct drm_device *drm_dev = intel_dsi->base.base.dev; struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); struct i2c_adapter_lookup lookup = { .slave_addr = slave_addr, .intel_dsi = intel_dsi, .dev_handle = acpi_device_handle(adev), }; LIST_HEAD(resource_list); acpi_dev_get_resources(adev, &resource_list, i2c_adapter_lookup, &lookup); acpi_dev_free_resource_list(&resource_list); } #else static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 slave_addr) { } #endif static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); struct i2c_adapter *adapter; struct i2c_msg msg; int ret; u8 vbt_i2c_bus_num = *(data + 2); u16 slave_addr = *(u16 *)(data + 3); u8 reg_offset = *(data + 5); u8 payload_size = *(data + 6); u8 *payload_data; if (intel_dsi->i2c_bus_num < 0) { intel_dsi->i2c_bus_num = vbt_i2c_bus_num; i2c_acpi_find_adapter(intel_dsi, slave_addr); } adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); if (!adapter) { drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n"); goto err_bus; } payload_data = kzalloc(payload_size + 1, GFP_KERNEL); if (!payload_data) goto err_alloc; payload_data[0] = reg_offset; memcpy(&payload_data[1], (data + 7), payload_size); msg.addr = slave_addr; msg.flags = 0; msg.len = payload_size + 1; msg.buf = payload_data; ret = i2c_transfer(adapter, &msg, 1); if (ret < 0) 
drm_err(&i915->drm, "Failed to xfer payload of size (%u) to reg (%u)\n", payload_size, reg_offset); kfree(payload_data); err_alloc: i2c_put_adapter(adapter); err_bus: return data + payload_size + 7; } static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n"); return data + *(data + 5) + 6; } static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); #ifdef CONFIG_PMIC_OPREGION u32 value, mask, reg_address; u16 i2c_address; int ret; /* byte 0 aka PMIC Flag is reserved */ i2c_address = get_unaligned_le16(data + 1); reg_address = get_unaligned_le32(data + 3); value = get_unaligned_le32(data + 7); mask = get_unaligned_le32(data + 11); ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address, reg_address, value, mask); if (ret) drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret); #else drm_err(&i915->drm, "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); #endif return data + 15; } typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, const u8 *data); static const fn_mipi_elem_exec exec_elem[] = { [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet, [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay, [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio, [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c, [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi, [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic, }; /* * MIPI Sequence from VBT #53 parsing logic * We have already separated each seqence during bios parsing * Following is generic execution function for any sequence */ static const char * const seq_name[] = { [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF", [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON", [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF", [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON", [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF", [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON", [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF", }; static const char *sequence_name(enum mipi_seq seq_id) { if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id]) return seq_name[seq_id]; else return "(unknown)"; } static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, enum mipi_seq seq_id) { struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct intel_connector *connector = intel_dsi->attached_connector; const u8 *data; fn_mipi_elem_exec mipi_elem_exec; if (drm_WARN_ON(&dev_priv->drm, seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence))) return; data = connector->panel.vbt.dsi.sequence[seq_id]; if (!data) return; drm_WARN_ON(&dev_priv->drm, *data != seq_id); drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n", seq_id, sequence_name(seq_id)); /* Skip Sequence Byte. */ data++; /* Skip Size of Sequence. */ if (connector->panel.vbt.dsi.seq_version >= 3) data += 4; while (1) { u8 operation_byte = *data++; u8 operation_size = 0; if (operation_byte == MIPI_SEQ_ELEM_END) break; if (operation_byte < ARRAY_SIZE(exec_elem)) mipi_elem_exec = exec_elem[operation_byte]; else mipi_elem_exec = NULL; /* Size of Operation. 
*/ if (connector->panel.vbt.dsi.seq_version >= 3) operation_size = *data++; if (mipi_elem_exec) { const u8 *next = data + operation_size; data = mipi_elem_exec(intel_dsi, data); /* Consistency check if we have size. */ if (operation_size && data != next) { drm_err(&dev_priv->drm, "Inconsistent operation size\n"); return; } } else if (operation_size) { /* We have size, skip. */ drm_dbg_kms(&dev_priv->drm, "Unsupported MIPI operation byte %u\n", operation_byte); data += operation_size; } else { /* No size, can't skip without parsing. */ drm_err(&dev_priv->drm, "Unsupported MIPI operation byte %u\n", operation_byte); return; } } } void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id) { if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel) gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1); if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight) gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1); intel_dsi_vbt_exec(intel_dsi, seq_id); if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel) gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight) gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0); } void intel_dsi_log_params(struct intel_dsi *intel_dsi) { struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk); drm_dbg_kms(&i915->drm, "Pixel overlap %d\n", intel_dsi->pixel_overlap); drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count); drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); drm_dbg_kms(&i915->drm, "Video mode format %s\n", intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? "non-burst with sync pulse" : intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? "non-burst with sync events" : intel_dsi->video_mode == BURST_MODE ? "burst" : "<unknown>"); drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val); drm_dbg_kms(&i915->drm, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt)); drm_dbg_kms(&i915->drm, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop)); drm_dbg_kms(&i915->drm, "Mode %s\n", intel_dsi->operation_mode ? 
"command" : "video"); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) drm_dbg_kms(&i915->drm, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) drm_dbg_kms(&i915->drm, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); else drm_dbg_kms(&i915->drm, "Dual link: NONE\n"); drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format); drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div); drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count); drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); drm_dbg_kms(&i915->drm, "BTA %s\n", str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); } bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps; struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode; u16 burst_mode_ratio; enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; intel_dsi->lane_count = mipi_config->lane_cnt + 1; intel_dsi->pixel_format = pixel_format_from_register_bits( mipi_config->videomode_color_format << 7); intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->pixel_overlap = mipi_config->pixel_overlap; intel_dsi->operation_mode = mipi_config->is_cmd_mode; intel_dsi->video_mode = mipi_config->video_transfer_mode; intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout; intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout; intel_dsi->rst_timer_val = mipi_config->device_reset_timer; intel_dsi->init_count = mipi_config->master_init_timer; intel_dsi->bw_timer = mipi_config->dbi_bw_timer; intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; intel_dsi->bgr_enabled = mipi_config->rgb_flip; /* Starting point, adjusted depending on dual link and burst mode */ intel_dsi->pclk = mode->clock; /* In dual link mode each port needs half of pixel clock */ if (intel_dsi->dual_link) { intel_dsi->pclk /= 2; /* we can enable pixel_overlap if needed by panel. 
In this * case we need to increase the pixelclock for extra pixels */ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000); } } /* Burst Mode Ratio * Target ddr frequency from VBT / non burst ddr freq * multiply by 100 to preserve remainder */ if (intel_dsi->video_mode == BURST_MODE) { if (mipi_config->target_burst_mode_freq) { u32 bitrate = intel_dsi_bitrate(intel_dsi); /* * Sometimes the VBT contains a slightly lower clock * than the bitrate we have calculated; in this case * just replace it with the calculated bitrate. */ if (mipi_config->target_burst_mode_freq < bitrate && intel_fuzzy_clock_check( mipi_config->target_burst_mode_freq, bitrate)) mipi_config->target_burst_mode_freq = bitrate; if (mipi_config->target_burst_mode_freq < bitrate) { drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n"); return false; } burst_mode_ratio = DIV_ROUND_UP( mipi_config->target_burst_mode_freq * 100, bitrate); intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); } else { drm_err(&dev_priv->drm, "Burst mode target is not set\n"); return false; } } else burst_mode_ratio = 100; intel_dsi->burst_mode_ratio = burst_mode_ratio; /* delays in VBT are in units of 100us, so we need to convert * them to ms here: * Delay (100us) * 100 / 1000 = Delay / 10 (ms) */ intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10; intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10; intel_dsi->panel_on_delay = pps->panel_on_delay / 10; intel_dsi->panel_off_delay = pps->panel_off_delay / 10; intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10; intel_dsi->i2c_bus_num = -1; /* a regular driver would get the device in probe */ for_each_dsi_port(port, intel_dsi->ports) { mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device); } return true; } /* * On some BYT/CHT devs some sequences are incomplete and we need to manually * control some GPIOs. We need to add a GPIO lookup table before we get these. * If the GOP did not initialize the panel (HDMI inserted) we may need to also * change the pinmux for the SoC's PWM0 pin from GPIO to PWM. */ static struct gpiod_lookup_table pmic_panel_gpio_table = { /* Intel GFX is consumer */ .dev_id = "0000:00:02.0", .table = { /* Panel EN/DISABLE */ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH), { } }, }; static struct gpiod_lookup_table soc_panel_gpio_table = { .dev_id = "0000:00:02.0", .table = { GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH), { } }, }; static const struct pinctrl_map soc_pwm_pinctrl_map[] = { PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00", "pwm0_grp", "pwm"), }; void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; enum gpiod_flags flags = panel_is_on ?
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; bool want_backlight_gpio = false; bool want_panel_gpio = false; struct pinctrl *pinctrl; int ret; if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && mipi_config->pwm_blc == PPS_BLC_PMIC) { gpiod_add_lookup_table(&pmic_panel_gpio_table); want_panel_gpio = true; } if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { gpiod_add_lookup_table(&soc_panel_gpio_table); want_panel_gpio = true; want_backlight_gpio = true; /* Ensure PWM0 pin is muxed as PWM instead of GPIO */ ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, ARRAY_SIZE(soc_pwm_pinctrl_map)); if (ret) drm_err(&dev_priv->drm, "Failed to register pwm0 pinmux mapping\n"); pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); if (IS_ERR(pinctrl)) drm_err(&dev_priv->drm, "Failed to set pinmux to PWM\n"); } if (want_panel_gpio) { intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags); if (IS_ERR(intel_dsi->gpio_panel)) { drm_err(&dev_priv->drm, "Failed to own gpio for panel control\n"); intel_dsi->gpio_panel = NULL; } } if (want_backlight_gpio) { intel_dsi->gpio_backlight = gpiod_get(dev->dev, "backlight", flags); if (IS_ERR(intel_dsi->gpio_backlight)) { drm_err(&dev_priv->drm, "Failed to own gpio for backlight control\n"); intel_dsi->gpio_backlight = NULL; } } } void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; if (intel_dsi->gpio_panel) { gpiod_put(intel_dsi->gpio_panel); intel_dsi->gpio_panel = NULL; } if (intel_dsi->gpio_backlight) { gpiod_put(intel_dsi->gpio_backlight); intel_dsi->gpio_backlight = NULL; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && mipi_config->pwm_blc == PPS_BLC_PMIC) gpiod_remove_lookup_table(&pmic_panel_gpio_table); if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { pinctrl_unregister_mappings(soc_pwm_pinctrl_map); gpiod_remove_lookup_table(&soc_panel_gpio_table); } }
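/*
 * Illustrative sketch, not part of the original file: the rough order in which
 * a DSI encoder would normally fire the VBT sequences above during panel
 * power-up. The exact ordering is owned by the platform encoder code (e.g.
 * vlv_dsi.c or icl_dsi.c); this only shows how intel_dsi_vbt_exec_sequence()
 * is consumed, and the helper name is made up.
 */
static void example_dsi_panel_power_up(struct intel_dsi *intel_dsi)
{
	/* Rails and reset first; these also drive gpio_panel when present. */
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);

	/* One-time panel init, then display on. */
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);

	/* Backlight last, once the pipe is up; also drives gpio_backlight. */
	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}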
linux-master
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
/* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> */ #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_connector.h" #include "intel_crt.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" #include "intel_hotplug_irq.h" #include "intel_load_detect.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ ADPA_CRT_HOTPLUG_WARMUP_10MS | \ ADPA_CRT_HOTPLUG_SAMPLE_4S | \ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ ADPA_CRT_HOTPLUG_VOLREF_325MV | \ ADPA_CRT_HOTPLUG_ENABLE) struct intel_crt { struct intel_encoder base; /* DPMS state is stored in the connector, which we need in the * encoder's enable/disable callbacks */ struct intel_connector *connector; bool force_hotplug_required; i915_reg_t adpa_reg; }; static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) { return container_of(encoder, struct intel_crt, base); } static struct intel_crt *intel_attached_crt(struct intel_connector *connector) { return intel_encoder_to_crt(intel_attached_encoder(connector)); } bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t adpa_reg, enum pipe *pipe) { u32 val; val = intel_de_read(dev_priv, adpa_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT; else *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT; return val & ADPA_DAC_ENABLE; } static bool intel_crt_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); intel_wakeref_t wakeref; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } 
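/*
 * Illustrative sketch, not part of the original file: how the ADPA readout
 * above would typically be consumed for state logging. Only
 * intel_crt_port_enabled() is a real entry point here; the wrapper and the
 * message are hypothetical. Note the pipe is reported even when the DAC is
 * disabled, matching the comment in intel_crt_port_enabled().
 */
static void example_log_crt_state(struct drm_i915_private *dev_priv,
				  i915_reg_t adpa_reg)
{
	enum pipe pipe;
	bool enabled;

	enabled = intel_crt_port_enabled(dev_priv, adpa_reg, &pipe);

	drm_dbg_kms(&dev_priv->drm, "CRT DAC %s, routed to pipe %c\n",
		    enabled ? "enabled" : "disabled", pipe_name(pipe));
}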
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; tmp = intel_de_read(dev_priv, crt->adpa_reg); if (tmp & ADPA_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (tmp & ADPA_VSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; return flags; } static void intel_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void hsw_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { lpt_pch_get_config(pipe_config); hsw_ddi_get_config(encoder, pipe_config); pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); } /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int mode) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 adpa; if (DISPLAY_VER(dev_priv) >= 5) adpa = ADPA_HOTPLUG_BITS; else adpa = 0; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; /* For CPT allow 3 pipe config, for others just use A or B */ if (HAS_PCH_LPT(dev_priv)) ; /* Those bits don't exist here */ else if (HAS_PCH_CPT(dev_priv)) adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe); else adpa |= ADPA_PIPE_SEL(crtc->pipe); if (!HAS_PCH_SPLIT(dev_priv)) intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); switch (mode) { case DRM_MODE_DPMS_ON: adpa |= ADPA_DAC_ENABLE; break; case DRM_MODE_DPMS_STANDBY: adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; break; case DRM_MODE_DPMS_SUSPEND: adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; break; case DRM_MODE_DPMS_OFF: adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; break; } intel_de_write(dev_priv, crt->adpa_reg, adpa); } static void intel_disable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF); } static void pch_disable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { } static void pch_post_disable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_crt(state, encoder, old_crtc_state, old_conn_state); } static void hsw_disable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } static void hsw_post_disable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_crtc_vblank_off(old_crtc_state); intel_disable_transcoder(old_crtc_state); intel_ddi_disable_transcoder_func(old_crtc_state); ilk_pfit_disable(old_crtc_state); intel_ddi_disable_transcoder_clock(old_crtc_state); pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); lpt_pch_disable(state, crtc); hsw_fdi_disable(encoder); drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } static void hsw_pre_enable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); hsw_fdi_link_train(encoder, crtc_state); intel_ddi_enable_transcoder_clock(encoder, crtc_state); } static void hsw_enable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); intel_ddi_enable_transcoder_func(encoder, crtc_state); intel_enable_transcoder(crtc_state); lpt_pch_enable(state, crtc); intel_crtc_vblank_on(crtc_state); intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); intel_crtc_wait_for_next_vblank(crtc); intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } static void intel_enable_crt(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); } static enum drm_mode_status intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); int max_dotclk = dev_priv->max_dotclk_freq; int max_clock; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (mode->clock < 25000) return MODE_CLOCK_LOW; if (HAS_PCH_LPT(dev_priv)) max_clock = 180000; else if (IS_VALLEYVIEW(dev_priv)) /* * 270 MHz due to current DPLL limits, * DAC limit supposedly 355 MHz. 
*/ max_clock = 270000; else if (IS_DISPLAY_VER(dev_priv, 3, 4)) max_clock = 400000; else max_clock = 350000; if (mode->clock > max_clock) return MODE_CLOCK_HIGH; if (mode->clock > max_dotclk) return MODE_CLOCK_HIGH; /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */ if (HAS_PCH_LPT(dev_priv) && ilk_get_lanes_required(mode->clock, 270000, 24) > 2) return MODE_CLOCK_HIGH; /* HSW/BDW FDI limited to 4k */ if (mode->hdisplay > 4096) return MODE_H_ILLEGAL; return MODE_OK; } static int intel_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return 0; } static int pch_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return 0; } static int hsw_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; /* HSW/BDW FDI limited to 4k */ if (adjusted_mode->crtc_hdisplay > 4096 || adjusted_mode->crtc_hblank_start > 4096) return -EINVAL; pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; /* LPT FDI RX only supports 8bpc. 
*/ if (HAS_PCH_LPT(dev_priv)) { if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { drm_dbg_kms(&dev_priv->drm, "LPT only supports 24bpp\n"); return -EINVAL; } pipe_config->pipe_bpp = 24; } /* FDI must always be 2.7 GHz */ pipe_config->port_clock = 135000 * 2; adjusted_mode->crtc_clock = lpt_iclkip(pipe_config); return 0; } static bool ilk_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); u32 adpa; bool ret; /* The first time through, trigger an explicit detection cycle */ if (crt->force_hotplug_required) { bool turn_off_dac = HAS_PCH_SPLIT(dev_priv); u32 save_adpa; crt->force_hotplug_required = false; save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); drm_dbg_kms(&dev_priv->drm, "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; if (turn_off_dac) adpa &= ~ADPA_DAC_ENABLE; intel_de_write(dev_priv, crt->adpa_reg, adpa); if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) drm_dbg_kms(&dev_priv->drm, "timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { intel_de_write(dev_priv, crt->adpa_reg, save_adpa); intel_de_posting_read(dev_priv, crt->adpa_reg); } } /* Check the status to see if both blue and green are on now */ adpa = intel_de_read(dev_priv, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); return ret; } static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(dev); bool reenable_hpd; u32 adpa; bool ret; u32 save_adpa; /* * Doing a force trigger causes a hpd interrupt to get sent, which can * get us stuck in a loop if we're polling: * - We enable power wells and reset the ADPA * - output_poll_exec does force probe on VGA, triggering a hpd * - HPD handler waits for poll to unlock dev->mode_config.mutex * - output_poll_exec shuts off the ADPA, unlocks * dev->mode_config.mutex * - HPD handler runs, resets ADPA and brings us back to the start * * Just disable HPD interrupts here to prevent this */ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); drm_dbg_kms(&dev_priv->drm, "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; intel_de_write(dev_priv, crt->adpa_reg, adpa); if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) { drm_dbg_kms(&dev_priv->drm, "timed out waiting for FORCE_TRIGGER"); intel_de_write(dev_priv, crt->adpa_reg, save_adpa); } /* Check the status to see if both blue and green are on now */ adpa = intel_de_read(dev_priv, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; drm_dbg_kms(&dev_priv->drm, "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); if (reenable_hpd) intel_hpd_enable(dev_priv, crt->base.hpd_pin); return ret; } static bool intel_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 stat; bool ret = false; int i, tries = 0; if (HAS_PCH_SPLIT(dev_priv)) return ilk_crt_detect_hotplug(connector); 
if (IS_VALLEYVIEW(dev_priv)) return valleyview_crt_detect_hotplug(connector); /* * On 4 series desktop, CRT detect sequence need to be done twice * to get a reliable result. */ if (IS_G45(dev_priv)) tries = 2; else tries = 1; for (i = 0; i < tries ; i++) { /* turn on the FORCE_DETECT */ i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN, CRT_HOTPLUG_FORCE_DETECT, 1000)) drm_dbg_kms(&dev_priv->drm, "timed out waiting for FORCE_DETECT to go off"); } stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT); if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) ret = true; /* clear the interrupt we just generated, if any */ intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); return ret; } static const struct drm_edid *intel_crt_get_edid(struct drm_connector *connector, struct i2c_adapter *i2c) { const struct drm_edid *drm_edid; drm_edid = drm_edid_read_ddc(connector, i2c); if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) { drm_dbg_kms(connector->dev, "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(i2c, true); drm_edid = drm_edid_read_ddc(connector, i2c); intel_gmbus_force_bit(i2c, false); } return drm_edid; } /* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ static int intel_crt_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { const struct drm_edid *drm_edid; int ret; drm_edid = intel_crt_get_edid(connector, adapter); if (!drm_edid) return 0; ret = intel_connector_update_modes(connector, drm_edid); drm_edid_free(drm_edid); return ret; } static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); const struct drm_edid *drm_edid; struct i2c_adapter *i2c; bool ret = false; i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); drm_edid = intel_crt_get_edid(connector, i2c); if (drm_edid) { const struct edid *edid = drm_edid_raw(drm_edid); bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; /* * This may be a DVI-I connector with a shared DDC * link between analog and digital outputs, so we * have to check the EDID input spec of the attached device. 
*/ if (!is_digital) { drm_dbg_kms(&dev_priv->drm, "CRT detected via DDC:0x50 [EDID]\n"); ret = true; } else { drm_dbg_kms(&dev_priv->drm, "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } } else { drm_dbg_kms(&dev_priv->drm, "CRT not detected via DDC:0x50 [no valid EDID found]\n"); } drm_edid_free(drm_edid); return ret; } static enum drm_connector_status intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = (enum transcoder)pipe; u32 save_bclrpat; u32 save_vtotal; u32 vtotal, vactive; u32 vsample; u32 vblank, vblank_start, vblank_end; u32 dsl; u8 st00; enum drm_connector_status status; drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder)); save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1; vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1; vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1; vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1; /* Set the border color to purple. */ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050); if (DISPLAY_VER(dev_priv) != 2) { u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf | TRANSCONF_FORCE_BORDER); intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); /* Wait for next Vblank to substitute * border color for Color info */ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? connector_status_connected : connector_status_disconnected; intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf); } else { bool restore_vblank = false; int count, detect; /* * If there isn't any border, add some. * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1; vblank_start = vsync_start; intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), VBLANK_START(vblank_start - 1) | VBLANK_END(vblank_end - 1)); restore_vblank = true; } /* sample in the vertical border, selecting the larger one */ if (vblank_start - vactive >= vtotal - vblank_end) vsample = (vblank_start + vactive) >> 1; else vsample = (vtotal + vblank_end) >> 1; /* * Wait for the border to be displayed */ while (intel_de_read(dev_priv, PIPEDSL(pipe)) >= vactive) ; while ((dsl = intel_de_read(dev_priv, PIPEDSL(pipe))) <= vsample) ; /* * Watch ST00 for an entire scanline */ detect = 0; count = 0; do { count++; /* Read the ST00 VGA status register */ st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE); if (st00 & (1 << 4)) detect++; } while ((intel_de_read(dev_priv, PIPEDSL(pipe)) == dsl)); /* restore vblank if necessary */ if (restore_vblank) intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank); /* * If more than 3/4 of the scanline detected a monitor, * then it is assumed to be present. This works even on i830, * where there isn't any way to force the border color across * the screen */ status = detect * 4 > count * 3 ?
connector_status_connected : connector_status_disconnected; } /* Restore previous settings */ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat); return status; } static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id) { DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_spurious_crt_detect[] = { { .callback = intel_spurious_crt_detect_dmi_callback, .ident = "ACER ZGB", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ACER"), DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), }, }, { .callback = intel_spurious_crt_detect_dmi_callback, .ident = "Intel DZ77BH-55K", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"), }, }, { } }; static int intel_crt_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; struct drm_atomic_state *state; intel_wakeref_t wakeref; int status; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); if (!INTEL_DISPLAY_ENABLED(dev_priv)) return connector_status_disconnected; if (dev_priv->params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); goto load_detect; } /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_spurious_crt_detect)) return connector_status_disconnected; wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); if (I915_HAS_HOTPLUG(dev_priv)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so * only trust an assertion that the monitor is connected. */ if (intel_crt_detect_hotplug(connector)) { drm_dbg_kms(&dev_priv->drm, "CRT detected via hotplug\n"); status = connector_status_connected; goto out; } else drm_dbg_kms(&dev_priv->drm, "CRT not detected via hotplug\n"); } if (intel_crt_detect_ddc(connector)) { status = connector_status_connected; goto out; } /* Load detection is broken on HPD capable machines. Whoever wants a * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ if (I915_HAS_HOTPLUG(dev_priv)) { status = connector_status_disconnected; goto out; } load_detect: if (!force) { status = connector->status; goto out; } /* for pre-945g platforms use load detect */ state = intel_load_detect_get_pipe(connector, ctx); if (IS_ERR(state)) { status = PTR_ERR(state); } else if (!state) { status = connector_status_unknown; } else { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else if (DISPLAY_VER(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); else if (dev_priv->params.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; intel_load_detect_release_pipe(connector, state, ctx); } out: intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); /* * Make sure the refs for power wells enabled during detect are * dropped to avoid a new detect cycle triggered by HPD polling. 
*/ intel_display_power_flush_work(dev_priv); return status; } static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); struct intel_encoder *intel_encoder = &crt->base; intel_wakeref_t wakeref; struct i2c_adapter *i2c; int ret; wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev_priv)) goto out; /* Try to probe digital port for output in DVI-I -> VGA mode. */ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB); ret = intel_crt_ddc_get_modes(connector, i2c); out: intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } void intel_crt_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); if (DISPLAY_VER(dev_priv) >= 5) { u32 adpa; adpa = intel_de_read(dev_priv, crt->adpa_reg); adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= ADPA_HOTPLUG_BITS; intel_de_write(dev_priv, crt->adpa_reg, adpa); intel_de_posting_read(dev_priv, crt->adpa_reg); drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa); crt->force_hotplug_required = true; } } /* * Routines for controlling stuff on the analog port */ static const struct drm_connector_funcs intel_crt_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .detect_ctx = intel_crt_detect, .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { .reset = intel_crt_reset, .destroy = intel_encoder_destroy, }; void intel_crt_init(struct drm_i915_private *dev_priv) { struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; i915_reg_t adpa_reg; u32 adpa; if (HAS_PCH_SPLIT(dev_priv)) adpa_reg = PCH_ADPA; else if (IS_VALLEYVIEW(dev_priv)) adpa_reg = VLV_ADPA; else adpa_reg = ADPA; adpa = intel_de_read(dev_priv, adpa_reg); if ((adpa & ADPA_DAC_ENABLE) == 0) { /* * On some machines (some IVB at least) CRT can be * fused off, but there's no known fuse bit to * indicate that. On these machine the ADPA register * works normally, except the DAC enable bit won't * take. So the only way to tell is attempt to enable * it and see what happens. 
*/ intel_de_write(dev_priv, adpa_reg, adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0) return; intel_de_write(dev_priv, adpa_reg, adpa); } crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); if (!crt) return; intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(crt); return; } connector = &intel_connector->base; crt->connector = intel_connector; drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI); if (IS_I830(dev_priv)) crt->base.pipe_mask = BIT(PIPE_A); else crt->base.pipe_mask = ~0; if (DISPLAY_VER(dev_priv) != 2) connector->interlace_allowed = true; crt->adpa_reg = adpa_reg; crt->base.power_domain = POWER_DOMAIN_PORT_CRT; if (I915_HAS_HOTPLUG(dev_priv) && !dmi_check_system(intel_spurious_crt_detect)) { crt->base.hpd_pin = HPD_CRT; crt->base.hotplug = intel_encoder_hotplug; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; } else { intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; } if (HAS_DDI(dev_priv)) { assert_port_valid(dev_priv, PORT_E); crt->base.port = PORT_E; crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; crt->base.compute_config = hsw_crt_compute_config; crt->base.pre_pll_enable = hsw_pre_pll_enable_crt; crt->base.pre_enable = hsw_pre_enable_crt; crt->base.enable = hsw_enable_crt; crt->base.disable = hsw_disable_crt; crt->base.post_disable = hsw_post_disable_crt; crt->base.enable_clock = hsw_ddi_enable_clock; crt->base.disable_clock = hsw_ddi_disable_clock; crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled; intel_ddi_buf_trans_init(&crt->base); } else { if (HAS_PCH_SPLIT(dev_priv)) { crt->base.compute_config = pch_crt_compute_config; crt->base.disable = pch_disable_crt; crt->base.post_disable = pch_post_disable_crt; } else { crt->base.compute_config = intel_crt_compute_config; crt->base.disable = intel_disable_crt; } crt->base.port = PORT_NONE; crt->base.get_config = intel_crt_get_config; crt->base.get_hw_state = intel_crt_get_hw_state; crt->base.enable = intel_enable_crt; } intel_connector->get_hw_state = intel_connector_get_hw_state; drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); /* * TODO: find a proper way to discover whether we need to set the * polarity and link reversal bits or not, instead of relying on the * BIOS. */ if (HAS_PCH_LPT(dev_priv)) { u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE; dev_priv->display.fdi.rx_config = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)) & fdi_config; } intel_crt_reset(&crt->base.base); }
linux-master
drivers/gpu/drm/i915/display/intel_crt.c
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "icl_dsi_regs.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_fdi_regs.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug_irq.h" #include "intel_pmdemand.h" #include "intel_psr.h" #include "intel_psr_regs.h" static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_crtc_handle_vblank(&crtc->base); } /** * ilk_update_display_irq - update DEIMR * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ void ilk_update_display_irq(struct drm_i915_private *dev_priv, u32 interrupt_mask, u32 enabled_irq_mask) { u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); new_val = dev_priv->irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->irq_mask && !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { dev_priv->irq_mask = new_val; intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); intel_uncore_posting_read(&dev_priv->uncore, DEIMR); } } void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) { ilk_update_display_irq(i915, bits, bits); } void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) { ilk_update_display_irq(i915, bits, 0); } /** * bdw_update_port_irq - update DE port interrupt * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ void bdw_update_port_irq(struct drm_i915_private *dev_priv, u32 interrupt_mask, u32 enabled_irq_mask) { u32 new_val; u32 old_val; lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); new_val = old_val; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != old_val) { intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); } } /** * bdw_update_pipe_irq - update DE pipe interrupt * @dev_priv: driver private * @pipe: pipe whose interrupt to update * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, enum pipe pipe, u32 interrupt_mask, u32 enabled_irq_mask) { u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->de_irq_mask[pipe]) { dev_priv->de_irq_mask[pipe] = new_val; intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); } } void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 
bits) { bdw_update_pipe_irq(i915, pipe, bits, bits); } void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits) { bdw_update_pipe_irq(i915, pipe, bits, 0); } /** * ibx_display_interrupt_update - update SDEIMR * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, u32 interrupt_mask, u32 enabled_irq_mask) { u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); lockdep_assert_held(&dev_priv->irq_lock); if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); } void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) { ibx_display_interrupt_update(i915, bits, bits); } void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) { ibx_display_interrupt_update(i915, bits, 0); } u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; u32 enable_mask = status_mask << 16; lockdep_assert_held(&dev_priv->irq_lock); if (DISPLAY_VER(dev_priv) < 5) goto out; /* * On pipe A we don't support the PSR interrupt yet, * on pipe B and C the same bit MBZ. */ if (drm_WARN_ON_ONCE(&dev_priv->drm, status_mask & PIPE_A_PSR_STATUS_VLV)) return 0; /* * On pipe B and C we don't support the PSR interrupt yet, on pipe * A the same bit is for perf counters which we don't use either. */ if (drm_WARN_ON_ONCE(&dev_priv->drm, status_mask & PIPE_B_PSR_STATUS_VLV)) return 0; enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | SPRITE0_FLIP_DONE_INT_EN_VLV | SPRITE1_FLIP_DONE_INT_EN_VLV); if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; out: drm_WARN_ONCE(&dev_priv->drm, enable_mask & ~PIPESTAT_INT_ENABLE_MASK || status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", pipe_name(pipe), enable_mask, status_mask); return enable_mask; } void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 status_mask) { i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) return; dev_priv->pipestat_irq_mask[pipe] |= status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); intel_uncore_posting_read(&dev_priv->uncore, reg); } void i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 status_mask) { i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) return; dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); intel_uncore_posting_read(&dev_priv->uncore, reg); } static bool i915_has_asle(struct drm_i915_private *dev_priv) { if (!dev_priv->display.opregion.asle) return false; return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion * @dev_priv: i915 device private */ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { if (!i915_has_asle(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); if (DISPLAY_VER(dev_priv) >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); } #if defined(CONFIG_DEBUG_FS) static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; trace_intel_pipe_crc(crtc, crcs); spin_lock(&pipe_crc->lock); /* * For some not yet identified reason, the first CRC is * bonkers. So let's just wait for the next vblank and read * out the buggy result. * * On GEN8+ sometimes the second CRC is bonkers as well, so * don't trust that one either. */ if (pipe_crc->skipped <= 0 || (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); return; } spin_unlock(&pipe_crc->lock); drm_crtc_add_crc_entry(&crtc->base, true, drm_crtc_accurate_vblank_count(&crtc->base), crcs); } #else static inline void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) {} #endif static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); struct drm_crtc_state *crtc_state = crtc->base.state; struct drm_pending_vblank_event *e = crtc_state->event; struct drm_device *dev = &i915->drm; unsigned long irqflags; spin_lock_irqsave(&dev->event_lock, irqflags); crtc_state->event = NULL; drm_crtc_send_vblank_event(&crtc->base, e); spin_unlock_irqrestore(&dev->event_lock, irqflags); } static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { display_pipe_crc_irq_handler(dev_priv, pipe, intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 0, 0, 0, 0); } static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { display_pipe_crc_irq_handler(dev_priv, pipe, intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); } static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 res1, res2; if (DISPLAY_VER(dev_priv) >= 3) res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); else res1 = 0; if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); else res2 = 0; display_pipe_crc_irq_handler(dev_priv, pipe, intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), intel_uncore_read(&dev_priv->uncore, 
PIPE_CRC_RES_GREEN(pipe)), intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), res1, res2); } void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; for_each_pipe(dev_priv, pipe) { intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); dev_priv->pipestat_irq_mask[pipe] = 0; } } void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { enum pipe pipe; spin_lock(&dev_priv->irq_lock); if (!dev_priv->display_irqs_enabled) { spin_unlock(&dev_priv->irq_lock); return; } for_each_pipe(dev_priv, pipe) { i915_reg_t reg; u32 status_mask, enable_mask, iir_bit = 0; /* * PIPESTAT bits get signalled even when the interrupt is * disabled with the mask bits, and some of the status bits do * not generate interrupts at all (like the underrun bit). Hence * we need to be careful that we only handle what we want to * handle. */ /* fifo underruns are filtered in the underrun handler. */ status_mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { default: case PIPE_A: iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; break; case PIPE_B: iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; break; case PIPE_C: iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; break; } if (iir & iir_bit) status_mask |= dev_priv->pipestat_irq_mask[pipe]; if (!status_mask) continue; reg = PIPESTAT(pipe); pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); /* * Clear the PIPE*STAT regs before the IIR * * Toggle the enable bits to make sure we get an * edge in the ISR pipe event bit if we don't clear * all the enabled status bits. Otherwise the edge * triggered IIR on i965/g4x wouldn't notice that * an interrupt is still pending.
*/ if (pipe_stats[pipe]) { intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); intel_uncore_write(&dev_priv->uncore, reg, enable_mask); } } spin_unlock(&dev_priv->irq_lock); } void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, u16 iir, u32 pipe_stats[I915_MAX_PIPES]) { enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } } void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { bool blc_event = false; enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev_priv); } void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { bool blc_event = false; enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev_priv); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) intel_gmbus_irq_handler(dev_priv); } void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) flip_done_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) intel_gmbus_irq_handler(dev_priv); } static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; ibx_hpd_irq_handler(dev_priv, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", port_name(port)); } if (pch_iir & SDE_AUX_MASK) intel_dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS) intel_gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_HDCP_MASK) drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) drm_err(&dev_priv->drm, "PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) { for_each_pipe(dev_priv, pipe) drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), 
intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); } if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) drm_dbg(&dev_priv->drm, "PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); if (pch_iir & SDE_TRANSB_FIFO_UNDER) intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); } static void ivb_err_int_handler(struct drm_i915_private *dev_priv) { u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); enum pipe pipe; if (err_int & ERR_INT_POISON) drm_err(&dev_priv->drm, "Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { if (IS_IVYBRIDGE(dev_priv)) ivb_pipe_crc_irq_handler(dev_priv, pipe); else hsw_pipe_crc_irq_handler(dev_priv, pipe); } } intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); } static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) { u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); enum pipe pipe; if (serr_int & SERR_INT_POISON) drm_err(&dev_priv->drm, "PCH poison interrupt\n"); for_each_pipe(dev_priv, pipe) if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); } static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; ibx_hpd_irq_handler(dev_priv, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> SDE_AUDIO_POWER_SHIFT_CPT); drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", port_name(port)); } if (pch_iir & SDE_AUX_MASK_CPT) intel_dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS_CPT) intel_gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); if (pch_iir & SDE_AUDIO_CP_CHG_CPT) drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); if (pch_iir & SDE_FDI_MASK_CPT) { for_each_pipe(dev_priv, pipe) drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); } if (pch_iir & SDE_ERROR_CPT) cpt_serr_int_handler(dev_priv); } void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) { enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if (hotplug_trigger) ilk_hpd_irq_handler(dev_priv, hotplug_trigger); if (de_iir & DE_AUX_CHANNEL_A) intel_dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE) intel_opregion_asle_intr(dev_priv); if (de_iir & DE_POISON) drm_err(&dev_priv->drm, "Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe)) intel_handle_vblank(dev_priv, pipe); if (de_iir & DE_PLANE_FLIP_DONE(pipe)) flip_done_handler(dev_priv, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) i9xx_pipe_crc_irq_handler(dev_priv, pipe); } /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); if (HAS_PCH_CPT(dev_priv)) cpt_irq_handler(dev_priv, pch_iir); else ibx_irq_handler(dev_priv, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ 
intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); } if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) gen5_rps_irq_handler(&to_gt(dev_priv)->rps); } void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) { enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) ilk_hpd_irq_handler(dev_priv, hotplug_trigger); if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev_priv); if (de_iir & DE_EDP_PSR_INT_HSW) { struct intel_encoder *encoder; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 psr_iir; psr_iir = intel_uncore_rmw(&dev_priv->uncore, EDP_PSR_IIR, 0, 0); intel_psr_irq_handler(intel_dp, psr_iir); break; } } if (de_iir & DE_AUX_CHANNEL_A_IVB) intel_dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(dev_priv); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) intel_handle_vblank(dev_priv, pipe); if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) flip_done_handler(dev_priv, pipe); } /* check event from PCH */ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); cpt_irq_handler(dev_priv, pch_iir); /* clear PCH hotplug event before clear CPU irq */ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); } } static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) { u32 mask; if (DISPLAY_VER(dev_priv) >= 14) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB; else if (DISPLAY_VER(dev_priv) >= 13) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | XELPD_DE_PORT_AUX_DDID | XELPD_DE_PORT_AUX_DDIE | TGL_DE_PORT_AUX_USBC1 | TGL_DE_PORT_AUX_USBC2 | TGL_DE_PORT_AUX_USBC3 | TGL_DE_PORT_AUX_USBC4; else if (DISPLAY_VER(dev_priv) >= 12) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | TGL_DE_PORT_AUX_USBC1 | TGL_DE_PORT_AUX_USBC2 | TGL_DE_PORT_AUX_USBC3 | TGL_DE_PORT_AUX_USBC4 | TGL_DE_PORT_AUX_USBC5 | TGL_DE_PORT_AUX_USBC6; mask = GEN8_AUX_CHANNEL_A; if (DISPLAY_VER(dev_priv) >= 9) mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; if (DISPLAY_VER(dev_priv) == 11) { mask |= ICL_AUX_CHANNEL_F; mask |= ICL_AUX_CHANNEL_E; } return mask; } static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) return RKL_DE_PIPE_IRQ_FAULT_ERRORS; else if (DISPLAY_VER(dev_priv) >= 11) return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; else if (DISPLAY_VER(dev_priv) >= 9) return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; else return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; } static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) { wake_up_all(&dev_priv->display.pmdemand.waitqueue); } static void gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) { bool found = false; if (DISPLAY_VER(dev_priv) >= 14) { if (iir & (XELPDP_PMDEMAND_RSP | XELPDP_PMDEMAND_RSPTOUT_ERR)) { if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR) drm_dbg(&dev_priv->drm, "Error waiting for Punit PM Demand Response\n"); intel_pmdemand_irq_handler(dev_priv); found = true; } } else if (iir & GEN8_DE_MISC_GSE) { intel_opregion_asle_intr(dev_priv); found = true; } if (iir & GEN8_DE_EDP_PSR) { struct intel_encoder *encoder; u32 psr_iir; i915_reg_t iir_reg; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (DISPLAY_VER(dev_priv) >= 12) iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); else iir_reg = 
EDP_PSR_IIR; psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); if (psr_iir) found = true; intel_psr_irq_handler(intel_dp, psr_iir); /* prior GEN12 only have one EDP PSR */ if (DISPLAY_VER(dev_priv) < 12) break; } } if (!found) drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); } static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, u32 te_trigger) { enum pipe pipe = INVALID_PIPE; enum transcoder dsi_trans; enum port port; u32 val; /* * In case of dual link, TE comes from DSI_1 * this is to check if dual link is enabled */ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); val &= PORT_SYNC_MODE_ENABLE; /* * if dual link is enabled, then read DSI_0 * transcoder registers */ port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? PORT_A : PORT_B; dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; /* Check if DSI configured in command mode */ val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); val = val & OP_MODE_MASK; if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n"); return; } /* Get PIPE for handling VBLANK event */ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); switch (val & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: pipe = PIPE_A; break; case TRANS_DDI_EDP_INPUT_B_ONOFF: pipe = PIPE_B; break; case TRANS_DDI_EDP_INPUT_C_ONOFF: pipe = PIPE_C; break; default: drm_err(&dev_priv->drm, "Invalid PIPE\n"); return; } intel_handle_vblank(dev_priv, pipe); /* clear TE in dsi IIR */ port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); } static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 9) return GEN9_PIPE_PLANE1_FLIP_DONE; else return GEN8_PIPE_PRIMARY_FLIP_DONE; } u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) { u32 mask = GEN8_PIPE_FIFO_UNDERRUN; if (DISPLAY_VER(dev_priv) >= 13) mask |= XELPD_PIPE_SOFT_UNDERRUN | XELPD_PIPE_HARD_UNDERRUN; return mask; } static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) { u32 pica_ier = 0; *pica_iir = 0; *pch_iir = intel_de_read(i915, SDEIIR); if (!*pch_iir) return; /** * PICA IER must be disabled/re-enabled around clearing PICA IIR and * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set * their flags both in the PICA and SDE IIR.
*/ if (*pch_iir & SDE_PICAINTERRUPT) { drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP); pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0); *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR); intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir); } intel_de_write(i915, SDEIIR, *pch_iir); if (pica_ier) intel_de_write(i915, PICAINTERRUPT_IER, pica_ier); } void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { u32 iir; enum pipe pipe; drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); if (master_ctl & GEN8_DE_MISC_IRQ) { iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); if (iir) { intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); gen8_de_misc_irq_handler(dev_priv, iir); } else { drm_err_ratelimited(&dev_priv->drm, "The master control interrupt lied (DE MISC)!\n"); } } if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); if (iir) { intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); gen11_hpd_irq_handler(dev_priv, iir); } else { drm_err_ratelimited(&dev_priv->drm, "The master control interrupt lied, (DE HPD)!\n"); } } if (master_ctl & GEN8_DE_PORT_IRQ) { iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); if (iir) { bool found = false; intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); if (iir & gen8_de_port_aux_mask(dev_priv)) { intel_dp_aux_irq_handler(dev_priv); found = true; } if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { bxt_hpd_irq_handler(dev_priv, hotplug_trigger); found = true; } } else if (IS_BROADWELL(dev_priv)) { u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { ilk_hpd_irq_handler(dev_priv, hotplug_trigger); found = true; } } if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && (iir & BXT_DE_PORT_GMBUS)) { intel_gmbus_irq_handler(dev_priv); found = true; } if (DISPLAY_VER(dev_priv) >= 11) { u32 te_trigger = iir & (DSI0_TE | DSI1_TE); if (te_trigger) { gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); found = true; } } if (!found) drm_err_ratelimited(&dev_priv->drm, "Unexpected DE Port interrupt\n"); } else { drm_err_ratelimited(&dev_priv->drm, "The master control interrupt lied (DE PORT)!\n"); } } for_each_pipe(dev_priv, pipe) { u32 fault_errors; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); if (!iir) { drm_err_ratelimited(&dev_priv->drm, "The master control interrupt lied (DE PIPE)!\n"); continue; } intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK) intel_handle_vblank(dev_priv, pipe); if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) flip_done_handler(dev_priv, pipe); if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); if (iir & gen8_de_pipe_underrun_mask(dev_priv)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); if (fault_errors) drm_err_ratelimited(&dev_priv->drm, "Fault errors on pipe %c: 0x%08x\n", pipe_name(pipe), fault_errors); } if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && master_ctl & GEN8_DE_PCH_IRQ) { u32 pica_iir; /* * FIXME(BDW): Assume for now that the new interrupt handling * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. 
*/ gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); if (iir) { if (pica_iir) xelpdp_pica_irq_handler(dev_priv, pica_iir); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_handler(dev_priv, iir); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); } else { /* * Like on previous PCH there seems to be something * fishy going on with forwarding PCH interrupts. */ drm_dbg(&dev_priv->drm, "The master control interrupt lied (SDE)!\n"); } } } u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); if (likely(iir)) raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); return iir; } void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) { if (iir & GEN11_GU_MISC_GSE) intel_opregion_asle_intr(i915); } void gen11_display_irq_handler(struct drm_i915_private *i915) { void __iomem * const regs = intel_uncore_regs(&i915->uncore); const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); disable_rpm_wakeref_asserts(&i915->runtime_pm); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. */ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); gen8_de_irq_handler(i915, disp_ctl); raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); enable_rpm_wakeref_asserts(&i915->runtime_pm); } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ int i8xx_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } int i915gm_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); /* * Vblank interrupts fail to wake the device up from C2+. * Disabling render clock gating during C-states avoids * the problem. There is a small power cost so we do this * only when vblank interrupts are actually enabled. */ if (dev_priv->vblank_enabled++ == 0) intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); return i8xx_enable_vblank(crtc); } int i965_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } int ilk_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = DISPLAY_VER(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ilk_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even though there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated. 
*/ if (HAS_PSR(dev_priv)) drm_crtc_vblank_restore(crtc); return 0; } static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, bool enable) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum port port; if (!(intel_crtc->mode_flags & (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) return false; /* for dual link cases we consider TE from slave */ if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) port = PORT_B; else port = PORT_A; intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT); intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); return true; } int bdw_enable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; if (gen11_dsi_configure_te(crtc, true)) return 0; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even if there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated, so check only for PSR. */ if (HAS_PSR(dev_priv)) drm_crtc_vblank_restore(&crtc->base); return 0; } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ void i8xx_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void i915gm_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); i8xx_disable_vblank(crtc); if (--dev_priv->vblank_enabled == 0) intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } void i965_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_disable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void ilk_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ilk_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void bdw_disable_vblank(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; unsigned long irqflags; if (gen11_dsi_configure_te(crtc, false)) return; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void vlv_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; if (IS_CHERRYVIEW(dev_priv)) intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); i9xx_pipestat_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, VLV_); dev_priv->irq_mask = ~0u; } void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 pipestat_mask; u32 enable_mask; enum pipe pipe; pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); for_each_pipe(dev_priv, pipe) i915_enable_pipestat(dev_priv, pipe, pipestat_mask); enable_mask = I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT; if (IS_CHERRYVIEW(dev_priv)) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); dev_priv->irq_mask = ~enable_mask; GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); } void gen8_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; enum pipe pipe; if (!HAS_DISPLAY(dev_priv)) return; intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); } void gen11_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; enum pipe pipe; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); if (!HAS_DISPLAY(dev_priv)) return; intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); if (DISPLAY_VER(dev_priv) >= 12) { enum transcoder trans; for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); if (!intel_display_power_is_enabled(dev_priv, domain)) continue; intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); } } else { intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); } for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); if (DISPLAY_VER(dev_priv) >= 14) GEN3_IRQ_RESET(uncore, PICAINTERRUPT_); else 
GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { struct intel_uncore *uncore = &dev_priv->uncore; u32 extra_ier = GEN8_PIPE_VBLANK | gen8_de_pipe_underrun_mask(dev_priv) | gen8_de_pipe_flip_done_mask(dev_priv); enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); if (!intel_irqs_enabled(dev_priv)) { spin_unlock_irq(&dev_priv->irq_lock); return; } for_each_pipe_masked(dev_priv, pipe, pipe_mask) GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], ~dev_priv->de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask) { struct intel_uncore *uncore = &dev_priv->uncore; enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); if (!intel_irqs_enabled(dev_priv)) { spin_unlock_irq(&dev_priv->irq_lock); return; } for_each_pipe_masked(dev_priv, pipe, pipe_mask) GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ intel_synchronize_irq(dev_priv); } /* * SDEIER is also touched by the interrupt handler to work around missed PCH * interrupts. Hence we can't update it after the interrupt handler is enabled - * instead we unconditionally enable all PCH interrupt sources here, but then * only unmask them as needed with SDEIMR. * * Note that we currently do this after installing the interrupt handler, * but before we enable the master interrupt. That should be sufficient * to avoid races with the irq handler, assuming we have MSI. Shared legacy * interrupts could still race. */ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 mask; if (HAS_PCH_NOP(dev_priv)) return; if (HAS_PCH_IBX(dev_priv)) mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; else mask = SDE_GMBUS_CPT; GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) return; dev_priv->display_irqs_enabled = true; if (intel_irqs_enabled(dev_priv)) { vlv_display_irq_reset(dev_priv); vlv_display_irq_postinstall(dev_priv); } } void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->irq_lock); if (!dev_priv->display_irqs_enabled) return; dev_priv->display_irqs_enabled = false; if (intel_irqs_enabled(dev_priv)) vlv_display_irq_reset(dev_priv); } void ilk_de_irq_postinstall(struct drm_i915_private *i915) { struct intel_uncore *uncore = &i915->uncore; u32 display_mask, extra_mask; if (GRAPHICS_VER(i915) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | DE_PLANE_FLIP_DONE_IVB(PLANE_C) | DE_PLANE_FLIP_DONE_IVB(PLANE_B) | DE_PLANE_FLIP_DONE_IVB(PLANE_A) | DE_DP_A_HOTPLUG_IVB); } else { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | DE_POISON); extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_PLANE_FLIP_DONE(PLANE_A) | DE_PLANE_FLIP_DONE(PLANE_B) | DE_DP_A_HOTPLUG); } if (IS_HASWELL(i915)) { 
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); display_mask |= DE_EDP_PSR_INT_HSW; } if (IS_IRONLAKE_M(i915)) extra_mask |= DE_PCU_EVENT; i915->irq_mask = ~display_mask; ibx_irq_postinstall(i915); GEN3_IRQ_INIT(uncore, DE, i915->irq_mask, display_mask | extra_mask); } static void mtp_irq_postinstall(struct drm_i915_private *i915); static void icp_irq_postinstall(struct drm_i915_private *i915); void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); enum pipe pipe; if (!HAS_DISPLAY(dev_priv)) return; if (DISPLAY_VER(dev_priv) >= 14) mtp_irq_postinstall(dev_priv); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev_priv); if (DISPLAY_VER(dev_priv) <= 10) de_misc_masked |= GEN8_DE_MISC_GSE; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; if (DISPLAY_VER(dev_priv) >= 14) { de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR | XELPDP_PMDEMAND_RSP; } else if (DISPLAY_VER(dev_priv) >= 11) { enum port port; if (intel_bios_is_dsi_present(dev_priv, &port)) de_port_masked |= DSI0_TE | DSI1_TE; } de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | gen8_de_pipe_underrun_mask(dev_priv) | gen8_de_pipe_flip_done_mask(dev_priv); de_port_enables = de_port_masked; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; else if (IS_BROADWELL(dev_priv)) de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; if (DISPLAY_VER(dev_priv) >= 12) { enum transcoder trans; for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); if (!intel_display_power_is_enabled(dev_priv, domain)) continue; gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); } } else { gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); } for_each_pipe(dev_priv, pipe) { dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], de_pipe_enables); } GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); if (IS_DISPLAY_VER(dev_priv, 11, 13)) { u32 de_hpd_masked = 0; u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); } } static void mtp_irq_postinstall(struct drm_i915_private *i915) { struct intel_uncore *uncore = &i915->uncore; u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT; u32 de_hpd_mask = XELPDP_AUX_TC_MASK; u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK; GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask, de_hpd_enables); GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff); } static void icp_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u32 mask = SDE_GMBUS_ICP; GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); } void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) return; gen8_de_irq_postinstall(dev_priv); 
intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } void dg1_de_irq_postinstall(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; gen8_de_irq_postinstall(i915); intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } void intel_display_irq_init(struct drm_i915_private *i915) { i915->drm.vblank_disable_immediate = true; /* * Most platforms treat the display irq block as an always-on power * domain. vlv/chv can disable it at runtime and need special care to * avoid writing any of the display block registers outside of the power * domain. We defer setting up the display irqs in this case to the * runtime pm. */ i915->display_irqs_enabled = true; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) i915->display_irqs_enabled = false; intel_hotplug_irq_init(i915); }
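The display IRQ postinstall helpers above all follow the same masking convention: the driver caches irq_mask as the bitwise complement of the enable mask and programs IMR with it, while IER gets the enable mask itself (see vlv_display_irq_postinstall() and ilk_de_irq_postinstall()). Below is a minimal standalone model of that convention, for illustration only; it is plain C with invented toy names and arbitrary bit positions, not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the IMR/IER convention used by the *_irq_postinstall helpers:
 * IER selects which events may raise the interrupt, IMR hides events from the
 * CPU. The driver keeps its cached irq_mask equal to ~enable_mask so the two
 * registers stay consistent.
 */
struct toy_irq_regs {
	uint32_t imr;	/* 1 = masked (hidden)  */
	uint32_t ier;	/* 1 = enabled          */
};

static void toy_irq_init(struct toy_irq_regs *r, uint32_t imr, uint32_t ier)
{
	r->imr = imr;
	r->ier = ier;
}

/* An event is reported only if it is enabled and not masked. */
static int toy_irq_reported(const struct toy_irq_regs *r, uint32_t event_bit)
{
	return (r->ier & event_bit) && !(r->imr & event_bit);
}

int main(void)
{
	const uint32_t PIPE_A_EVENT = 1u << 6;	/* arbitrary bit positions */
	const uint32_t PIPE_B_EVENT = 1u << 4;
	uint32_t enable_mask = PIPE_A_EVENT;	/* only pipe A wanted      */
	uint32_t irq_mask = ~enable_mask;	/* cached complement       */
	struct toy_irq_regs regs;

	toy_irq_init(&regs, irq_mask, enable_mask);

	assert(toy_irq_reported(&regs, PIPE_A_EVENT));
	assert(!toy_irq_reported(&regs, PIPE_B_EVENT));
	printf("pipe A reported, pipe B filtered\n");
	return 0;
}

Keeping the cached mask strictly equal to ~enable_mask is also what the drm_WARN_ON() in vlv_display_irq_postinstall() relies on: the reset path restores irq_mask to ~0u, so a non-~0u value at install time indicates a missing reset.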
linux-master
drivers/gpu/drm/i915/display/intel_display_irq.c
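i915gm_disable_vblank() in intel_display_irq.c above drops the render clock gating workaround by writing SCPD0 with _MASKED_BIT_DISABLE(): on such masked registers the upper 16 bits of the written value select which of the lower 16 bits actually change, so one write can clear a single bit without a read-modify-write cycle. Here is a small self-contained sketch of that idiom, with toy macro names and an arbitrary bit chosen purely for illustration; it is not the driver's implementation.

#include <assert.h>
#include <stdint.h>

/* Upper 16 bits = write-enable mask, lower 16 bits = new bit values. */
#define TOY_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define TOY_MASKED_BIT_ENABLE(a)	TOY_MASKED_FIELD((a), (a))
#define TOY_MASKED_BIT_DISABLE(a)	TOY_MASKED_FIELD((a), 0)

/* Hardware-side behaviour of a masked register. */
static uint16_t toy_masked_write(uint16_t old, uint32_t val)
{
	uint16_t mask = val >> 16;
	uint16_t bits = val & 0xffff;

	return (old & ~mask) | (bits & mask);
}

int main(void)
{
	const uint16_t CLOCK_GATE_DISABLE = 1u << 3;	/* arbitrary bit         */
	uint16_t reg = 0x00f0;				/* arbitrary prior state */

	/* Set the bit: only bit 3 changes, bits 4..7 are preserved. */
	reg = toy_masked_write(reg, TOY_MASKED_BIT_ENABLE(CLOCK_GATE_DISABLE));
	assert(reg == (0x00f0 | CLOCK_GATE_DISABLE));

	/* Clear it again without disturbing anything else. */
	reg = toy_masked_write(reg, TOY_MASKED_BIT_DISABLE(CLOCK_GATE_DISABLE));
	assert(reg == 0x00f0);
	return 0;
}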
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/component.h> #include <linux/kernel.h> #include <drm/drm_edid.h> #include <drm/i915_component.h> #include "i915_drv.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_audio_regs.h" #include "intel_cdclk.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" /** * DOC: High Definition Audio over HDMI and Display Port * * The graphics and audio drivers together support High Definition Audio over * HDMI and Display Port. The audio programming sequences are divided into audio * codec and controller enable and disable sequences. The graphics driver * handles the audio codec sequences, while the audio driver handles the audio * controller sequences. * * The disable sequences must be performed before disabling the transcoder or * port. The enable sequences may only be performed after enabling the * transcoder and port, and after completed link training. Therefore the audio * enable/disable sequences are part of the modeset sequence. * * The codec and controller sequences could be done either parallel or serial, * but generally the ELDV/PD change in the codec sequence indicates to the audio * driver that the controller sequence should start. Indeed, most of the * co-operation between the graphics and audio drivers is handled via audio * related registers. (The notable exception is the power management, not * covered here.) * * The struct &i915_audio_component is used to interact between the graphics * and audio drivers. The struct &i915_audio_component_ops @ops in it is * defined in graphics driver and called in audio driver. The * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. 
*/ struct intel_audio_funcs { void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void (*audio_codec_disable)(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); void (*audio_codec_get_config)(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); }; /* DP N/M table */ #define LC_810M 810000 #define LC_540M 540000 #define LC_270M 270000 #define LC_162M 162000 struct dp_aud_n_m { int sample_rate; int clock; u16 m; u16 n; }; struct hdmi_aud_ncts { int sample_rate; int clock; int n; int cts; }; /* Values according to DP 1.4 Table 2-104 */ static const struct dp_aud_n_m dp_aud_n_m[] = { { 32000, LC_162M, 1024, 10125 }, { 44100, LC_162M, 784, 5625 }, { 48000, LC_162M, 512, 3375 }, { 64000, LC_162M, 2048, 10125 }, { 88200, LC_162M, 1568, 5625 }, { 96000, LC_162M, 1024, 3375 }, { 128000, LC_162M, 4096, 10125 }, { 176400, LC_162M, 3136, 5625 }, { 192000, LC_162M, 2048, 3375 }, { 32000, LC_270M, 1024, 16875 }, { 44100, LC_270M, 784, 9375 }, { 48000, LC_270M, 512, 5625 }, { 64000, LC_270M, 2048, 16875 }, { 88200, LC_270M, 1568, 9375 }, { 96000, LC_270M, 1024, 5625 }, { 128000, LC_270M, 4096, 16875 }, { 176400, LC_270M, 3136, 9375 }, { 192000, LC_270M, 2048, 5625 }, { 32000, LC_540M, 1024, 33750 }, { 44100, LC_540M, 784, 18750 }, { 48000, LC_540M, 512, 11250 }, { 64000, LC_540M, 2048, 33750 }, { 88200, LC_540M, 1568, 18750 }, { 96000, LC_540M, 1024, 11250 }, { 128000, LC_540M, 4096, 33750 }, { 176400, LC_540M, 3136, 18750 }, { 192000, LC_540M, 2048, 11250 }, { 32000, LC_810M, 1024, 50625 }, { 44100, LC_810M, 784, 28125 }, { 48000, LC_810M, 512, 16875 }, { 64000, LC_810M, 2048, 50625 }, { 88200, LC_810M, 1568, 28125 }, { 96000, LC_810M, 1024, 16875 }, { 128000, LC_810M, 4096, 50625 }, { 176400, LC_810M, 3136, 28125 }, { 192000, LC_810M, 2048, 16875 }, }; static const struct dp_aud_n_m * audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate) { int i; for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) { if (rate == dp_aud_n_m[i].sample_rate && crtc_state->port_clock == dp_aud_n_m[i].clock) return &dp_aud_n_m[i]; } return NULL; } static const struct { int clock; u32 config; } hdmi_audio_clock[] = { { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 }, { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 }, { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 }, { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 }, }; /* HDMI N/CTS table */ #define TMDS_297M 297000 #define TMDS_296M 296703 #define TMDS_594M 594000 #define TMDS_593M 593407 static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = { { 32000, TMDS_296M, 5824, 421875 }, { 32000, TMDS_297M, 3072, 222750 }, { 32000, TMDS_593M, 5824, 843750 }, { 32000, TMDS_594M, 3072, 445500 }, { 44100, TMDS_296M, 4459, 234375 }, { 44100, TMDS_297M, 4704, 247500 }, { 44100, TMDS_593M, 8918, 937500 }, { 44100, TMDS_594M, 9408, 990000 }, { 88200, TMDS_296M, 8918, 234375 }, { 88200, TMDS_297M, 9408, 247500 }, { 
88200, TMDS_593M, 17836, 937500 }, { 88200, TMDS_594M, 18816, 990000 }, { 176400, TMDS_296M, 17836, 234375 }, { 176400, TMDS_297M, 18816, 247500 }, { 176400, TMDS_593M, 35672, 937500 }, { 176400, TMDS_594M, 37632, 990000 }, { 48000, TMDS_296M, 5824, 281250 }, { 48000, TMDS_297M, 5120, 247500 }, { 48000, TMDS_593M, 5824, 562500 }, { 48000, TMDS_594M, 6144, 594000 }, { 96000, TMDS_296M, 11648, 281250 }, { 96000, TMDS_297M, 10240, 247500 }, { 96000, TMDS_593M, 11648, 562500 }, { 96000, TMDS_594M, 12288, 594000 }, { 192000, TMDS_296M, 23296, 281250 }, { 192000, TMDS_297M, 20480, 247500 }, { 192000, TMDS_593M, 23296, 562500 }, { 192000, TMDS_594M, 24576, 594000 }, }; /* Appendix C - N & CTS values for deep color from HDMI 2.0 spec*/ /* HDMI N/CTS table for 10 bit deep color(30 bpp)*/ #define TMDS_371M 371250 #define TMDS_370M 370878 static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = { { 32000, TMDS_370M, 5824, 527344 }, { 32000, TMDS_371M, 6144, 556875 }, { 44100, TMDS_370M, 8918, 585938 }, { 44100, TMDS_371M, 4704, 309375 }, { 88200, TMDS_370M, 17836, 585938 }, { 88200, TMDS_371M, 9408, 309375 }, { 176400, TMDS_370M, 35672, 585938 }, { 176400, TMDS_371M, 18816, 309375 }, { 48000, TMDS_370M, 11648, 703125 }, { 48000, TMDS_371M, 5120, 309375 }, { 96000, TMDS_370M, 23296, 703125 }, { 96000, TMDS_371M, 10240, 309375 }, { 192000, TMDS_370M, 46592, 703125 }, { 192000, TMDS_371M, 20480, 309375 }, }; /* HDMI N/CTS table for 12 bit deep color(36 bpp)*/ #define TMDS_445_5M 445500 #define TMDS_445M 445054 static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { { 32000, TMDS_445M, 5824, 632813 }, { 32000, TMDS_445_5M, 4096, 445500 }, { 44100, TMDS_445M, 8918, 703125 }, { 44100, TMDS_445_5M, 4704, 371250 }, { 88200, TMDS_445M, 17836, 703125 }, { 88200, TMDS_445_5M, 9408, 371250 }, { 176400, TMDS_445M, 35672, 703125 }, { 176400, TMDS_445_5M, 18816, 371250 }, { 48000, TMDS_445M, 5824, 421875 }, { 48000, TMDS_445_5M, 5120, 371250 }, { 96000, TMDS_445M, 11648, 421875 }, { 96000, TMDS_445_5M, 10240, 371250 }, { 192000, TMDS_445M, 23296, 421875 }, { 192000, TMDS_445_5M, 20480, 371250 }, }; /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int i; for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock) break; } if (DISPLAY_VER(i915) < 12 && adjusted_mode->crtc_clock > 148500) i = ARRAY_SIZE(hdmi_audio_clock); if (i == ARRAY_SIZE(hdmi_audio_clock)) { drm_dbg_kms(&i915->drm, "HDMI audio pixel clock setting for %d not found, falling back to defaults\n", adjusted_mode->crtc_clock); i = 1; } drm_dbg_kms(&i915->drm, "Configuring HDMI audio for pixel clock %d (0x%08x)\n", hdmi_audio_clock[i].clock, hdmi_audio_clock[i].config); return hdmi_audio_clock[i].config; } static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state, int rate) { const struct hdmi_aud_ncts *hdmi_ncts_table; int i, size; if (crtc_state->pipe_bpp == 36) { hdmi_ncts_table = hdmi_aud_ncts_36bpp; size = ARRAY_SIZE(hdmi_aud_ncts_36bpp); } else if (crtc_state->pipe_bpp == 30) { hdmi_ncts_table = hdmi_aud_ncts_30bpp; size = ARRAY_SIZE(hdmi_aud_ncts_30bpp); } else { hdmi_ncts_table = hdmi_aud_ncts_24bpp; size = ARRAY_SIZE(hdmi_aud_ncts_24bpp); } for (i = 0; i < size; i++) { if (rate == hdmi_ncts_table[i].sample_rate && crtc_state->port_clock 
== hdmi_ncts_table[i].clock) { return hdmi_ncts_table[i].n; } } return 0; } /* ELD buffer size in dwords */ static int g4x_eld_buffer_size(struct drm_i915_private *i915) { u32 tmp; tmp = intel_de_read(i915, G4X_AUD_CNTL_ST); return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp); } static void g4x_audio_codec_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 *eld = (u32 *)crtc_state->eld; int eld_buffer_size, len, i; u32 tmp; tmp = intel_de_read(i915, G4X_AUD_CNTL_ST); if ((tmp & G4X_ELD_VALID) == 0) return; intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0); eld_buffer_size = g4x_eld_buffer_size(i915); len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size); for (i = 0; i < len; i++) eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID); } static void g4x_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); /* Invalidate ELD */ intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_VALID, 0); intel_crtc_wait_for_next_vblank(crtc); intel_crtc_wait_for_next_vblank(crtc); } static void g4x_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const u32 *eld = (const u32 *)crtc_state->eld; int eld_buffer_size, len, i; intel_crtc_wait_for_next_vblank(crtc); intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0); eld_buffer_size = g4x_eld_buffer_size(i915); len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size); for (i = 0; i < len; i++) intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]); for (; i < eld_buffer_size; i++) intel_de_write(i915, G4X_HDMIW_HDMIEDID, 0); drm_WARN_ON(&i915->drm, (intel_de_read(i915, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0); intel_de_rmw(i915, G4X_AUD_CNTL_ST, 0, G4X_ELD_VALID); } static void hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct i915_audio_component *acomp = i915->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; int rate; u32 tmp; rate = acomp ? 
acomp->aud_sample_rate[port] : 0; nm = audio_config_dp_get_n_m(crtc_state, rate); if (nm) drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m, nm->n); else drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n"); tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp |= AUD_CONFIG_N_VALUE_INDEX; if (nm) { tmp &= ~AUD_CONFIG_N_MASK; tmp |= AUD_CONFIG_N(nm->n); tmp |= AUD_CONFIG_N_PROG_ENABLE; } intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp); tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); tmp &= ~AUD_CONFIG_M_MASK; tmp &= ~AUD_M_CTS_M_VALUE_INDEX; tmp &= ~AUD_M_CTS_M_PROG_ENABLE; if (nm) { tmp |= nm->m; tmp |= AUD_M_CTS_M_VALUE_INDEX; tmp |= AUD_M_CTS_M_PROG_ENABLE; } intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); } static void hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct i915_audio_component *acomp = i915->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; u32 tmp; rate = acomp ? acomp->aud_sample_rate[port] : 0; tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp |= audio_config_hdmi_pixel_clock(crtc_state); n = audio_config_hdmi_get_n(crtc_state, rate); if (n != 0) { drm_dbg_kms(&i915->drm, "using N %d\n", n); tmp &= ~AUD_CONFIG_N_MASK; tmp |= AUD_CONFIG_N(n); tmp |= AUD_CONFIG_N_PROG_ENABLE; } else { drm_dbg_kms(&i915->drm, "using automatic N\n"); } intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp); /* * Let's disable "Enable CTS or M Prog bit" * and let HW calculate the value */ tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); tmp &= ~AUD_M_CTS_M_PROG_ENABLE; tmp &= ~AUD_M_CTS_M_VALUE_INDEX; intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); } static void hsw_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { if (intel_crtc_has_dp_encoder(crtc_state)) hsw_dp_audio_config_update(encoder, crtc_state); else hsw_hdmi_audio_config_update(encoder, crtc_state); } static void hsw_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; mutex_lock(&i915->display.audio.mutex); /* Disable timestamps */ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder), AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK, AUD_CONFIG_N_PROG_ENABLE | (intel_crtc_has_dp_encoder(old_crtc_state) ? 
AUD_CONFIG_N_VALUE_INDEX : 0)); /* Invalidate ELD */ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD, AUDIO_ELD_VALID(cpu_transcoder), 0); intel_crtc_wait_for_next_vblank(crtc); intel_crtc_wait_for_next_vblank(crtc); /* Disable audio presence detect */ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD, AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0); mutex_unlock(&i915->display.audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); unsigned int link_clks_available, link_clks_required; unsigned int tu_data, tu_line, link_clks_active; unsigned int h_active, h_total, hblank_delta, pixel_clk; unsigned int fec_coeff, cdclk, vdsc_bpp; unsigned int link_clk, lanes; unsigned int hblank_rise; h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay; h_total = crtc_state->hw.adjusted_mode.crtc_htotal; pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; vdsc_bpp = crtc_state->dsc.compressed_bpp; cdclk = i915->display.cdclk.hw.cdclk; /* fec= 0.972261, using rounding multiplier of 1000000 */ fec_coeff = 972261; link_clk = crtc_state->port_clock; lanes = crtc_state->lane_count; drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :" "lanes = %u vdsc_bpp = %u cdclk = %u\n", h_active, link_clk, lanes, vdsc_bpp, cdclk); if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk)) return 0; link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28; link_clks_required = DIV_ROUND_UP(192000 * h_total, 1000 * pixel_clk) * (48 / lanes + 2); if (link_clks_available > link_clks_required) hblank_delta = 32; else hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk), mul_u32_u32(link_clk, cdclk)); tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000), mul_u32_u32(link_clk * lanes, fec_coeff)); tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff), mul_u32_u32(64 * pixel_clk, 1000000)); link_clks_active = (tu_line - 1) * 64 + tu_data; hblank_rise = (link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, 250) + 4) * pixel_clk / link_clk; return h_active - hblank_rise + hblank_delta; } static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state) { unsigned int h_active, h_total, pixel_clk; unsigned int link_clk, lanes; h_active = crtc_state->hw.adjusted_mode.hdisplay; h_total = crtc_state->hw.adjusted_mode.htotal; pixel_clk = crtc_state->hw.adjusted_mode.clock; link_clk = crtc_state->port_clock; lanes = crtc_state->lane_count; return ((h_total - h_active) * link_clk - 12 * pixel_clk) / (pixel_clk * (48 / lanes + 2)); } static void enable_audio_dsc_wa(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; unsigned int hblank_early_prog, samples_room; unsigned int val; if (DISPLAY_VER(i915) < 11) return; val = intel_de_read(i915, AUD_CONFIG_BE); if (DISPLAY_VER(i915) == 11) val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder); else if (DISPLAY_VER(i915) >= 12) val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder); if (crtc_state->dsc.compression_enable && crtc_state->hw.adjusted_mode.hdisplay >= 3840 && crtc_state->hw.adjusted_mode.vdisplay >= 2160) { /* Get hblank early enable value required */ val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder); hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state); if (hblank_early_prog < 32) val |= HBLANK_START_COUNT(cpu_transcoder, 
HBLANK_START_COUNT_32); else if (hblank_early_prog < 64) val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64); else if (hblank_early_prog < 96) val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96); else val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128); /* Get samples room value required */ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder); samples_room = calc_samples_room(crtc_state); if (samples_room < 3) val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room); else /* Program 0 i.e "All Samples available in buffer" */ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0); } intel_de_write(i915, AUD_CONFIG_BE, val); } static void hsw_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; mutex_lock(&i915->display.audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) enable_audio_dsc_wa(encoder, crtc_state); /* Enable audio presence detect */ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD, 0, AUDIO_OUTPUT_ENABLE(cpu_transcoder)); intel_crtc_wait_for_next_vblank(crtc); /* Invalidate ELD */ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD, AUDIO_ELD_VALID(cpu_transcoder), 0); /* * The audio componenent is used to convey the ELD * instead using of the hardware ELD buffer. */ /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); mutex_unlock(&i915->display.audio.mutex); } struct ibx_audio_regs { i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; }; static void ibx_audio_regs_init(struct drm_i915_private *i915, enum pipe pipe, struct ibx_audio_regs *regs) { if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); regs->aud_config = VLV_AUD_CFG(pipe); regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe); regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2; } else if (HAS_PCH_CPT(i915)) { regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); regs->aud_config = CPT_AUD_CFG(pipe); regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe); regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; } else if (HAS_PCH_IBX(i915)) { regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); regs->aud_config = IBX_AUD_CFG(pipe); regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe); regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2; } } static void ibx_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; struct ibx_audio_regs regs; if (drm_WARN_ON(&i915->drm, port == PORT_A)) return; ibx_audio_regs_init(i915, pipe, &regs); mutex_lock(&i915->display.audio.mutex); /* Disable timestamps */ intel_de_rmw(i915, regs.aud_config, AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK, AUD_CONFIG_N_PROG_ENABLE | (intel_crtc_has_dp_encoder(old_crtc_state) ? 
AUD_CONFIG_N_VALUE_INDEX : 0)); /* Invalidate ELD */ intel_de_rmw(i915, regs.aud_cntrl_st2, IBX_ELD_VALID(port), 0); mutex_unlock(&i915->display.audio.mutex); intel_crtc_wait_for_next_vblank(crtc); intel_crtc_wait_for_next_vblank(crtc); } static void ibx_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; struct ibx_audio_regs regs; if (drm_WARN_ON(&i915->drm, port == PORT_A)) return; intel_crtc_wait_for_next_vblank(crtc); ibx_audio_regs_init(i915, pipe, &regs); mutex_lock(&i915->display.audio.mutex); /* Invalidate ELD */ intel_de_rmw(i915, regs.aud_cntrl_st2, IBX_ELD_VALID(port), 0); /* * The audio componenent is used to convey the ELD * instead using of the hardware ELD buffer. */ /* Enable timestamps */ intel_de_rmw(i915, regs.aud_config, AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE | AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, (intel_crtc_has_dp_encoder(crtc_state) ? AUD_CONFIG_N_VALUE_INDEX : audio_config_hdmi_pixel_clock(crtc_state))); mutex_unlock(&i915->display.audio.mutex); } void intel_audio_sdp_split_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum transcoder trans = crtc_state->cpu_transcoder; if (HAS_DP20(i915)) intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT, crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0); } bool intel_audio_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!connector->eld[0]) { drm_dbg_kms(&i915->drm, "Bogus ELD on [CONNECTOR:%d:%s]\n", connector->base.id, connector->name); return false; } BUILD_BUG_ON(sizeof(crtc_state->eld) != sizeof(connector->eld)); memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld)); crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; return true; } /** * intel_audio_codec_enable - Enable the audio codec for HD audio * @encoder: encoder on which to enable audio * @crtc_state: pointer to the current crtc state. * @conn_state: pointer to the current connector state. * * The enable sequences may only be performed after enabling the transcoder and * port, and after completed link training. 
*/ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(conn_state->connector); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; if (!crtc_state->has_audio) return; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, crtc->base.base.id, crtc->base.name, drm_eld_size(crtc_state->eld)); if (i915->display.funcs.audio) i915->display.funcs.audio->audio_codec_enable(encoder, crtc_state, conn_state); mutex_lock(&i915->display.audio.mutex); audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = encoder; BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld)); memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld)); mutex_unlock(&i915->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, (int)port, (int)cpu_transcoder); } intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld, crtc_state->port_clock, intel_crtc_has_dp_encoder(crtc_state)); } /** * intel_audio_codec_disable - Disable the audio codec for HD audio * @encoder: encoder on which to disable audio * @old_crtc_state: pointer to the old crtc state. * @old_conn_state: pointer to the old connector state. * * The disable sequences must be performed before disabling the transcoder or * port. 
*/ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(old_conn_state->connector); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; if (!old_crtc_state->has_audio) return; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, crtc->base.base.id, crtc->base.name); if (i915->display.funcs.audio) i915->display.funcs.audio->audio_codec_disable(encoder, old_crtc_state, old_conn_state); mutex_lock(&i915->display.audio.mutex); audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = NULL; memset(audio_state->eld, 0, sizeof(audio_state->eld)); mutex_unlock(&i915->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, (int)port, (int)cpu_transcoder); } intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false); } static void intel_acomp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; mutex_lock(&i915->display.audio.mutex); audio_state = &i915->display.audio.state[cpu_transcoder]; if (audio_state->encoder) memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld)); mutex_unlock(&i915->display.audio.mutex); } void intel_audio_codec_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (!crtc_state->has_audio) return; if (i915->display.funcs.audio) i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state); } static const struct intel_audio_funcs g4x_audio_funcs = { .audio_codec_enable = g4x_audio_codec_enable, .audio_codec_disable = g4x_audio_codec_disable, .audio_codec_get_config = g4x_audio_codec_get_config, }; static const struct intel_audio_funcs ibx_audio_funcs = { .audio_codec_enable = ibx_audio_codec_enable, .audio_codec_disable = ibx_audio_codec_disable, .audio_codec_get_config = intel_acomp_get_config, }; static const struct intel_audio_funcs hsw_audio_funcs = { .audio_codec_enable = hsw_audio_codec_enable, .audio_codec_disable = hsw_audio_codec_disable, .audio_codec_get_config = intel_acomp_get_config, }; /** * intel_audio_hooks_init - Set up chip specific audio hooks * @i915: device private */ void intel_audio_hooks_init(struct drm_i915_private *i915) { if (IS_G4X(i915)) i915->display.funcs.audio = &g4x_audio_funcs; else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) || HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915)) i915->display.funcs.audio = &ibx_audio_funcs; else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8) i915->display.funcs.audio = &hsw_audio_funcs; } struct aud_ts_cdclk_m_n { u8 m; u16 n; }; void 
intel_audio_cdclk_change_pre(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 13) intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0); } static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) { aud_ts->m = 60; aud_ts->n = cdclk * aud_ts->m / 24000; } void intel_audio_cdclk_change_post(struct drm_i915_private *i915) { struct aud_ts_cdclk_m_n aud_ts; if (DISPLAY_VER(i915) >= 13) { get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts); intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n); } } static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state, struct intel_crtc *crtc, bool enable) { struct intel_cdclk_state *cdclk_state; int ret; /* need to hold at least one crtc lock for the global state */ ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx); if (ret) return ret; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0; return drm_atomic_commit(&state->base); } static void glk_force_audio_cdclk(struct drm_i915_private *i915, bool enable) { struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct intel_crtc *crtc; int ret; crtc = intel_first_crtc(i915); if (!crtc) return; drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(&i915->drm); if (drm_WARN_ON(&i915->drm, !state)) return; state->acquire_ctx = &ctx; to_intel_atomic_state(state)->internal = true; retry: ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc, enable); if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } drm_WARN_ON(&i915->drm, ret); drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } static unsigned long i915_audio_component_get_power(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); intel_wakeref_t ret; /* Catch potential impedance mismatches before they occur! */ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long)); ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK); if (i915->display.audio.power_refcount++ == 0) { if (DISPLAY_VER(i915) >= 9) { intel_de_write(i915, AUD_FREQ_CNTRL, i915->display.audio.freq_cntrl); drm_dbg_kms(&i915->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", i915->display.audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ if (IS_GEMINILAKE(i915)) glk_force_audio_cdclk(i915, true); if (DISPLAY_VER(i915) >= 10) intel_de_rmw(i915, AUD_PIN_BUF_CTL, 0, AUD_PIN_BUF_ENABLE); } return ret; } static void i915_audio_component_put_power(struct device *kdev, unsigned long cookie) { struct drm_i915_private *i915 = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--i915->display.audio.power_refcount == 0) if (IS_GEMINILAKE(i915)) glk_force_audio_cdclk(i915, false); intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, cookie); } static void i915_audio_component_codec_wake_override(struct device *kdev, bool enable) { struct drm_i915_private *i915 = kdev_to_i915(kdev); unsigned long cookie; if (DISPLAY_VER(i915) < 9) return; cookie = i915_audio_component_get_power(kdev); /* * Enable/disable generating the codec wake signal, overriding the * internal logic to generate the codec wake to controller. 
*/ intel_de_rmw(i915, HSW_AUD_CHICKENBIT, SKL_AUD_CODEC_WAKE_SIGNAL, 0); usleep_range(1000, 1500); if (enable) { intel_de_rmw(i915, HSW_AUD_CHICKENBIT, 0, SKL_AUD_CODEC_WAKE_SIGNAL); usleep_range(1000, 1500); } i915_audio_component_put_power(kdev, cookie); } /* Get CDCLK in kHz */ static int i915_audio_component_get_cdclk_freq(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915))) return -ENODEV; return i915->display.cdclk.hw.cdclk; } /* * get the intel audio state according to the parameter port and cpu_transcoder * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder], * when port is matched * MST & (cpu_transcoder < 0): this is invalid * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry) * will get the right intel_encoder with port matched * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched */ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, int port, int cpu_transcoder) { /* MST */ if (cpu_transcoder >= 0) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; if (drm_WARN_ON(&i915->drm, cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state))) return NULL; audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && encoder->type == INTEL_OUTPUT_DP_MST) return audio_state; } /* Non-MST */ if (cpu_transcoder > 0) return NULL; for_each_cpu_transcoder(i915, cpu_transcoder) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && encoder->type != INTEL_OUTPUT_DP_MST) return audio_state; } return NULL; } static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int cpu_transcoder, int rate) { struct drm_i915_private *i915 = kdev_to_i915(kdev); struct i915_audio_component *acomp = i915->display.audio.component; const struct intel_audio_state *audio_state; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; int err = 0; if (!HAS_DDI(i915)) return 0; cookie = i915_audio_component_get_power(kdev); mutex_lock(&i915->display.audio.mutex); audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); err = -ENODEV; goto unlock; } encoder = audio_state->encoder; /* FIXME stop using the legacy crtc pointer */ crtc = to_intel_crtc(encoder->base.crtc); /* port must be valid now, otherwise the cpu_transcoder will be invalid */ acomp->aud_sample_rate[port] = rate; /* FIXME get rid of the crtc->config stuff */ hsw_audio_config_update(encoder, crtc->config); unlock: mutex_unlock(&i915->display.audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } static int i915_audio_component_get_eld(struct device *kdev, int port, int cpu_transcoder, bool *enabled, unsigned char *buf, int max_bytes) { struct drm_i915_private *i915 = kdev_to_i915(kdev); const struct intel_audio_state *audio_state; int ret = 0; mutex_lock(&i915->display.audio.mutex); audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); mutex_unlock(&i915->display.audio.mutex); return -EINVAL; } *enabled = audio_state->encoder != NULL; if (*enabled) { const u8 *eld = audio_state->eld; ret = 
drm_eld_size(eld); memcpy(buf, eld, min(max_bytes, ret)); } mutex_unlock(&i915->display.audio.mutex); return ret; } static const struct drm_audio_component_ops i915_audio_component_ops = { .owner = THIS_MODULE, .get_power = i915_audio_component_get_power, .put_power = i915_audio_component_put_power, .codec_wake_override = i915_audio_component_codec_wake_override, .get_cdclk_freq = i915_audio_component_get_cdclk_freq, .sync_audio_rate = i915_audio_component_sync_audio_rate, .get_eld = i915_audio_component_get_eld, }; static int i915_audio_component_bind(struct device *i915_kdev, struct device *hda_kdev, void *data) { struct i915_audio_component *acomp = data; struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); int i; if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev)) return -EEXIST; if (drm_WARN_ON(&i915->drm, !device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS))) return -ENOMEM; drm_modeset_lock_all(&i915->drm); acomp->base.ops = &i915_audio_component_ops; acomp->base.dev = i915_kdev; BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; i915->display.audio.component = acomp; drm_modeset_unlock_all(&i915->drm); return 0; } static void i915_audio_component_unbind(struct device *i915_kdev, struct device *hda_kdev, void *data) { struct i915_audio_component *acomp = data; struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); drm_modeset_lock_all(&i915->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; i915->display.audio.component = NULL; drm_modeset_unlock_all(&i915->drm); device_link_remove(hda_kdev, i915_kdev); if (i915->display.audio.power_refcount) drm_err(&i915->drm, "audio power refcount %d after unbind\n", i915->display.audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { .bind = i915_audio_component_bind, .unbind = i915_audio_component_unbind, }; #define AUD_FREQ_TMODE_SHIFT 14 #define AUD_FREQ_4T 0 #define AUD_FREQ_8T (2 << AUD_FREQ_TMODE_SHIFT) #define AUD_FREQ_PULLCLKS(x) (((x) & 0x3) << 11) #define AUD_FREQ_BCLK_96M BIT(4) #define AUD_FREQ_GEN12 (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(0) | AUD_FREQ_BCLK_96M) #define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M) /** * i915_audio_component_init - initialize and register the audio component * @i915: i915 device instance * * This will register with the component framework a child component which * will bind dynamically to the snd_hda_intel driver's corresponding master * component when the latter is registered. During binding the child * initializes an instance of struct i915_audio_component which it receives * from the master. The master can then start to use the interface defined by * this struct. Each side can break the binding at any point by deregistering * its own component after which each side's component unbind callback is * called. * * We ignore any error during registration and continue with reduced * functionality (i.e. without HDMI audio). 
*/ static void i915_audio_component_init(struct drm_i915_private *i915) { u32 aud_freq, aud_freq_init; int ret; ret = component_add_typed(i915->drm.dev, &i915_audio_component_bind_ops, I915_COMPONENT_AUDIO); if (ret < 0) { drm_err(&i915->drm, "failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ return; } if (DISPLAY_VER(i915) >= 9) { aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL); if (DISPLAY_VER(i915) >= 12) aud_freq = AUD_FREQ_GEN12; else aud_freq = aud_freq_init; /* use BIOS provided value for TGL and RKL unless it is a known bad value */ if ((IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) && aud_freq_init != AUD_FREQ_TGL_BROKEN) aud_freq = aud_freq_init; drm_dbg_kms(&i915->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); i915->display.audio.freq_cntrl = aud_freq; } /* init with current cdclk */ intel_audio_cdclk_change_post(i915); i915->display.audio.component_registered = true; } /** * i915_audio_component_cleanup - deregister the audio component * @i915: i915 device instance * * Deregisters the audio component, breaking any existing binding to the * corresponding snd_hda_intel driver's master component. */ static void i915_audio_component_cleanup(struct drm_i915_private *i915) { if (!i915->display.audio.component_registered) return; component_del(i915->drm.dev, &i915_audio_component_bind_ops); i915->display.audio.component_registered = false; } /** * intel_audio_init() - Initialize the audio driver either using * component framework or using lpe audio bridge * @i915: the i915 drm device private data * */ void intel_audio_init(struct drm_i915_private *i915) { if (intel_lpe_audio_init(i915) < 0) i915_audio_component_init(i915); } /** * intel_audio_deinit() - deinitialize the audio driver * @i915: the i915 drm device private data * */ void intel_audio_deinit(struct drm_i915_private *i915) { if (i915->display.audio.lpe.platdev != NULL) intel_lpe_audio_teardown(i915); else i915_audio_component_cleanup(i915); }
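For reference, the dp_aud_n_m[] table near the top of this file stores the DP audio timestamp ratio Maud/Naud = 512 * fs / f_LS_Clk (per DP 1.4 Table 2-104) in reduced form, with the link symbol clock given in kHz. The following standalone check verifies that arithmetic against two table entries; it is illustrative only, and gcd64() and dp_aud_m_n() are invented names rather than driver functions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reduce 512 * fs / f_LS_Clk to lowest terms and compare with dp_aud_n_m[]. */
static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static void dp_aud_m_n(unsigned int sample_rate_hz, unsigned int link_clock_khz,
		       uint64_t *m, uint64_t *n)
{
	uint64_t num = 512ull * sample_rate_hz;		/* Maud side: 512 * fs          */
	uint64_t den = 1000ull * link_clock_khz;	/* link symbol clock in Hz      */
	uint64_t g = gcd64(num, den);

	*m = num / g;
	*n = den / g;
}

int main(void)
{
	uint64_t m, n;

	dp_aud_m_n(48000, 162000, &m, &n);	/* table: { 48000, LC_162M, 512, 3375 } */
	assert(m == 512 && n == 3375);

	dp_aud_m_n(44100, 270000, &m, &n);	/* table: { 44100, LC_270M, 784, 9375 } */
	assert(m == 784 && n == 9375);

	printf("dp_aud_n_m ratios match 512 * fs / f_LS_Clk\n");
	return 0;
}

When no table entry matches the current rate and link clock, hsw_dp_audio_config_update() above leaves both values to the hardware ("automatic Maud, Naud").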
linux-master
drivers/gpu/drm/i915/display/intel_audio.c
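The hdmi_aud_ncts_*[] tables in intel_audio.c above encode the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS. The fractional clocks are stored rounded to kHz (for example TMDS_296M = 296703), so an exact integer check only applies to the integral-rate clocks; for those, the CTS column follows directly from N. A small standalone verification of that relation follows; it is illustrative only, and hdmi_cts() is an invented helper, not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * HDMI audio clock regeneration: 128 * fs = f_TMDS * N / CTS, hence
 * CTS = f_TMDS * N / (128 * fs). The table clock is in kHz.
 */
static uint64_t hdmi_cts(uint64_t tmds_khz, uint64_t n, uint64_t fs_hz)
{
	return tmds_khz * 1000 * n / (128 * fs_hz);
}

int main(void)
{
	/* { 32000, TMDS_297M, 3072, 222750 } */
	assert(hdmi_cts(297000, 3072, 32000) == 222750);
	/* { 44100, TMDS_297M, 4704, 247500 } */
	assert(hdmi_cts(297000, 4704, 44100) == 247500);
	/* { 48000, TMDS_594M, 6144, 594000 } */
	assert(hdmi_cts(594000, 6144, 48000) == 594000);

	printf("N/CTS entries consistent with 128*fs = f_TMDS*N/CTS\n");
	return 0;
}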
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/drm_blend.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> #include "i915_drv.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a)) /* * From the Sky Lake PRM: * "The Color Control Surface (CCS) contains the compression status of * the cache-line pairs. The compression state of the cache-line pair * is specified by 2 bits in the CCS. Each CCS cache-line represents * an area on the main surface of 16 x16 sets of 128 byte Y-tiled * cache-line-pairs. CCS is always Y tiled." * * Since cache line pairs refers to horizontally adjacent cache lines, * each cache line in the CCS corresponds to an area of 32x16 cache * lines on the main surface. Since each pixel is 4 bytes, this gives * us a ratio of one byte in the CCS for each 8x16 pixels in the * main surface. */ static const struct drm_format_info skl_ccs_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, }; /* * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles * in the main surface. With 4 byte pixels and each Y-tile having dimensions of * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in * the main surface. 
*/ static const struct drm_format_info gen12_ccs_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_YUYV, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_YVYU, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_UYVY, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_VYUY, .num_planes = 2, .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_NV12, .num_planes = 4, .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_P010, .num_planes = 4, .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_P012, .num_planes = 4, .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_P016, .num_planes = 4, .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true }, }; /* * Same as gen12_ccs_formats[] above, but with additional surface used * to pass Clear Color information in plane 2 with 64 bits of data. 
*/ static const struct drm_format_info gen12_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; static const struct drm_format_info gen12_flat_ccs_cc_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, .hsub = 1, .vsub = 1, .has_alpha = true }, }; struct intel_modifier_desc { u64 modifier; struct { u8 from; u8 until; } display_ver; #define DISPLAY_VER_ALL { 0, -1 } const struct drm_format_info *formats; int format_count; #define FORMAT_OVERRIDE(format_list) \ .formats = format_list, \ .format_count = ARRAY_SIZE(format_list) u8 plane_caps; struct { u8 cc_planes:3; u8 packed_aux_planes:4; u8 planar_aux_planes:4; } ccs; }; #define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \ INTEL_PLANE_CAP_CCS_RC_CC | \ INTEL_PLANE_CAP_CCS_MC) #define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \ INTEL_PLANE_CAP_TILING_Y | \ INTEL_PLANE_CAP_TILING_Yf | \ INTEL_PLANE_CAP_TILING_4) #define INTEL_PLANE_CAP_TILING_NONE 0 static const struct intel_modifier_desc intel_modifiers[] = { { .modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS, .display_ver = { 14, 14 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC, .ccs.packed_aux_planes = BIT(1), .ccs.planar_aux_planes = BIT(2) | BIT(3), FORMAT_OVERRIDE(gen12_ccs_formats), }, { .modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS, .display_ver = { 14, 14 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC, .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(gen12_ccs_formats), }, { .modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC, .display_ver = { 14, 14 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC_CC, .ccs.cc_planes = BIT(2), .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(gen12_ccs_cc_formats), }, { .modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS, .display_ver = { 13, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC, }, { .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC, .display_ver = { 13, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC_CC, .ccs.cc_planes = BIT(1), FORMAT_OVERRIDE(gen12_flat_ccs_cc_formats), }, { .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS, .display_ver = { 13, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC, }, { .modifier = I915_FORMAT_MOD_4_TILED, .display_ver = { 13, -1 }, 
.plane_caps = INTEL_PLANE_CAP_TILING_4, }, { .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, .display_ver = { 12, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC, .ccs.packed_aux_planes = BIT(1), .ccs.planar_aux_planes = BIT(2) | BIT(3), FORMAT_OVERRIDE(gen12_ccs_formats), }, { .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, .display_ver = { 12, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(gen12_ccs_formats), }, { .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, .display_ver = { 12, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC, .ccs.cc_planes = BIT(2), .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(gen12_ccs_cc_formats), }, { .modifier = I915_FORMAT_MOD_Yf_TILED_CCS, .display_ver = { 9, 11 }, .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC, .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(skl_ccs_formats), }, { .modifier = I915_FORMAT_MOD_Y_TILED_CCS, .display_ver = { 9, 11 }, .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, .ccs.packed_aux_planes = BIT(1), FORMAT_OVERRIDE(skl_ccs_formats), }, { .modifier = I915_FORMAT_MOD_Yf_TILED, .display_ver = { 9, 11 }, .plane_caps = INTEL_PLANE_CAP_TILING_Yf, }, { .modifier = I915_FORMAT_MOD_Y_TILED, .display_ver = { 9, 13 }, .plane_caps = INTEL_PLANE_CAP_TILING_Y, }, { .modifier = I915_FORMAT_MOD_X_TILED, .display_ver = DISPLAY_VER_ALL, .plane_caps = INTEL_PLANE_CAP_TILING_X, }, { .modifier = DRM_FORMAT_MOD_LINEAR, .display_ver = DISPLAY_VER_ALL, }, }; static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier) { int i; for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) if (intel_modifiers[i].modifier == modifier) return &intel_modifiers[i]; return NULL; } static const struct intel_modifier_desc *lookup_modifier(u64 modifier) { const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); if (WARN_ON(!md)) return &intel_modifiers[0]; return md; } static const struct drm_format_info * lookup_format_info(const struct drm_format_info formats[], int num_formats, u32 format) { int i; for (i = 0; i < num_formats; i++) { if (formats[i].format == format) return &formats[i]; } return NULL; } /** * intel_fb_get_format_info: Get a modifier specific format information * @cmd: FB add command structure * * Returns: * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], * or %NULL if the modifier doesn't override the format. */ const struct drm_format_info * intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) { const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); if (!md || !md->formats) return NULL; return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); } static bool plane_caps_contain_any(u8 caps, u8 mask) { return caps & mask; } static bool plane_caps_contain_all(u8 caps, u8 mask) { return (caps & mask) == mask; } /** * intel_fb_is_tiled_modifier: Check if a modifier is a tiled modifier type * @modifier: Modifier to check * * Returns: * Returns %true if @modifier is a tiled modifier. */ bool intel_fb_is_tiled_modifier(u64 modifier) { return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, INTEL_PLANE_CAP_TILING_MASK); } /** * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type * @modifier: Modifier to check * * Returns: * Returns %true if @modifier is a render, render with color clear or * media compression modifier. 
*/ bool intel_fb_is_ccs_modifier(u64 modifier) { return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, INTEL_PLANE_CAP_CCS_MASK); } /** * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type * @modifier: Modifier to check * * Returns: * Returns %true if @modifier is a render with color clear modifier. */ bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier) { return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, INTEL_PLANE_CAP_CCS_RC_CC); } /** * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type * @modifier: Modifier to check * * Returns: * Returns %true if @modifier is a media compression modifier. */ bool intel_fb_is_mc_ccs_modifier(u64 modifier) { return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, INTEL_PLANE_CAP_CCS_MC); } static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, u8 display_ver_from, u8 display_ver_until) { return md->display_ver.from <= display_ver_until && display_ver_from <= md->display_ver.until; } static bool plane_has_modifier(struct drm_i915_private *i915, u8 plane_caps, const struct intel_modifier_desc *md) { if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until)) return false; if (!plane_caps_contain_all(plane_caps, md->plane_caps)) return false; /* * Separate AuxCCS and Flat CCS modifiers to be run only on platforms * where supported. */ if (intel_fb_is_ccs_modifier(md->modifier) && HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes) return false; return true; } /** * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities * @i915: i915 device instance * @plane_caps: capabilities for the plane the modifiers are queried for * * Returns: * Returns the list of modifiers allowed by the @i915 platform and @plane_caps. * The caller must free the returned buffer. */ u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, u8 plane_caps) { u64 *list, *p; int count = 1; /* +1 for invalid modifier terminator */ int i; for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) count++; } list = kmalloc_array(count, sizeof(*list), GFP_KERNEL); if (drm_WARN_ON(&i915->drm, !list)) return NULL; p = list; for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) *p++ = intel_modifiers[i].modifier; } *p++ = DRM_FORMAT_MOD_INVALID; return list; } /** * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane * @plane: Plane to check the modifier support for * @modifier: The modifier to check the support for * * Returns: * %true if the @modifier is supported on @plane. */ bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier) { int i; for (i = 0; i < plane->base.modifier_count; i++) if (plane->base.modifiers[i] == modifier) return true; return false; } static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md, const struct drm_format_info *info) { if (!info->is_yuv) return false; if (hweight8(md->ccs.planar_aux_planes) == 2) return info->num_planes == 4; else return info->num_planes == 2; } /** * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar * @info: format to check * @modifier: modifier used with the format * * Returns: * %true if @info / @modifier is YUV semiplanar. 
*/ bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, u64 modifier) { return format_is_yuv_semiplanar(lookup_modifier(modifier), info); } static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md, const struct drm_format_info *format) { if (format_is_yuv_semiplanar(md, format)) return md->ccs.planar_aux_planes; else return md->ccs.packed_aux_planes; } /** * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane * @fb: Framebuffer * @color_plane: color plane index to check * * Returns: * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane. */ bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) { const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } /** * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane * @fb: Framebuffer * @color_plane: color plane index to check * * Returns: * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane. */ static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) { const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); return check_modifier_display_ver_range(md, 12, 14) && ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } /** * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer * @fb: Framebuffer * * Returns: * Returns the index of the color clear plane for @fb, or -1 if @fb is not a * framebuffer using a render compression/color clear modifier. */ int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb) { const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); if (!md->ccs.cc_planes) return -1; drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1); return ilog2((int)md->ccs.cc_planes); } static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane) { return intel_fb_rc_ccs_cc_plane(fb) == color_plane; } static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) { return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && color_plane == 1; } bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) { return fb->modifier == DRM_FORMAT_MOD_LINEAR || intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) || is_gen12_ccs_cc_plane(fb, color_plane); } int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) { drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || (main_plane && main_plane >= fb->format->num_planes / 2)); return fb->format->num_planes / 2 + main_plane; } int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) { drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || ccs_plane < fb->format->num_planes / 2); if (is_gen12_ccs_cc_plane(fb, ccs_plane)) return 0; return ccs_plane - fb->format->num_planes / 2; } static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane) { int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane); unsigned int main_stride = fb->base.pitches[main_plane]; unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane); return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64; } int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); struct drm_i915_private *i915 = to_i915(fb->dev); if 
(md->ccs.packed_aux_planes | md->ccs.planar_aux_planes) return main_to_ccs_plane(fb, main_plane); else if (DISPLAY_VER(i915) < 11 && format_is_yuv_semiplanar(md, fb->format)) return 1; else return 0; } unsigned int intel_tile_size(const struct drm_i915_private *i915) { return DISPLAY_VER(i915) == 2 ? 2048 : 4096; } unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); unsigned int cpp = fb->format->cpp[color_plane]; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: return intel_tile_size(dev_priv); case I915_FORMAT_MOD_X_TILED: if (DISPLAY_VER(dev_priv) == 2) return 128; else return 512; case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: case I915_FORMAT_MOD_4_TILED: /* * Each 4K tile consists of 64B(8*8) subtiles, with * same shape as Y Tile(i.e 4*16B OWords) */ return 128; case I915_FORMAT_MOD_Y_TILED_CCS: if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (intel_fb_is_ccs_aux_plane(fb, color_plane) || is_gen12_ccs_cc_plane(fb, color_plane)) return 64; fallthrough; case I915_FORMAT_MOD_Y_TILED: if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv)) return 128; else return 512; case I915_FORMAT_MOD_Yf_TILED_CCS: if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_Yf_TILED: switch (cpp) { case 1: return 64; case 2: case 4: return 128; case 8: case 16: return 256; default: MISSING_CASE(cpp); return cpp; } break; default: MISSING_CASE(fb->modifier); return cpp; } } unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane) { return intel_tile_size(to_i915(fb->dev)) / intel_tile_width_bytes(fb, color_plane); } /* * Return the tile dimensions in pixel units, based on the (2 or 4 kbyte) GTT * page tile size. */ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, unsigned int *tile_width, unsigned int *tile_height) { unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane); unsigned int cpp = fb->format->cpp[color_plane]; *tile_width = tile_width_bytes / cpp; *tile_height = intel_tile_height(fb, color_plane); } /* * Return the tile dimensions in pixel units, based on the tile block size. * The block covers the full GTT page sized tile on all tiled surfaces and * it's a 64 byte portion of the tile on TGL+ CCS surfaces. 
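 * For example, per intel_tile_width_bytes() above, a gen12 CCS AUX plane has a 64 byte tile width, and intel_tile_block_dims() below additionally clamps that plane's block height to a single row.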
*/ static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_plane, unsigned int *tile_width, unsigned int *tile_height) { intel_tile_dims(fb, color_plane, tile_width, tile_height); if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) *tile_height = 1; } unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane) { unsigned int tile_width, tile_height; intel_tile_dims(fb, color_plane, &tile_width, &tile_height); return fb->pitches[color_plane] * tile_height; } unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int height) { unsigned int tile_height = intel_tile_height(fb, color_plane); return ALIGN(height, tile_height); } static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) { u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps & INTEL_PLANE_CAP_TILING_MASK; switch (tiling_caps) { case INTEL_PLANE_CAP_TILING_Y: return I915_TILING_Y; case INTEL_PLANE_CAP_TILING_X: return I915_TILING_X; case INTEL_PLANE_CAP_TILING_4: case INTEL_PLANE_CAP_TILING_Yf: case INTEL_PLANE_CAP_TILING_NONE: return I915_TILING_NONE; default: MISSING_CASE(tiling_caps); return I915_TILING_NONE; } } bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) { return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR; } bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) { return fb && to_i915(fb->dev)->params.enable_dpt && intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); } unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) { if (IS_I830(i915)) return 16 * 1024; else if (IS_I85X(i915)) return 256; else if (IS_I845G(i915) || IS_I865G(i915)) return 32; else return 4 * 1024; } static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 9) return 256 * 1024; else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return 128 * 1024; else if (DISPLAY_VER(dev_priv) >= 4) return 4 * 1024; else return 0; } unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); if (intel_fb_uses_dpt(fb)) return 512 * 4096; /* AUX_DIST needs only 4K alignment */ if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 4096; if (is_semiplanar_uv_plane(fb, color_plane)) { /* * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes * alignment for linear UV planes on all platforms. 
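 * For reference, the current behaviour below: on TGL+ a linear UV plane uses the generic linear alignment, a tiled one is aligned to its tile row size, and older platforms fall back to 4K.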
*/ if (DISPLAY_VER(dev_priv) >= 12) { if (fb->modifier == DRM_FORMAT_MOD_LINEAR) return intel_linear_alignment(dev_priv); return intel_tile_row_size(fb, color_plane); } return 4096; } drm_WARN_ON(&dev_priv->drm, color_plane != 0); switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: return intel_linear_alignment(dev_priv); case I915_FORMAT_MOD_X_TILED: if (HAS_ASYNC_FLIPS(dev_priv)) return 256 * 1024; return 0; case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_4_TILED: case I915_FORMAT_MOD_Yf_TILED: return 1 * 1024 * 1024; case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: return 16 * 1024; default: MISSING_CASE(fb->modifier); return 0; } } void intel_fb_plane_get_subsampling(int *hsub, int *vsub, const struct drm_framebuffer *fb, int color_plane) { int main_plane; if (color_plane == 0) { *hsub = 1; *vsub = 1; return; } /* * TODO: Deduct the subsampling from the char block for all CCS * formats and planes. */ if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) { *hsub = fb->format->hsub; *vsub = fb->format->vsub; return; } main_plane = skl_ccs_to_main_plane(fb, color_plane); *hsub = drm_format_info_block_width(fb->format, color_plane) / drm_format_info_block_width(fb->format, main_plane); /* * The min stride check in the core framebuffer_check() function * assumes that format->hsub applies to every plane except for the * first plane. That's incorrect for the CCS AUX plane of the first * plane, but for the above check to pass we must define the block * width with that subsampling applied to it. Adjust the width here * accordingly, so we can calculate the actual subsampling factor. */ if (main_plane == 0) *hsub *= fb->format->hsub; *vsub = 32; } static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h) { int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ? 
skl_ccs_to_main_plane(&fb->base, color_plane) : 0; unsigned int main_width = fb->base.width; unsigned int main_height = fb->base.height; int main_hsub, main_vsub; int hsub, vsub; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); *w = DIV_ROUND_UP(main_width, main_hsub * hsub); *h = DIV_ROUND_UP(main_height, main_vsub * vsub); } static u32 intel_adjust_tile_offset(int *x, int *y, unsigned int tile_width, unsigned int tile_height, unsigned int tile_size, unsigned int pitch_tiles, u32 old_offset, u32 new_offset) { unsigned int pitch_pixels = pitch_tiles * tile_width; unsigned int tiles; WARN_ON(old_offset & (tile_size - 1)); WARN_ON(new_offset & (tile_size - 1)); WARN_ON(new_offset > old_offset); tiles = (old_offset - new_offset) / tile_size; *y += tiles / pitch_tiles * tile_height; *x += tiles % pitch_tiles * tile_width; /* minimize x in case it got needlessly big */ *y += *x / pitch_pixels * tile_height; *x %= pitch_pixels; return new_offset; } static u32 intel_adjust_linear_offset(int *x, int *y, unsigned int cpp, unsigned int pitch, u32 old_offset, u32 new_offset) { old_offset += *y * pitch + *x * cpp; *y = (old_offset - new_offset) / pitch; *x = ((old_offset - new_offset) - *y * pitch) / cpp; return new_offset; } static u32 intel_adjust_aligned_offset(int *x, int *y, const struct drm_framebuffer *fb, int color_plane, unsigned int rotation, unsigned int pitch, u32 old_offset, u32 new_offset) { struct drm_i915_private *i915 = to_i915(fb->dev); unsigned int cpp = fb->format->cpp[color_plane]; drm_WARN_ON(&i915->drm, new_offset > old_offset); if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int pitch_tiles; tile_size = intel_tile_size(i915); intel_tile_dims(fb, color_plane, &tile_width, &tile_height); if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; swap(tile_width, tile_height); } else { pitch_tiles = pitch / (tile_width * cpp); } intel_adjust_tile_offset(x, y, tile_width, tile_height, tile_size, pitch_tiles, old_offset, new_offset); } else { intel_adjust_linear_offset(x, y, cpp, pitch, old_offset, new_offset); } return new_offset; } /* * Adjust the tile offset by moving the difference into * the x/y offsets. */ u32 intel_plane_adjust_aligned_offset(int *x, int *y, const struct intel_plane_state *state, int color_plane, u32 old_offset, u32 new_offset) { return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, state->hw.rotation, state->view.color_plane[color_plane].mapping_stride, old_offset, new_offset); } /* * Computes the aligned offset to the base tile and adjusts * x, y. bytes per pixel is assumed to be a power-of-two. * * In the 90/270 rotated case, x and y are assumed * to be already rotated to match the rotated GTT view, and * pitch is the tile_height aligned framebuffer height. * * This function is used when computing the derived information * under intel_framebuffer, so using any of that information * here is not allowed. Anything under drm_framebuffer can be * used. This is why the user has to pass in the pitch since it * is specified in the rotated orientation. 
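 * Illustrative example (hypothetical numbers; unrotated X tiling with a 512 byte x 8 row tile, cpp = 4, pitch = 16384 bytes = 32 tiles, alignment = tile size as passed in by calc_plane_aligned_offset() below): x = 200, y = 100 gives tile_rows = 12 and tiles = 1, so the returned offset is (12 * 32 + 1) * 4096 bytes, while the residual x = 72, y = 4 is kept as the intra-tile offset.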
*/ static u32 intel_compute_aligned_offset(struct drm_i915_private *i915, int *x, int *y, const struct drm_framebuffer *fb, int color_plane, unsigned int pitch, unsigned int rotation, u32 alignment) { unsigned int cpp = fb->format->cpp[color_plane]; u32 offset, offset_aligned; if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; tile_size = intel_tile_size(i915); intel_tile_dims(fb, color_plane, &tile_width, &tile_height); if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; swap(tile_width, tile_height); } else { pitch_tiles = pitch / (tile_width * cpp); } tile_rows = *y / tile_height; *y %= tile_height; tiles = *x / tile_width; *x %= tile_width; offset = (tile_rows * pitch_tiles + tiles) * tile_size; offset_aligned = offset; if (alignment) offset_aligned = rounddown(offset_aligned, alignment); intel_adjust_tile_offset(x, y, tile_width, tile_height, tile_size, pitch_tiles, offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; offset_aligned = offset; if (alignment) { offset_aligned = rounddown(offset_aligned, alignment); *y = (offset % alignment) / pitch; *x = ((offset % alignment) - *y * pitch) / cpp; } else { *y = *x = 0; } } return offset_aligned; } u32 intel_plane_compute_aligned_offset(int *x, int *y, const struct intel_plane_state *state, int color_plane) { struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); struct drm_i915_private *i915 = to_i915(intel_plane->base.dev); const struct drm_framebuffer *fb = state->hw.fb; unsigned int rotation = state->hw.rotation; int pitch = state->view.color_plane[color_plane].mapping_stride; u32 alignment; if (intel_plane->id == PLANE_CURSOR) alignment = intel_cursor_alignment(i915); else alignment = intel_surf_alignment(fb, color_plane); return intel_compute_aligned_offset(i915, x, y, fb, color_plane, pitch, rotation, alignment); } /* Convert the fb->offset[] into x/y offsets */ static int intel_fb_offset_to_xy(int *x, int *y, const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *i915 = to_i915(fb->dev); unsigned int height; u32 alignment; if (DISPLAY_VER(i915) >= 12 && !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && is_semiplanar_uv_plane(fb, color_plane)) alignment = intel_tile_row_size(fb, color_plane); else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) alignment = intel_tile_size(i915); else alignment = 0; if (alignment != 0 && fb->offsets[color_plane] % alignment) { drm_dbg_kms(&i915->drm, "Misaligned offset 0x%08x for color plane %d\n", fb->offsets[color_plane], color_plane); return -EINVAL; } height = drm_framebuffer_plane_height(fb->height, fb, color_plane); height = ALIGN(height, intel_tile_height(fb, color_plane)); /* Catch potential overflows early */ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), fb->offsets[color_plane])) { drm_dbg_kms(&i915->drm, "Bad offset 0x%08x or pitch %d for color plane %d\n", fb->offsets[color_plane], fb->pitches[color_plane], color_plane); return -ERANGE; } *x = 0; *y = 0; intel_adjust_aligned_offset(x, y, fb, color_plane, DRM_MODE_ROTATE_0, fb->pitches[color_plane], fb->offsets[color_plane], 0); return 0; } static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane, int x, int y) { struct drm_i915_private *i915 = to_i915(fb->dev); const struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); int main_plane; int hsub, vsub; int tile_width, tile_height; int ccs_x, ccs_y; int main_x, main_y; 
if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) return 0; /* * While all the tile dimensions are based on a 2k or 4k GTT page size * here the main and CCS coordinates must match only within a (64 byte * on TGL+) block inside the tile. */ intel_tile_block_dims(fb, ccs_plane, &tile_width, &tile_height); intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); tile_width *= hsub; tile_height *= vsub; ccs_x = (x * hsub) % tile_width; ccs_y = (y * vsub) % tile_height; main_plane = skl_ccs_to_main_plane(fb, ccs_plane); main_x = intel_fb->normal_view.color_plane[main_plane].x % tile_width; main_y = intel_fb->normal_view.color_plane[main_plane].y % tile_height; /* * CCS doesn't have its own x/y offset register, so the intra CCS tile * x/y offsets must match between CCS and the main surface. */ if (main_x != ccs_x || main_y != ccs_y) { drm_dbg_kms(&i915->drm, "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", main_x, main_y, ccs_x, ccs_y, intel_fb->normal_view.color_plane[main_plane].x, intel_fb->normal_view.color_plane[main_plane].y, x, y); return -EINVAL; } return 0; } static bool intel_plane_can_remap(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; int i; /* We don't want to deal with remapping with cursors */ if (plane->id == PLANE_CURSOR) return false; /* * The display engine limits already match/exceed the * render engine limits, so not much point in remapping. * Would also need to deal with the fence POT alignment * and gen2 2KiB GTT tile size. */ if (DISPLAY_VER(i915) < 4) return false; /* * The new CCS hash mode isn't compatible with remapping as * the virtual address of the pages affects the compressed data. */ if (intel_fb_is_ccs_modifier(fb->modifier)) return false; /* Linear needs a page aligned stride for remapping */ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { unsigned int alignment = intel_tile_size(i915) - 1; for (i = 0; i < fb->format->num_planes; i++) { if (fb->pitches[i] & alignment) return false; } } return true; } bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) { struct drm_i915_private *i915 = to_i915(fb->base.dev); return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) && intel_fb_uses_dpt(&fb->base); } static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) { if (drm_rotation_90_or_270(rotation)) return fb->rotated_view.color_plane[color_plane].mapping_stride; else if (intel_fb_needs_pot_stride_remap(fb)) return fb->remapped_view.color_plane[color_plane].mapping_stride; else return fb->normal_view.color_plane[color_plane].mapping_stride; } static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb); unsigned int rotation = plane_state->hw.rotation; u32 stride, max_stride; /* * No remapping for invisible planes since we don't have * an actual source viewport to remap. */ if (!plane_state->uapi.visible) return false; if (!intel_plane_can_remap(plane_state)) return false; /* * FIXME: aux plane limits on gen9+ are * unclear in Bspec, for now no checking. 
*/ stride = intel_fb_pitch(fb, 0, rotation); max_stride = plane->max_stride(plane, fb->base.format->format, fb->base.modifier, rotation); return stride > max_stride; } static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane, int plane_width, int *x, int *y) { struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); int ret; ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane); if (ret) { drm_dbg_kms(fb->base.dev, "bad fb plane %d offset: 0x%x\n", color_plane, fb->base.offsets[color_plane]); return ret; } ret = intel_fb_check_ccs_xy(&fb->base, color_plane, *x, *y); if (ret) return ret; /* * The fence (if used) is aligned to the start of the object * so having the framebuffer wrap around across the edge of the * fenced region doesn't really work. We have no API to configure * the fence start offset within the object (nor could we probably * on gen2/3). So it's just easier if we just require that the * fb layout agrees with the fence layout. We already check that the * fb stride matches the fence stride elsewhere. */ if (color_plane == 0 && i915_gem_object_is_tiled(obj) && (*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) { drm_dbg_kms(fb->base.dev, "bad fb plane %d offset: 0x%x\n", color_plane, fb->base.offsets[color_plane]); return -EINVAL; } return 0; } static u32 calc_plane_aligned_offset(const struct intel_framebuffer *fb, int color_plane, int *x, int *y) { struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int tile_size = intel_tile_size(i915); u32 offset; offset = intel_compute_aligned_offset(i915, x, y, &fb->base, color_plane, fb->base.pitches[color_plane], DRM_MODE_ROTATE_0, tile_size); return offset / tile_size; } struct fb_plane_view_dims { unsigned int width, height; unsigned int tile_width, tile_height; }; static void init_plane_view_dims(const struct intel_framebuffer *fb, int color_plane, unsigned int width, unsigned int height, struct fb_plane_view_dims *dims) { dims->width = width; dims->height = height; intel_tile_dims(&fb->base, color_plane, &dims->tile_width, &dims->tile_height); } static unsigned int plane_view_src_stride_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims) { return DIV_ROUND_UP(fb->base.pitches[color_plane], dims->tile_width * fb->base.format->cpp[color_plane]); } static unsigned int plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane, unsigned int pitch_tiles) { if (intel_fb_needs_pot_stride_remap(fb)) { /* * ADL_P, the only platform needing a POT stride has a minimum * of 8 main surface tiles. 
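 * e.g. a 13 tile wide main surface stride is padded to 16 tiles here, while anything at or below 8 tiles is padded up to 8.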
*/ return roundup_pow_of_two(max(pitch_tiles, 8u)); } else { return pitch_tiles; } } static unsigned int plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, unsigned int tile_width, unsigned int src_stride_tiles, unsigned int dst_stride_tiles) { struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int stride_tiles; if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) stride_tiles = src_stride_tiles; else stride_tiles = dst_stride_tiles; return stride_tiles * tile_width * fb->base.format->cpp[color_plane]; } static unsigned int plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x) { return DIV_ROUND_UP(x + dims->width, dims->tile_width); } static unsigned int plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int y) { return DIV_ROUND_UP(y + dims->height, dims->tile_height); } static unsigned int plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x, int y) { struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int size; size = (y + dims->height) * fb->base.pitches[color_plane] + x * fb->base.format->cpp[color_plane]; return DIV_ROUND_UP(size, intel_tile_size(i915)); } #define assign_chk_ovf(i915, var, val) ({ \ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \ (var) = (val); \ }) #define assign_bfld_chk_ovf(i915, var, val) ({ \ (var) = (val); \ drm_WARN_ON(&(i915)->drm, (var) != (val)); \ (var); \ }) static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, u32 obj_offset, u32 gtt_offset, int x, int y, struct intel_fb_view *view) { struct drm_i915_private *i915 = to_i915(fb->base.dev); struct intel_remapped_plane_info *remap_info = &view->gtt.remapped.plane[color_plane]; struct i915_color_plane_view *color_plane_info = &view->color_plane[color_plane]; unsigned int tile_width = dims->tile_width; unsigned int tile_height = dims->tile_height; unsigned int tile_size = intel_tile_size(i915); struct drm_rect r; u32 size = 0; assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset); if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) { remap_info->linear = 1; assign_chk_ovf(i915, remap_info->size, plane_view_linear_tiles(fb, color_plane, dims, x, y)); } else { remap_info->linear = 0; assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x)); assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y)); } if (view->gtt.type == I915_GTT_VIEW_ROTATED) { drm_WARN_ON(&i915->drm, remap_info->linear); check_array_bounds(i915, view->gtt.rotated.plane, color_plane); assign_chk_ovf(i915, remap_info->dst_stride, plane_view_dst_stride_tiles(fb, color_plane, remap_info->height)); /* rotate the x/y offsets to match the GTT view */ drm_rect_init(&r, x, y, dims->width, dims->height); drm_rect_rotate(&r, remap_info->width * tile_width, remap_info->height * tile_height, DRM_MODE_ROTATE_270); color_plane_info->x = r.x1; color_plane_info->y = r.y1; color_plane_info->mapping_stride = remap_info->dst_stride * tile_height; color_plane_info->scanout_stride = color_plane_info->mapping_stride; size += remap_info->dst_stride * remap_info->width; /* rotate the tile dimensions to match the GTT view */ swap(tile_width, tile_height); } else { 
drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED); check_array_bounds(i915, view->gtt.remapped.plane, color_plane); if (view->gtt.remapped.plane_alignment) { unsigned int aligned_offset = ALIGN(gtt_offset, view->gtt.remapped.plane_alignment); size += aligned_offset - gtt_offset; gtt_offset = aligned_offset; } color_plane_info->x = x; color_plane_info->y = y; if (remap_info->linear) { color_plane_info->mapping_stride = fb->base.pitches[color_plane]; color_plane_info->scanout_stride = color_plane_info->mapping_stride; size += remap_info->size; } else { unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, remap_info->width); assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); color_plane_info->mapping_stride = dst_stride * tile_width * fb->base.format->cpp[color_plane]; color_plane_info->scanout_stride = plane_view_scanout_stride(fb, color_plane, tile_width, remap_info->src_stride, dst_stride); size += dst_stride * remap_info->height; } } /* * We only keep the x/y offsets, so push all of the gtt offset into * the x/y offsets. x,y will hold the first pixel of the framebuffer * plane from the start of the remapped/rotated gtt mapping. */ if (remap_info->linear) intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y, fb->base.format->cpp[color_plane], color_plane_info->mapping_stride, gtt_offset * tile_size, 0); else intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, tile_width, tile_height, tile_size, remap_info->dst_stride, gtt_offset * tile_size, 0); return size; } #undef assign_chk_ovf /* Return number of tiles @color_plane needs. */ static unsigned int calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x, int y) { unsigned int tiles; if (is_surface_linear(&fb->base, color_plane)) { tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y); } else { tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * plane_view_height_tiles(fb, color_plane, dims, y); /* * If the plane isn't horizontally tile aligned, * we need one more tile. 
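 * (e.g. with a non-zero x the mapping of the last tile row extends past stride_tiles * height_tiles by that leading offset, which can require one extra tile)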
*/ if (x != 0) tiles++; } return tiles; } static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view, enum i915_gtt_view_type view_type) { memset(view, 0, sizeof(*view)); view->gtt.type = view_type; if (view_type == I915_GTT_VIEW_REMAPPED && (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)) view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE; } bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb) { if (DISPLAY_VER(to_i915(fb->base.dev)) >= 13) return false; return fb->base.modifier == I915_FORMAT_MOD_Y_TILED || fb->base.modifier == I915_FORMAT_MOD_Yf_TILED; } int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb) { struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base); u32 gtt_offset_rotated = 0; u32 gtt_offset_remapped = 0; unsigned int max_size = 0; int i, num_planes = fb->base.format->num_planes; unsigned int tile_size = intel_tile_size(i915); intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL); drm_WARN_ON(&i915->drm, intel_fb_supports_90_270_rotation(fb) && intel_fb_needs_pot_stride_remap(fb)); if (intel_fb_supports_90_270_rotation(fb)) intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED); if (intel_fb_needs_pot_stride_remap(fb)) intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED); for (i = 0; i < num_planes; i++) { struct fb_plane_view_dims view_dims; unsigned int width, height; unsigned int size; u32 offset; int x, y; int ret; /* * Plane 2 of Render Compression with Clear Color fb modifier * is consumed by the driver and not passed to DE. Skip the * arithmetic related to alignment and offset calculation. */ if (is_gen12_ccs_cc_plane(&fb->base, i)) { if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE)) continue; else return -EINVAL; } intel_fb_plane_dims(fb, i, &width, &height); ret = convert_plane_offset_to_xy(fb, i, width, &x, &y); if (ret) return ret; init_plane_view_dims(fb, i, width, height, &view_dims); /* * First pixel of the framebuffer from * the start of the normal gtt mapping. */ fb->normal_view.color_plane[i].x = x; fb->normal_view.color_plane[i].y = y; fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i]; fb->normal_view.color_plane[i].scanout_stride = fb->normal_view.color_plane[i].mapping_stride; offset = calc_plane_aligned_offset(fb, i, &x, &y); if (intel_fb_supports_90_270_rotation(fb)) gtt_offset_rotated += calc_plane_remap_info(fb, i, &view_dims, offset, gtt_offset_rotated, x, y, &fb->rotated_view); if (intel_fb_needs_pot_stride_remap(fb)) gtt_offset_remapped += calc_plane_remap_info(fb, i, &view_dims, offset, gtt_offset_remapped, x, y, &fb->remapped_view); size = calc_plane_normal_size(fb, i, &view_dims, x, y); /* how many tiles in total needed in the bo */ max_size = max(max_size, offset + size); } if (mul_u32_u32(max_size, tile_size) > obj->base.size) { drm_dbg_kms(&i915->drm, "fb too big for bo (need %llu bytes, have %zu bytes)\n", mul_u32_u32(max_size, tile_size), obj->base.size); return -EINVAL; } return 0; } static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); struct drm_framebuffer *fb = plane_state->hw.fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); unsigned int rotation = plane_state->hw.rotation; int i, num_planes = fb->format->num_planes; unsigned int src_x, src_y; unsigned int src_w, src_h; u32 gtt_offset = 0; intel_fb_view_init(i915, &plane_state->view, drm_rotation_90_or_270(rotation) ? 
I915_GTT_VIEW_ROTATED : I915_GTT_VIEW_REMAPPED); src_x = plane_state->uapi.src.x1 >> 16; src_y = plane_state->uapi.src.y1 >> 16; src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_h = drm_rect_height(&plane_state->uapi.src) >> 16; drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ drm_rect_translate(&plane_state->uapi.src, -(src_x << 16), -(src_y << 16)); /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->uapi.src, src_w << 16, src_h << 16, DRM_MODE_ROTATE_270); for (i = 0; i < num_planes; i++) { unsigned int hsub = i ? fb->format->hsub : 1; unsigned int vsub = i ? fb->format->vsub : 1; struct fb_plane_view_dims view_dims; unsigned int width, height; unsigned int x, y; u32 offset; x = src_x / hsub; y = src_y / vsub; width = src_w / hsub; height = src_h / vsub; init_plane_view_dims(intel_fb, i, width, height, &view_dims); /* * First pixel of the src viewport from the * start of the normal gtt mapping. */ x += intel_fb->normal_view.color_plane[i].x; y += intel_fb->normal_view.color_plane[i].y; offset = calc_plane_aligned_offset(intel_fb, i, &x, &y); gtt_offset += calc_plane_remap_info(intel_fb, i, &view_dims, offset, gtt_offset, x, y, &plane_state->view); } } void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation, struct intel_fb_view *view) { if (drm_rotation_90_or_270(rotation)) *view = fb->rotated_view; else if (intel_fb_needs_pot_stride_remap(fb)) *view = fb->remapped_view; else *view = fb->normal_view; } static u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { /* * Arbitrary limit for gen4+ chosen to match the * render engine max stride. * * The new CCS hash mode makes remapping impossible */ if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || intel_fb_modifier_uses_dpt(dev_priv, modifier)) return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); else if (DISPLAY_VER(dev_priv) >= 7) return 256 * 1024; else return 128 * 1024; } static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); u32 tile_width; if (is_surface_linear(fb, color_plane)) { u32 max_stride = intel_plane_fb_max_stride(dev_priv, fb->format->format, fb->modifier); /* * To make remapping with linear generally feasible * we need the stride to be page aligned. */ if (fb->pitches[color_plane] > max_stride && !intel_fb_is_ccs_modifier(fb->modifier)) return intel_tile_size(dev_priv); else return 64; } tile_width = intel_tile_width_bytes(fb, color_plane); if (intel_fb_is_ccs_modifier(fb->modifier)) { /* * On TGL the surface stride must be 4 tile aligned, mapped by * one 64 byte cacheline on the CCS AUX surface. */ if (DISPLAY_VER(dev_priv) >= 12) tile_width *= 4; /* * Display WA #0531: skl,bxt,kbl,glk * * Render decompression and plane width > 3840 * combined with horizontal panning requires the * plane stride to be a multiple of 4. We'll just * require the entire fb to accommodate that to avoid * potential runtime errors at plane configuration time. 
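 * e.g., matching the check below, a 3841 pixel wide fb with a CCS modifier on SKL or GLK has its plane 0 stride alignment requirement raised from one tile width to four.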
*/ else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && color_plane == 0 && fb->width > 3840) tile_width *= 4; } return tile_width; } static int intel_plane_check_stride(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; u32 stride, max_stride; /* * We ignore stride for all invisible planes that * can be remapped. Otherwise we could end up * with a false positive when the remapping didn't * kick in due the plane being invisible. */ if (intel_plane_can_remap(plane_state) && !plane_state->uapi.visible) return 0; /* FIXME other color planes? */ stride = plane_state->view.color_plane[0].mapping_stride; max_stride = plane->max_stride(plane, fb->format->format, fb->modifier, rotation); if (stride > max_stride) { DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n", fb->base.id, stride, plane->base.base.id, plane->base.name, max_stride); return -EINVAL; } return 0; } int intel_plane_compute_gtt(struct intel_plane_state *plane_state) { const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb); unsigned int rotation = plane_state->hw.rotation; if (!fb) return 0; if (intel_plane_needs_remap(plane_state)) { intel_plane_remap_gtt(plane_state); /* * Sometimes even remapping can't overcome * the stride limitations :( Can happen with * big plane sizes and suitably misaligned * offsets. */ return intel_plane_check_stride(plane_state); } intel_fb_fill_view(fb, rotation, &plane_state->view); /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->uapi.src, fb->base.width << 16, fb->base.height << 16, DRM_MODE_ROTATE_270); return intel_plane_check_stride(plane_state); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); if (intel_fb_uses_dpt(fb)) intel_dpt_destroy(intel_fb->dpt_vm); intel_frontbuffer_put(intel_fb->frontbuffer); kfree(intel_fb); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_private *i915 = to_i915(obj->base.dev); if (i915_gem_object_is_userptr(obj)) { drm_dbg(&i915->drm, "attempting to use a userptr for a framebuffer, denied\n"); return -EINVAL; } return drm_gem_handle_create(file, &obj->base, handle); } static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_file *file, unsigned int flags, unsigned int color, struct drm_clip_rect *clips, unsigned int num_clips) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); i915_gem_object_flush_if_display(obj); intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); return 0; } static const struct drm_framebuffer_funcs intel_fb_funcs = { .destroy = intel_user_framebuffer_destroy, .create_handle = intel_user_framebuffer_create_handle, .dirty = intel_user_framebuffer_dirty, }; int intel_framebuffer_init(struct intel_framebuffer *intel_fb, struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_framebuffer *fb = &intel_fb->base; u32 max_stride; unsigned int tiling, stride; int ret = -EINVAL; int i; intel_fb->frontbuffer = intel_frontbuffer_get(obj); if (!intel_fb->frontbuffer) return 
-ENOMEM; i915_gem_object_lock(obj, NULL); tiling = i915_gem_object_get_tiling(obj); stride = i915_gem_object_get_stride(obj); i915_gem_object_unlock(obj); if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { /* * If there's a fence, enforce that * the fb modifier and tiling mode match. */ if (tiling != I915_TILING_NONE && tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { drm_dbg_kms(&dev_priv->drm, "tiling_mode doesn't match fb modifier\n"); goto err; } } else { if (tiling == I915_TILING_X) { mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; } else if (tiling == I915_TILING_Y) { drm_dbg_kms(&dev_priv->drm, "No Y tiling for legacy addfb\n"); goto err; } } if (!drm_any_plane_has_format(&dev_priv->drm, mode_cmd->pixel_format, mode_cmd->modifier[0])) { drm_dbg_kms(&dev_priv->drm, "unsupported pixel format %p4cc / modifier 0x%llx\n", &mode_cmd->pixel_format, mode_cmd->modifier[0]); goto err; } /* * gen2/3 display engine uses the fence if present, * so the tiling mode must match the fb modifier exactly. */ if (DISPLAY_VER(dev_priv) < 4 && tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { drm_dbg_kms(&dev_priv->drm, "tiling_mode must match fb modifier exactly on gen2/3\n"); goto err; } max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, mode_cmd->modifier[0]); if (mode_cmd->pitches[0] > max_stride) { drm_dbg_kms(&dev_priv->drm, "%s pitch (%u) must be at most %d\n", mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? "tiled" : "linear", mode_cmd->pitches[0], max_stride); goto err; } /* * If there's a fence, enforce that * the fb pitch and fence stride match. */ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { drm_dbg_kms(&dev_priv->drm, "pitch (%d) must match tiling stride (%d)\n", mode_cmd->pitches[0], stride); goto err; } /* FIXME need to adjust LINOFF/TILEOFF accordingly. 
*/ if (mode_cmd->offsets[0] != 0) { drm_dbg_kms(&dev_priv->drm, "plane 0 offset (0x%08x) must be 0\n", mode_cmd->offsets[0]); goto err; } drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); for (i = 0; i < fb->format->num_planes; i++) { u32 stride_alignment; if (mode_cmd->handles[i] != mode_cmd->handles[0]) { drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n", i); goto err; } stride_alignment = intel_fb_stride_alignment(fb, i); if (fb->pitches[i] & (stride_alignment - 1)) { drm_dbg_kms(&dev_priv->drm, "plane %d pitch (%d) must be at least %u byte aligned\n", i, fb->pitches[i], stride_alignment); goto err; } if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i); if (fb->pitches[i] != ccs_aux_stride) { drm_dbg_kms(&dev_priv->drm, "ccs aux plane %d pitch (%d) must be %d\n", i, fb->pitches[i], ccs_aux_stride); goto err; } } fb->obj[i] = &obj->base; } ret = intel_fill_fb_info(dev_priv, intel_fb); if (ret) goto err; if (intel_fb_uses_dpt(fb)) { struct i915_address_space *vm; vm = intel_dpt_create(intel_fb); if (IS_ERR(vm)) { drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n"); ret = PTR_ERR(vm); goto err; } intel_fb->dpt_vm = vm; } ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); if (ret) { drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); goto err_free_dpt; } return 0; err_free_dpt: if (intel_fb_uses_dpt(fb)) intel_dpt_destroy(intel_fb->dpt_vm); err: intel_frontbuffer_put(intel_fb->frontbuffer); return ret; } struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd) { struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; struct drm_i915_private *i915; obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); if (!obj) return ERR_PTR(-ENOENT); /* object is backed with LMEM for discrete */ i915 = to_i915(obj->base.dev); if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) { /* object is "remote", not in local memory */ i915_gem_object_put(obj); drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n"); return ERR_PTR(-EREMOTE); } fb = intel_framebuffer_create(obj, &mode_cmd); i915_gem_object_put(obj); return fb; } struct drm_framebuffer * intel_framebuffer_create(struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd) { struct intel_framebuffer *intel_fb; int ret; intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) return ERR_PTR(-ENOMEM); ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); if (ret) goto err; return &intel_fb->base; err: kfree(intel_fb); return ERR_PTR(ret); }
linux-master
drivers/gpu/drm/i915/display/intel_fb.c
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_display.h" #include "intel_display_trace.h" #include "intel_mchbar_regs.h" #include "intel_wm.h" #include "skl_watermark.h" #include "vlv_sideband.h" /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; bool sprites_enabled; bool sprites_scaled; }; struct cxsr_latency { bool is_desktop : 1; bool is_ddr3 : 1; u16 fsb_freq; u16 mem_freq; u16 display_sr; u16 display_hpll_disable; u16 cursor_sr; u16 cursor_hpll_disable; }; static const struct cxsr_latency cxsr_latency_table[] = { {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ }; static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, bool is_ddr3, int fsb, int mem) { const struct cxsr_latency *latency; int i; if (fsb == 0 || mem == 0) return NULL; for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { latency = &cxsr_latency_table[i]; if (is_desktop == latency->is_desktop && is_ddr3 == latency->is_ddr3 && fsb == latency->fsb_freq && mem == latency->mem_freq) return latency; } DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); return NULL; } static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) { u32 val; vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if (enable) val &= ~FORCE_DDR_HIGH_FREQ; else val |= FORCE_DDR_HIGH_FREQ; val &= ~FORCE_DDR_LOW_FREQ; val |= FORCE_DDR_FREQ_REQ_ACK; vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) drm_err(&dev_priv->drm, 
"timed out waiting for Punit DDR DVFS request\n"); vlv_punit_put(dev_priv); } static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) { u32 val; vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (enable) val |= DSP_MAXFIFO_PM5_ENABLE; else val &= ~DSP_MAXFIFO_PM5_ENABLE; vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); vlv_punit_put(dev_priv); } #define FW_WM(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { bool was_enabled; u32 val; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); } else if (IS_PINEVIEW(dev_priv)) { val = intel_uncore_read(&dev_priv->uncore, DSPFW3); was_enabled = val & PINEVIEW_SELF_REFRESH_EN; if (enable) val |= PINEVIEW_SELF_REFRESH_EN; else val &= ~PINEVIEW_SELF_REFRESH_EN; intel_uncore_write(&dev_priv->uncore, DSPFW3, val); intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); } else if (IS_I915GM(dev_priv)) { /* * FIXME can't find a bit like this for 915G, and * yet it does have the related watermark in * FW_BLC_SELF. What's going on? */ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : _MASKED_BIT_DISABLE(INSTPM_SELF_EN); intel_uncore_write(&dev_priv->uncore, INSTPM, val); intel_uncore_posting_read(&dev_priv->uncore, INSTPM); } else { return false; } trace_intel_memory_cxsr(dev_priv, was_enabled, enable); drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", str_enabled_disabled(enable), str_enabled_disabled(was_enabled)); return was_enabled; } /** * intel_set_memory_cxsr - Configure CxSR state * @dev_priv: i915 device * @enable: Allow vs. disallow CxSR * * Allow or disallow the system to enter a special CxSR * (C-state self refresh) state. What typically happens in CxSR mode * is that several display FIFOs may get combined into a single larger * FIFO for a particular plane (so called max FIFO mode) to allow the * system to defer memory fetches longer, and the memory will enter * self refresh. * * Note that enabling CxSR does not guarantee that the system enter * this special mode, nor does it guarantee that the system stays * in that mode once entered. So this just allows/disallows the system * to autonomously utilize the CxSR mode. Other factors such as core * C-states will affect when/if the system actually enters/exits the * CxSR mode. * * Note that on VLV/CHV this actually only controls the max FIFO mode, * and the system is free to enter/exit memory self refresh at any time * even when the use of CxSR has been disallowed. 
* * While the system is actually in the CxSR/max FIFO mode, some plane * control registers will not get latched on vblank. Thus in order to * guarantee the system will respond to changes in the plane registers * we must always disallow CxSR prior to making changes to those registers. * Unfortunately the system will re-evaluate the CxSR conditions at * frame start which happens after vblank start (which is when the plane * registers would get latched), so we can't proceed with the plane update * during the same frame where we disallowed CxSR. * * Certain platforms also have a deeper HPLL SR mode. Fortunately the * HPLL SR mode depends on CxSR itself, so we don't have to hand hold * the hardware w.r.t. HPLL SR when writing to plane registers. * Disallowing just CxSR is sufficient. */ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { bool ret; mutex_lock(&dev_priv->display.wm.wm_mutex); ret = _intel_set_memory_cxsr(dev_priv, enable); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display.wm.vlv.cxsr = enable; else if (IS_G4X(dev_priv)) dev_priv->display.wm.g4x.cxsr = enable; mutex_unlock(&dev_priv->display.wm.wm_mutex); return ret; } /* * Latency for FIFO fetches is dependent on several factors: * - memory configuration (speed, channels) * - chipset * - current MCH state * It can be fairly high in some situations, so here we assume a fairly * pessimal value. It's a tradeoff between extra memory fetches (if we * set this value too high, the FIFO will fetch frequently to stay full) * and power consumption (set it too low to save power and we might see * FIFO underruns and display "flicker"). * * A value of 5us seems to be a good balance; safe for very low end * platforms but not overly aggressive on lower latency configs. 
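 * * As an illustrative example (hypothetical numbers): with a 100 MHz pixel clock and 4 bytes per pixel, 5us of latency corresponds to 100000 kHz * 4 B * 5 us = 2000 bytes, i.e. about 32 cachelines of 64 bytes (rounding up) that must still be queued in the FIFO when the memory fetch is issued; this is essentially the intel_wm_method1() calculation further below.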
*/ static const int pessimal_latency_ns = 5000; #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; enum pipe pipe = crtc->pipe; int sprite0_start, sprite1_start; u32 dsparb, dsparb2, dsparb3; switch (pipe) { case PIPE_A: dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); break; case PIPE_B: dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); break; case PIPE_C: dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); break; default: MISSING_CASE(pipe); return; } fifo_state->plane[PLANE_PRIMARY] = sprite0_start; fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; fifo_state->plane[PLANE_CURSOR] = 63; } static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x7f; if (i9xx_plane == PLANE_B) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; } static int i830_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x1ff; if (i9xx_plane == PLANE_B) size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; } static int i845_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x7f; size >>= 2; /* Convert to cachelines */ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; } /* Pineview has different values for various configs */ static const struct intel_watermark_params pnv_display_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pnv_display_hplloff_wm = { .fifo_size = PINEVIEW_DISPLAY_FIFO, .max_wm = PINEVIEW_MAX_WM, .default_wm = PINEVIEW_DFT_HPLLOFF_WM, .guard_size = PINEVIEW_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pnv_cursor_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params 
pnv_cursor_hplloff_wm = { .fifo_size = PINEVIEW_CURSOR_FIFO, .max_wm = PINEVIEW_CURSOR_MAX_WM, .default_wm = PINEVIEW_CURSOR_DFT_WM, .guard_size = PINEVIEW_CURSOR_GUARD_WM, .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i965_cursor_wm_info = { .fifo_size = I965_CURSOR_FIFO, .max_wm = I965_CURSOR_MAX_WM, .default_wm = I965_CURSOR_DFT_WM, .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i945_wm_info = { .fifo_size = I945_FIFO_SIZE, .max_wm = I915_MAX_WM, .default_wm = 1, .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i915_wm_info = { .fifo_size = I915_FIFO_SIZE, .max_wm = I915_MAX_WM, .default_wm = 1, .guard_size = 2, .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i830_a_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM, .default_wm = 1, .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i830_bc_wm_info = { .fifo_size = I855GM_FIFO_SIZE, .max_wm = I915_MAX_WM / 2, .default_wm = 1, .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i845_wm_info = { .fifo_size = I830_FIFO_SIZE, .max_wm = I915_MAX_WM, .default_wm = 1, .guard_size = 2, .cacheline_size = I830_FIFO_LINE_SIZE, }; /** * intel_wm_method1 - Method 1 / "small buffer" watermark formula * @pixel_rate: Pipe pixel rate in kHz * @cpp: Plane bytes per pixel * @latency: Memory wakeup latency in 0.1us units * * Compute the watermark using the method 1 or "small buffer" * formula. The caller may additionally add extra cachelines * to account for TLB misses and clock crossings. * * This method is concerned with the short term drain rate * of the FIFO, i.e. it does not account for blanking periods * which would effectively reduce the average drain rate across * a longer period. The name "small" refers to the fact that the * FIFO is relatively small compared to the amount of data * fetched. * * The FIFO level vs. time graph might look something like: * * |\ |\ * | \ | \ * __---__---__ (- plane active, _ blanking) * -> time * * or perhaps like this: * * |\|\ |\|\ * __----__----__ (- plane active, _ blanking) * -> time * * Returns: * The watermark in bytes */ static unsigned int intel_wm_method1(unsigned int pixel_rate, unsigned int cpp, unsigned int latency) { u64 ret; ret = mul_u32_u32(pixel_rate, cpp * latency); ret = DIV_ROUND_UP_ULL(ret, 10000); return ret; } /** * intel_wm_method2 - Method 2 / "large buffer" watermark formula * @pixel_rate: Pipe pixel rate in kHz * @htotal: Pipe horizontal total * @width: Plane width in pixels * @cpp: Plane bytes per pixel * @latency: Memory wakeup latency in 0.1us units * * Compute the watermark using the method 2 or "large buffer" * formula. The caller may additionally add extra cachelines * to account for TLB misses and clock crossings. * * This method is concerned with the long term drain rate * of the FIFO, i.e. it does account for blanking periods * which effectively reduce the average drain rate across * a longer period. The name "large" refers to the fact that the * FIFO is relatively large compared to the amount of data * fetched. * * The FIFO level vs.
time graph might look something like: * * |\___ |\___ * | \___ | \___ * | \ | \ * __ --__--__--__--__--__--__ (- plane active, _ blanking) * -> time * * Returns: * The watermark in bytes */ static unsigned int intel_wm_method2(unsigned int pixel_rate, unsigned int htotal, unsigned int width, unsigned int cpp, unsigned int latency) { unsigned int ret; /* * FIXME remove once all users are computing * watermarks in the correct place. */ if (WARN_ON_ONCE(htotal == 0)) htotal = 1; ret = (latency * pixel_rate) / (htotal * 10000); ret = (ret + 1) * width * cpp; return ret; } /** * intel_calculate_wm - calculate watermark level * @pixel_rate: pixel clock * @wm: chip FIFO params * @fifo_size: size of the FIFO buffer * @cpp: bytes per pixel * @latency_ns: memory latency for the platform * * Calculate the watermark level (the level at which the display plane will * start fetching from memory again). Each chip has a different display * FIFO size and allocation, so the caller needs to figure that out and pass * in the correct intel_watermark_params structure. * * As the pixel clock runs, the FIFO will be drained at a rate that depends * on the pixel size. When it reaches the watermark level, it'll start * fetching FIFO line sized chunks from memory until the FIFO fills * past the watermark point. If the FIFO drains completely, a FIFO underrun * will occur, and a display engine hang could result. */ static unsigned int intel_calculate_wm(int pixel_rate, const struct intel_watermark_params *wm, int fifo_size, int cpp, unsigned int latency_ns) { int entries, wm_size; /* * Note: we need to make sure we don't overflow for various clock & * latency values. * clocks go from a few thousand to several hundred thousand. * latency is usually a few thousand */ entries = intel_wm_method1(pixel_rate, cpp, latency_ns / 100); entries = DIV_ROUND_UP(entries, wm->cacheline_size) + wm->guard_size; DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); wm_size = fifo_size - entries; DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); /* Don't promote wm_size to unsigned... */ if (wm_size > wm->max_wm) wm_size = wm->max_wm; if (wm_size <= 0) wm_size = wm->default_wm; /* * Bspec seems to indicate that the value shouldn't be lower than * 'burst size + 1'. Certainly 830 is quite unhappy with low values. * Let's go for 8 which is the burst size since certain platforms * already use a hardcoded 8 (which is what the spec says should be * done). */ if (wm_size <= 8) wm_size = 8; return wm_size; } static bool is_disabling(int old, int new, int threshold) { return old >= threshold && new < threshold; } static bool is_enabling(int old, int new, int threshold) { return old < threshold && new >= threshold; } static bool intel_crtc_active(struct intel_crtc *crtc) { /* Be paranoid as we can arrive here with only partial * state retrieved from the hardware during setup. * * We can ditch the adjusted_mode.crtc_clock check as soon * as Haswell has gained clock readout/fastboot support. * * We can ditch the crtc->primary->state->fb check as soon as we can * properly reconstruct framebuffers. * * FIXME: The intel_crtc->active here should be switched to * crtc->state->active once we have proper CRTC states wired up * for atomic.
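 * * In the meantime the check below requires all of: a crtc, crtc->active, a primary plane framebuffer, and a non-zero adjusted_mode.crtc_clock.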
*/ return crtc && crtc->active && crtc->base.primary->state->fb && crtc->config->hw.adjusted_mode.crtc_clock; } static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc, *enabled = NULL; for_each_intel_crtc(&dev_priv->drm, crtc) { if (intel_crtc_active(crtc)) { if (enabled) return NULL; enabled = crtc; } } return enabled; } static void pnv_update_wm(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; const struct cxsr_latency *latency; u32 reg; unsigned int wm; latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM found, disable CxSR\n"); intel_set_memory_cxsr(dev_priv, false); return; } crtc = single_enabled_crtc(dev_priv); if (crtc) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int pixel_rate = crtc->config->pixel_rate; int cpp = fb->format->cpp[0]; /* Display SR */ wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR)); /* Display HPLL off SR */ wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); /* cursor HPLL off SR */ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); intel_set_memory_cxsr(dev_priv, true); } else { intel_set_memory_cxsr(dev_priv, false); } } /* * Documentation says: * "If the line size is small, the TLB fetches can get in the way of the * data fetches, causing some lag in the pixel data return which is not * accounted for in the above formulas. The following adjustment only * needs to be applied if eight whole lines fit in the buffer at once. * The WM is adjusted upwards by the difference between the FIFO size * and the size of 8 whole lines. This adjustment is always performed * in the actual pixel depth regardless of whether FBC is enabled or not." */ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) { int tlb_miss = fifo_size * 64 - width * cpp * 8; return max(0, tlb_miss); } static void g4x_write_wm_values(struct drm_i915_private *dev_priv, const struct g4x_wm_values *wm) { enum pipe pipe; for_each_pipe(dev_priv, pipe) trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); intel_uncore_write(&dev_priv->uncore, DSPFW2, (wm->fbc_en ? 
DSPFW_FBC_SR_EN : 0) | FW_WM(wm->sr.fbc, FBC_SR) | FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); intel_uncore_write(&dev_priv->uncore, DSPFW3, (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | FW_WM(wm->sr.cursor, CURSOR_SR) | FW_WM(wm->hpll.cursor, HPLL_CURSOR) | FW_WM(wm->hpll.plane, HPLL_SR)); intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); } #define FW_WM_VLV(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) static void vlv_write_wm_values(struct drm_i915_private *dev_priv, const struct vlv_wm_values *wm) { enum pipe pipe; for_each_pipe(dev_priv, pipe) { trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); } /* * Zero the (unused) WM1 watermarks, and also clear all the * high order bits so that there are no out of bounds values * present in the registers during the reprogramming. */ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(wm->sr.cursor, CURSOR_SR)); if (IS_CHERRYVIEW(dev_priv)) { intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); intel_uncore_write(&dev_priv->uncore, DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } else { intel_uncore_write(&dev_priv->uncore, DSPFW7, FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); intel_uncore_write(&dev_priv->uncore, DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | 
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); } #undef FW_WM_VLV static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1; } static int g4x_plane_fifo_size(enum plane_id plane_id, int level) { /* * DSPCNTR[13] supposedly controls whether the * primary plane can use the FIFO space otherwise * reserved for the sprite plane. It's not 100% clear * what the actual FIFO size is, but it looks like we * can happily set both primary and sprite watermarks * up to 127 cachelines. So that would seem to mean * that either DSPCNTR[13] doesn't do anything, or that * the total FIFO is >= 256 cachelines in size. Either * way, we don't seem to have to worry about this * repartitioning as the maximum watermark value the * register can hold for each plane is lower than the * minimum FIFO size. */ switch (plane_id) { case PLANE_CURSOR: return 63; case PLANE_PRIMARY: return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; case PLANE_SPRITE0: return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; default: MISSING_CASE(plane_id); return 0; } } static int g4x_fbc_fifo_size(int level) { switch (level) { case G4X_WM_LEVEL_SR: return 7; case G4X_WM_LEVEL_HPLL: return 15; default: MISSING_CASE(level); return 0; } } static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) return USHRT_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; /* * WaUse32BppForSRWM:ctg,elk * * The spec fails to list this restriction for the * HPLL watermark, which seems a little strange. * Let's use 32bpp for the HPLL watermark as well. 
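 * * Hence, for the primary plane's SR/HPLL levels, cpp is clamped to at least 4 bytes just below.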
*/ if (plane->id == PLANE_PRIMARY && level != G4X_WM_LEVEL_NORMAL) cpp = max(cpp, 4u); pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); } else if (plane->id == PLANE_PRIMARY && level == G4X_WM_LEVEL_NORMAL) { wm = intel_wm_method1(pixel_rate, cpp, latency); } else { unsigned int small, large; small = intel_wm_method1(pixel_rate, cpp, latency); large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); wm = min(small, large); } wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), width, cpp); wm = DIV_ROUND_UP(wm, 64) + 2; return min_t(unsigned int, wm, USHRT_MAX); } static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); bool dirty = false; for (; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->plane[plane_id] != value; raw->plane[plane_id] = value; } return dirty; } static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, int level, u16 value) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); bool dirty = false; /* NORMAL level doesn't have an FBC watermark */ level = max(level, G4X_WM_LEVEL_SR); for (; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->fbc != value; raw->fbc = value; } return dirty; } static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, u32 pri_val); static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; bool dirty = false; int level; if (!intel_wm_plane_visible(crtc_state, plane_state)) { dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); if (plane_id == PLANE_PRIMARY) dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); goto out; } for (level = 0; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; int wm, max_wm; wm = g4x_compute_wm(crtc_state, plane_state, level); max_wm = g4x_plane_fifo_size(plane_id, level); if (wm > max_wm) break; dirty |= raw->plane[plane_id] != wm; raw->plane[plane_id] = wm; if (plane_id != PLANE_PRIMARY || level == G4X_WM_LEVEL_NORMAL) continue; wm = ilk_compute_fbc_wm(crtc_state, plane_state, raw->plane[plane_id]); max_wm = g4x_fbc_fifo_size(level); /* * FBC wm is not mandatory as we * can always just disable its use. 
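 * An over-budget FBC watermark is therefore recorded as USHRT_MAX below, and g4x_compute_fbc_en() will then simply not enable the FBC watermarks for that level instead of invalidating the whole level.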
*/ if (wm > max_wm) wm = USHRT_MAX; dirty |= raw->fbc != wm; raw->fbc = wm; } /* mark watermarks as invalid */ dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); if (plane_id == PLANE_PRIMARY) dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); out: if (dirty) { drm_dbg_kms(&dev_priv->drm, "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", plane->base.name, crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); if (plane_id == PLANE_PRIMARY) drm_dbg_kms(&dev_priv->drm, "FBC watermarks: SR=%d, HPLL=%d\n", crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); } return dirty; } static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, enum plane_id plane_id, int level) { const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); } static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (level >= dev_priv->display.wm.num_levels) return false; return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); } /* mark all levels starting from 'level' as invalid */ static void g4x_invalidate_wms(struct intel_crtc *crtc, struct g4x_wm_state *wm_state, int level) { if (level <= G4X_WM_LEVEL_NORMAL) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) wm_state->wm.plane[plane_id] = USHRT_MAX; } if (level <= G4X_WM_LEVEL_SR) { wm_state->cxsr = false; wm_state->sr.cursor = USHRT_MAX; wm_state->sr.plane = USHRT_MAX; wm_state->sr.fbc = USHRT_MAX; } if (level <= G4X_WM_LEVEL_HPLL) { wm_state->hpll_en = false; wm_state->hpll.cursor = USHRT_MAX; wm_state->hpll.plane = USHRT_MAX; wm_state->hpll.fbc = USHRT_MAX; } } static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, int level) { if (level < G4X_WM_LEVEL_SR) return false; if (level >= G4X_WM_LEVEL_SR && wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) return false; if (level >= G4X_WM_LEVEL_HPLL && wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) return false; return true; } static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); const struct g4x_pipe_wm *raw; enum plane_id plane_id; int level; level = G4X_WM_LEVEL_NORMAL; if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) goto out; raw = &crtc_state->wm.g4x.raw[level]; for_each_plane_id_on_crtc(crtc, plane_id) wm_state->wm.plane[plane_id] = raw->plane[plane_id]; level = G4X_WM_LEVEL_SR; if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) goto out; raw = &crtc_state->wm.g4x.raw[level]; wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; wm_state->sr.fbc = raw->fbc; wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); level = G4X_WM_LEVEL_HPLL; if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) goto out; raw = &crtc_state->wm.g4x.raw[level]; wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; wm_state->hpll.fbc = raw->fbc; wm_state->hpll_en = wm_state->cxsr; 
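/* all levels were valid; bump 'level' past HPLL so the code at 'out:' below leaves every level enabled */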
level++; out: if (level == G4X_WM_LEVEL_NORMAL) return -EINVAL; /* invalidate the higher levels */ g4x_invalidate_wms(crtc, wm_state, level); /* * Determine if the FBC watermark(s) can be used. IF * this isn't the case we prefer to disable the FBC * watermark(s) rather than disable the SR/HPLL * level(s) entirely. 'level-1' is the highest valid * level here. */ wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); return 0; } static int g4x_compute_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; struct intel_plane *plane; unsigned int dirty = 0; int i; for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (new_plane_state->hw.crtc != &crtc->base && old_plane_state->hw.crtc != &crtc->base) continue; if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) dirty |= BIT(plane->id); } if (!dirty) return 0; return _g4x_compute_pipe_wm(crtc_state); } static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; enum plane_id plane_id; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state)) { *intermediate = *optimal; intermediate->cxsr = false; intermediate->hpll_en = false; goto out; } intermediate->cxsr = optimal->cxsr && active->cxsr && !new_crtc_state->disable_cxsr; intermediate->hpll_en = optimal->hpll_en && active->hpll_en && !new_crtc_state->disable_cxsr; intermediate->fbc_en = optimal->fbc_en && active->fbc_en; for_each_plane_id_on_crtc(crtc, plane_id) { intermediate->wm.plane[plane_id] = max(optimal->wm.plane[plane_id], active->wm.plane[plane_id]); drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); } intermediate->sr.plane = max(optimal->sr.plane, active->sr.plane); intermediate->sr.cursor = max(optimal->sr.cursor, active->sr.cursor); intermediate->sr.fbc = max(optimal->sr.fbc, active->sr.fbc); intermediate->hpll.plane = max(optimal->hpll.plane, active->hpll.plane); intermediate->hpll.cursor = max(optimal->hpll.cursor, active->hpll.cursor); intermediate->hpll.fbc = max(optimal->hpll.fbc, active->hpll.fbc); drm_WARN_ON(&dev_priv->drm, (intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr); drm_WARN_ON(&dev_priv->drm, (intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en); drm_WARN_ON(&dev_priv->drm, intermediate->sr.fbc > g4x_fbc_fifo_size(1) && intermediate->fbc_en && intermediate->cxsr); drm_WARN_ON(&dev_priv->drm, intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && intermediate->fbc_en && intermediate->hpll_en); out: /* * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank 
programming; only update if it's different. */ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) new_crtc_state->wm.need_postvbl_update = true; return 0; } static void g4x_merge_wm(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { struct intel_crtc *crtc; int num_active_pipes = 0; wm->cxsr = true; wm->hpll_en = true; wm->fbc_en = true; for_each_intel_crtc(&dev_priv->drm, crtc) { const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; if (!crtc->active) continue; if (!wm_state->cxsr) wm->cxsr = false; if (!wm_state->hpll_en) wm->hpll_en = false; if (!wm_state->fbc_en) wm->fbc_en = false; num_active_pipes++; } if (num_active_pipes != 1) { wm->cxsr = false; wm->hpll_en = false; wm->fbc_en = false; } for_each_intel_crtc(&dev_priv->drm, crtc) { const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; enum pipe pipe = crtc->pipe; wm->pipe[pipe] = wm_state->wm; if (crtc->active && wm->cxsr) wm->sr = wm_state->sr; if (crtc->active && wm->hpll_en) wm->hpll = wm_state->hpll; } } static void g4x_program_watermarks(struct drm_i915_private *dev_priv) { struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; struct g4x_wm_values new_wm = {}; g4x_merge_wm(dev_priv, &new_wm); if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) return; if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, false); g4x_write_wm_values(dev_priv, &new_wm); if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, true); *old_wm = new_wm; } static void g4x_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; g4x_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void g4x_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; g4x_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } /* latency must be in 0.1us units. 
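 * (callers pass pri_latency[level] * 10, e.g. the 3 usec PM2 latency becomes 30; see vlv_compute_wm_level())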
*/ static unsigned int vlv_wm_method2(unsigned int pixel_rate, unsigned int htotal, unsigned int width, unsigned int cpp, unsigned int latency) { unsigned int ret; ret = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); ret = DIV_ROUND_UP(ret, 64); return ret; } static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1; if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; } } static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; unsigned int pixel_rate, htotal, cpp, width, wm; if (dev_priv->display.wm.pri_latency[level] == 0) return USHRT_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { /* * FIXME the formula gives values that are * too big for the cursor FIFO, and hence we * would never be able to use cursors. For * now just hardcode the watermark. */ wm = 63; } else { wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, dev_priv->display.wm.pri_latency[level] * 10); } return min_t(unsigned int, wm, USHRT_MAX); } static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) { return (active_planes & (BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); } static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); int num_active_planes = hweight8(active_planes); const int fifo_size = 511; int fifo_extra, fifo_left = fifo_size; int sprite0_fifo_extra = 0; unsigned int total_rate; enum plane_id plane_id; /* * When enabling sprite0 after sprite1 has already been enabled * we tend to get an underrun unless sprite0 already has some * FIFO space allocated. Hence we always allocate at least one * cacheline for sprite0 whenever sprite1 is enabled. * * All other plane enable sequences appear immune to this problem.
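 * * The problematic configuration is exactly "sprite1 enabled, sprite0 disabled", which is what vlv_need_sprite0_fifo_workaround() checks for below.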
*/ if (vlv_need_sprite0_fifo_workaround(active_planes)) sprite0_fifo_extra = 1; total_rate = raw->plane[PLANE_PRIMARY] + raw->plane[PLANE_SPRITE0] + raw->plane[PLANE_SPRITE1] + sprite0_fifo_extra; if (total_rate > fifo_size) return -EINVAL; if (total_rate == 0) total_rate = 1; for_each_plane_id_on_crtc(crtc, plane_id) { unsigned int rate; if ((active_planes & BIT(plane_id)) == 0) { fifo_state->plane[plane_id] = 0; continue; } rate = raw->plane[plane_id]; fifo_state->plane[plane_id] = fifo_size * rate / total_rate; fifo_left -= fifo_state->plane[plane_id]; } fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; fifo_left -= sprite0_fifo_extra; fifo_state->plane[PLANE_CURSOR] = 63; fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); /* spread the remainder evenly */ for_each_plane_id_on_crtc(crtc, plane_id) { int plane_extra; if (fifo_left == 0) break; if ((active_planes & BIT(plane_id)) == 0) continue; plane_extra = min(fifo_extra, fifo_left); fifo_state->plane[plane_id] += plane_extra; fifo_left -= plane_extra; } drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); /* give it all to the first plane if none are active */ if (active_planes == 0) { drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); fifo_state->plane[PLANE_PRIMARY] = fifo_left; } return 0; } /* mark all levels starting from 'level' as invalid */ static void vlv_invalidate_wms(struct intel_crtc *crtc, struct vlv_wm_state *wm_state, int level) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); for (; level < dev_priv->display.wm.num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) wm_state->wm[level].plane[plane_id] = USHRT_MAX; wm_state->sr[level].cursor = USHRT_MAX; wm_state->sr[level].plane = USHRT_MAX; } } static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) { if (wm > fifo_size) return USHRT_MAX; else return fifo_size - wm; } /* * Starting from 'level' set all higher * levels to 'value' in the "raw" watermarks. */ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); bool dirty = false; for (; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; dirty |= raw->plane[plane_id] != value; raw->plane[plane_id] = value; } return dirty; } static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; int level; bool dirty = false; if (!intel_wm_plane_visible(crtc_state, plane_state)) { dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); goto out; } for (level = 0; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; int wm = vlv_compute_wm_level(crtc_state, plane_state, level); int max_wm = plane_id == PLANE_CURSOR ? 
63 : 511; if (wm > max_wm) break; dirty |= raw->plane[plane_id] != wm; raw->plane[plane_id] = wm; } /* mark all higher levels as invalid */ dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); out: if (dirty) drm_dbg_kms(&dev_priv->drm, "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", plane->base.name, crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); return dirty; } static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, enum plane_id plane_id, int level) { const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; return raw->plane[plane_id] <= fifo_state->plane[plane_id]; } static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) { return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); } static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); int num_active_planes = hweight8(active_planes); enum plane_id plane_id; int level; /* initially allow all levels */ wm_state->num_levels = dev_priv->display.wm.num_levels; /* * Note that enabling cxsr with no primary/sprite planes * enabled can wedge the pipe. Hence we only allow cxsr * with exactly one enabled primary/sprite plane. */ wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; for (level = 0; level < wm_state->num_levels; level++) { const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) break; for_each_plane_id_on_crtc(crtc, plane_id) { wm_state->wm[level].plane[plane_id] = vlv_invert_wm_value(raw->plane[plane_id], fifo_state->plane[plane_id]); } wm_state->sr[level].plane = vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], raw->plane[PLANE_SPRITE0], raw->plane[PLANE_SPRITE1]), sr_fifo_size); wm_state->sr[level].cursor = vlv_invert_wm_value(raw->plane[PLANE_CURSOR], 63); } if (level == 0) return -EINVAL; /* limit to only levels we can actually handle */ wm_state->num_levels = level; /* invalidate the higher levels */ vlv_invalidate_wms(crtc, wm_state, level); return 0; } static int vlv_compute_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; struct intel_plane *plane; unsigned int dirty = 0; int i; for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (new_plane_state->hw.crtc != &crtc->base && old_plane_state->hw.crtc != &crtc->base) continue; if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) dirty |= BIT(plane->id); } /* * DSPARB registers may have been reset due to the * power well being turned off. 
Make sure we restore * them to a consistent state even if no primary/sprite * planes are initially active. We also force a FIFO * recomputation so that we are sure to sanitize the * FIFO setting we took over from the BIOS even if there * are no active planes on the crtc. */ if (intel_crtc_needs_modeset(crtc_state)) dirty = ~0; if (!dirty) return 0; /* cursor changes don't warrant a FIFO recompute */ if (dirty & ~BIT(PLANE_CURSOR)) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct vlv_fifo_state *old_fifo_state = &old_crtc_state->wm.vlv.fifo_state; const struct vlv_fifo_state *new_fifo_state = &crtc_state->wm.vlv.fifo_state; int ret; ret = vlv_compute_fifo(crtc_state); if (ret) return ret; if (intel_crtc_needs_modeset(crtc_state) || memcmp(old_fifo_state, new_fifo_state, sizeof(*new_fifo_state)) != 0) crtc_state->fifo_changed = true; } return _vlv_compute_pipe_wm(crtc_state); } #define VLV_FIFO(plane, value) \ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) static void vlv_atomic_update_fifo(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_uncore *uncore = &dev_priv->uncore; const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; int sprite0_start, sprite1_start, fifo_size; u32 dsparb, dsparb2, dsparb3; if (!crtc_state->fifo_changed) return; sprite0_start = fifo_state->plane[PLANE_PRIMARY]; sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); drm_WARN_ON(&dev_priv->drm, fifo_size != 511); trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); /* * uncore.lock serves a double purpose here. It allows us to * use the less expensive I915_{READ,WRITE}_FW() functions, and * it protects the DSPARB registers from getting clobbered by * parallel updates from multiple pipes. * * intel_pipe_update_start() has already disabled interrupts * for us, so a plain spin_lock() is sufficient here. 
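 * * Hence the intel_uncore_*_fw() accessors below, which skip the uncore lock and forcewake bookkeeping that the regular accessors would perform.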
*/ spin_lock(&uncore->lock); switch (crtc->pipe) { case PIPE_A: dsparb = intel_uncore_read_fw(uncore, DSPARB); dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | VLV_FIFO(SPRITEB, 0xff)); dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | VLV_FIFO(SPRITEB, sprite1_start)); dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | VLV_FIFO(SPRITEB_HI, 0x1)); dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); intel_uncore_write_fw(uncore, DSPARB, dsparb); intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; case PIPE_B: dsparb = intel_uncore_read_fw(uncore, DSPARB); dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | VLV_FIFO(SPRITED, 0xff)); dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | VLV_FIFO(SPRITED, sprite1_start)); dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | VLV_FIFO(SPRITED_HI, 0xff)); dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); intel_uncore_write_fw(uncore, DSPARB, dsparb); intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; case PIPE_C: dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | VLV_FIFO(SPRITEF, 0xff)); dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | VLV_FIFO(SPRITEF, sprite1_start)); dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | VLV_FIFO(SPRITEF_HI, 0xff)); dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); intel_uncore_write_fw(uncore, DSPARB3, dsparb3); intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; default: break; } intel_uncore_posting_read_fw(uncore, DSPARB); spin_unlock(&uncore->lock); } #undef VLV_FIFO static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; int level; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state)) { *intermediate = *optimal; intermediate->cxsr = false; goto out; } intermediate->num_levels = min(optimal->num_levels, active->num_levels); intermediate->cxsr = optimal->cxsr && active->cxsr && !new_crtc_state->disable_cxsr; for (level = 0; level < intermediate->num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { intermediate->wm[level].plane[plane_id] = min(optimal->wm[level].plane[plane_id], active->wm[level].plane[plane_id]); } intermediate->sr[level].plane = min(optimal->sr[level].plane, active->sr[level].plane); intermediate->sr[level].cursor = min(optimal->sr[level].cursor, active->sr[level].cursor); } vlv_invalidate_wms(crtc, intermediate, level); out: /* * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. 
*/ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) new_crtc_state->wm.need_postvbl_update = true; return 0; } static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { struct intel_crtc *crtc; int num_active_pipes = 0; wm->level = dev_priv->display.wm.num_levels - 1; wm->cxsr = true; for_each_intel_crtc(&dev_priv->drm, crtc) { const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; if (!crtc->active) continue; if (!wm_state->cxsr) wm->cxsr = false; num_active_pipes++; wm->level = min_t(int, wm->level, wm_state->num_levels - 1); } if (num_active_pipes != 1) wm->cxsr = false; if (num_active_pipes > 1) wm->level = VLV_WM_LEVEL_PM2; for_each_intel_crtc(&dev_priv->drm, crtc) { const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; enum pipe pipe = crtc->pipe; wm->pipe[pipe] = wm_state->wm[wm->level]; if (crtc->active && wm->cxsr) wm->sr = wm_state->sr[wm->level]; wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; } } static void vlv_program_watermarks(struct drm_i915_private *dev_priv) { struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; struct vlv_wm_values new_wm = {}; vlv_merge_wm(dev_priv, &new_wm); if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) return; if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) chv_set_memory_dvfs(dev_priv, false); if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) chv_set_memory_pm5(dev_priv, false); if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, false); vlv_write_wm_values(dev_priv, &new_wm); if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, true); if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) chv_set_memory_pm5(dev_priv, true); if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) chv_set_memory_dvfs(dev_priv, true); *old_wm = new_wm; } static void vlv_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; vlv_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void vlv_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; vlv_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void i965_update_wm(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; int srwm = 1; int cursor_sr = 16; bool cxsr_enabled; /* Calc sr entries for one plane configs */ crtc = single_enabled_crtc(dev_priv); if (crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 12000; const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; int width = 
drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp = fb->format->cpp[0]; int entries; entries = intel_wm_method2(pixel_rate, htotal, width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; if (srwm < 0) srwm = 1; srwm &= 0x1ff; drm_dbg_kms(&dev_priv->drm, "self-refresh entries: %d, wm: %d\n", entries, srwm); entries = intel_wm_method2(pixel_rate, htotal, crtc->base.cursor->state->crtc_w, 4, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size) + i965_cursor_wm_info.guard_size; cursor_sr = i965_cursor_wm_info.fifo_size - entries; if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; drm_dbg_kms(&dev_priv->drm, "self-refresh watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); cxsr_enabled = true; } else { cxsr_enabled = false; /* Turn off self refresh if both pipes are enabled */ intel_set_memory_cxsr(dev_priv, false); } drm_dbg_kms(&dev_priv->drm, "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", srwm); /* 965 has limitations... */ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | FW_WM(8, CURSORB) | FW_WM(8, PLANEB) | FW_WM(8, PLANEA)); intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | FW_WM(8, PLANEC_OLD)); /* update cursor SR watermark */ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); } #undef FW_WM static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, enum i9xx_plane_id i9xx_plane) { struct intel_plane *plane; for_each_intel_plane(&i915->drm, plane) { if (plane->id == PLANE_PRIMARY && plane->i9xx_plane == i9xx_plane) return intel_crtc_for_pipe(i915, plane->pipe); } return NULL; } static void i9xx_update_wm(struct drm_i915_private *dev_priv) { const struct intel_watermark_params *wm_info; u32 fwater_lo; u32 fwater_hi; int cwm, srwm = 1; int fifo_size; int planea_wm, planeb_wm; struct intel_crtc *crtc; if (IS_I945GM(dev_priv)) wm_info = &i945_wm_info; else if (DISPLAY_VER(dev_priv) != 2) wm_info = &i915_wm_info; else wm_info = &i830_a_wm_info; if (DISPLAY_VER(dev_priv) == 2) fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; if (DISPLAY_VER(dev_priv) == 2) cpp = 4; else cpp = fb->format->cpp[0]; planea_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { planea_wm = fifo_size - wm_info->guard_size; if (planea_wm > (long)wm_info->max_wm) planea_wm = wm_info->max_wm; } if (DISPLAY_VER(dev_priv) == 2) wm_info = &i830_bc_wm_info; if (DISPLAY_VER(dev_priv) == 2) fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; if (DISPLAY_VER(dev_priv) == 2) cpp = 4; else cpp = fb->format->cpp[0]; planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { planeb_wm = fifo_size - wm_info->guard_size; if (planeb_wm > (long)wm_info->max_wm) planeb_wm = wm_info->max_wm; } drm_dbg_kms(&dev_priv->drm, "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); crtc = single_enabled_crtc(dev_priv); if 
(IS_I915GM(dev_priv) && crtc) { struct drm_i915_gem_object *obj; obj = intel_fb_obj(crtc->base.primary->state->fb); /* self-refresh seems busted with untiled */ if (!i915_gem_object_is_tiled(obj)) crtc = NULL; } /* * Overlay gets an aggressive default since video jitter is bad. */ cwm = 2; /* Play safe and disable self-refresh before adjusting watermarks. */ intel_set_memory_cxsr(dev_priv, false); /* Calc sr entries for one plane configs */ if (HAS_FW_BLC(dev_priv) && crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp; int entries; if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) cpp = 4; else cpp = fb->format->cpp[0]; entries = intel_wm_method2(pixel_rate, htotal, width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); drm_dbg_kms(&dev_priv->drm, "self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; if (srwm < 0) srwm = 1; if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); else intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); } drm_dbg_kms(&dev_priv->drm, "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", planea_wm, planeb_wm, cwm, srwm); fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); fwater_hi = (cwm & 0x1f); /* Set request length to 8 cachelines per fetch */ fwater_lo = fwater_lo | (1 << 24) | (1 << 8); fwater_hi = fwater_hi | (1 << 8); intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); if (crtc) intel_set_memory_cxsr(dev_priv, true); } static void i845_update_wm(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; u32 fwater_lo; int planea_wm; crtc = single_enabled_crtc(dev_priv); if (crtc == NULL) return; planea_wm = intel_calculate_wm(crtc->config->pixel_rate, &i845_wm_info, i845_get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; drm_dbg_kms(&dev_priv->drm, "Setting FIFO watermarks - A: %d\n", planea_wm); intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); } /* latency must be in 0.1us units. */ static unsigned int ilk_wm_method1(unsigned int pixel_rate, unsigned int cpp, unsigned int latency) { unsigned int ret; ret = intel_wm_method1(pixel_rate, cpp, latency); ret = DIV_ROUND_UP(ret, 64) + 2; return ret; } /* latency must be in 0.1us units. */ static unsigned int ilk_wm_method2(unsigned int pixel_rate, unsigned int htotal, unsigned int width, unsigned int cpp, unsigned int latency) { unsigned int ret; ret = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); ret = DIV_ROUND_UP(ret, 64) + 2; return ret; } static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) { /* * Neither of these should be possible since this function shouldn't be * called if the CRTC is off or the plane is invisible. But let's be * extra paranoid to avoid a potential divide-by-zero if we screw up * elsewhere in the driver. 
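 * * Effectively the value computed below converts pri_val from cachelines back to bytes (times 64) and divides by the plane's bytes per line, plus a guard of 2, i.e. the FBC watermark ends up expressed in lines.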
*/ if (WARN_ON(!cpp)) return 0; if (WARN_ON(!horiz_pixels)) return 0; return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; } struct ilk_wm_maximums { u16 pri; u16 spr; u16 cur; u16 fbc; }; /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, u32 mem_value, bool is_lp) { u32 method1, method2; int cpp; if (mem_value == 0) return U32_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); if (!is_lp) return method1; method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); } /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, u32 mem_value) { u32 method1, method2; int cpp; if (mem_value == 0) return U32_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); } /* * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, u32 mem_value) { int cpp; if (mem_value == 0) return U32_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; return ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); } /* Only for WM_LP. */ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, u32 pri_val) { int cpp; if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; cpp = plane_state->hw.fb->format->cpp[0]; return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, cpp); } static unsigned int ilk_display_fifo_size(const struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 8) return 3072; else if (DISPLAY_VER(dev_priv) >= 7) return 768; else return 512; } static unsigned int ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, int level, bool is_sprite) { if (DISPLAY_VER(dev_priv) >= 8) /* BDW primary/sprite plane watermarks */ return level == 0 ? 255 : 2047; else if (DISPLAY_VER(dev_priv) >= 7) /* IVB/HSW primary/sprite plane watermarks */ return level == 0 ? 127 : 1023; else if (!is_sprite) /* ILK/SNB primary plane watermarks */ return level == 0 ? 127 : 511; else /* ILK/SNB sprite plane watermarks */ return level == 0 ? 63 : 255; } static unsigned int ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) { if (DISPLAY_VER(dev_priv) >= 7) return level == 0 ? 63 : 255; else return level == 0 ? 
31 : 63; } static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 8) return 31; else return 15; } /* Calculate the maximum primary/sprite plane watermark */ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, bool is_sprite) { unsigned int fifo_size = ilk_display_fifo_size(dev_priv); /* if sprites aren't enabled, sprites get nothing */ if (is_sprite && !config->sprites_enabled) return 0; /* HSW allows LP1+ watermarks even with multiple pipes */ if (level == 0 || config->num_pipes_active > 1) { fifo_size /= INTEL_NUM_PIPES(dev_priv); /* * For some reason the non self refresh * FIFO size is only half of the self * refresh FIFO size on ILK/SNB. */ if (DISPLAY_VER(dev_priv) <= 6) fifo_size /= 2; } if (config->sprites_enabled) { /* level 0 is always calculated with 1:1 split */ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { if (is_sprite) fifo_size *= 5; fifo_size /= 6; } else { fifo_size /= 2; } } /* clamp to max that the registers can hold */ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); } /* Calculate the maximum cursor plane watermark */ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config) { /* HSW LP1+ watermarks w/ multiple pipes */ if (level > 0 && config->num_pipes_active > 1) return 64; /* otherwise just report max that registers can hold */ return ilk_cursor_wm_reg_max(dev_priv, level); } static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, struct ilk_wm_maximums *max) { max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); max->cur = ilk_cursor_wm_max(dev_priv, level, config); max->fbc = ilk_fbc_wm_reg_max(dev_priv); } static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, int level, struct ilk_wm_maximums *max) { max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); max->cur = ilk_cursor_wm_reg_max(dev_priv, level); max->fbc = ilk_fbc_wm_reg_max(dev_priv); } static bool ilk_validate_wm_level(int level, const struct ilk_wm_maximums *max, struct intel_wm_level *result) { bool ret; /* already determined to be invalid? */ if (!result->enable) return false; result->enable = result->pri_val <= max->pri && result->spr_val <= max->spr && result->cur_val <= max->cur; ret = result->enable; /* * HACK until we can pre-compute everything, * and thus fail gracefully if LP0 watermarks * are exceeded... 
*/ if (level == 0 && !result->enable) { if (result->pri_val > max->pri) DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", level, result->pri_val, max->pri); if (result->spr_val > max->spr) DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", level, result->spr_val, max->spr); if (result->cur_val > max->cur) DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", level, result->cur_val, max->cur); result->pri_val = min_t(u32, result->pri_val, max->pri); result->spr_val = min_t(u32, result->spr_val, max->spr); result->cur_val = min_t(u32, result->cur_val, max->cur); result->enable = true; } return ret; } static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_crtc *crtc, int level, struct intel_crtc_state *crtc_state, const struct intel_plane_state *pristate, const struct intel_plane_state *sprstate, const struct intel_plane_state *curstate, struct intel_wm_level *result) { u16 pri_latency = dev_priv->display.wm.pri_latency[level]; u16 spr_latency = dev_priv->display.wm.spr_latency[level]; u16 cur_latency = dev_priv->display.wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units */ if (level > 0) { pri_latency *= 5; spr_latency *= 5; cur_latency *= 5; } if (pristate) { result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, pri_latency, level); result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); } if (sprstate) result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); if (curstate) result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); result->enable = true; } static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { u64 sskpd; i915->display.wm.num_levels = 5; sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); if (wm[0] == 0) wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); } static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { u32 sskpd; i915->display.wm.num_levels = 4; sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); } static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { u32 mltr; i915->display.wm.num_levels = 3; mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); /* ILK primary LP0 latency is 700 ns */ wm[0] = 7; wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); } static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, u16 wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ if (DISPLAY_VER(dev_priv) == 5) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, u16 wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ if (DISPLAY_VER(dev_priv) == 5) wm[0] = 13; } static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, u16 wm[5], u16 min) { int level; if (wm[0] >= min) return false; wm[0] = max(wm[0], min); for (level = 1; level < dev_priv->display.wm.num_levels; level++) wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); return true; } static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) { bool changed; /* * The BIOS provided WM memory 
latency values are often * inadequate for high resolution displays. Adjust them. */ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); if (!changed) return; drm_dbg_kms(&dev_priv->drm, "WM latency values increased to avoid potential underruns\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) { /* * On some SNB machines (Thinkpad X220 Tablet at least) * LP3 usage can cause vblank interrupts to be lost. * The DEIIR bit will go high but it looks like the CPU * never gets interrupted. * * It's not clear whether other interrupt source could * be affected or if this is somehow limited to vblank * interrupts only. To play it safe we disable LP3 * watermarks entirely. */ if (dev_priv->display.wm.pri_latency[3] == 0 && dev_priv->display.wm.spr_latency[3] == 0 && dev_priv->display.wm.cur_latency[3] == 0) return; dev_priv->display.wm.pri_latency[3] = 0; dev_priv->display.wm.spr_latency[3] = 0; dev_priv->display.wm.cur_latency[3] = 0; drm_dbg_kms(&dev_priv->drm, "LP3 watermarks disabled due to potential for lost interrupts\n"); intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); else if (DISPLAY_VER(dev_priv) >= 6) snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); else ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, sizeof(dev_priv->display.wm.pri_latency)); memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, sizeof(dev_priv->display.wm.pri_latency)); intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); if (DISPLAY_VER(dev_priv) == 6) { snb_wm_latency_quirk(dev_priv); snb_wm_lp3_irq_quirk(dev_priv); } } static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, struct intel_pipe_wm *pipe_wm) { /* LP0 watermark maximums depend on this pipe alone */ const struct intel_wm_config config = { .num_pipes_active = 1, .sprites_enabled = pipe_wm->sprites_enabled, .sprites_scaled = pipe_wm->sprites_scaled, }; struct ilk_wm_maximums max; /* LP0 watermarks always use 1/2 DDB partitioning */ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); /* At least LP0 must be valid */ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); return false; } return true; } /* Compute new watermarks for the pipe */ static int ilk_compute_pipe_wm(struct intel_atomic_state 
*state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_pipe_wm *pipe_wm; struct intel_plane *plane; const struct intel_plane_state *plane_state; const struct intel_plane_state *pristate = NULL; const struct intel_plane_state *sprstate = NULL; const struct intel_plane_state *curstate = NULL; struct ilk_wm_maximums max; int level, usable_level; pipe_wm = &crtc_state->wm.ilk.optimal; intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) pristate = plane_state; else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) sprstate = plane_state; else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) curstate = plane_state; } pipe_wm->pipe_enabled = crtc_state->hw.active; pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0); pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); usable_level = dev_priv->display.wm.num_levels - 1; /* ILK/SNB: LP2+ watermarks only w/o sprites */ if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) usable_level = 1; /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ if (pipe_wm->sprites_scaled) usable_level = 0; memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, pristate, sprstate, curstate, &pipe_wm->wm[0]); if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) return -EINVAL; ilk_compute_wm_reg_maximums(dev_priv, 1, &max); for (level = 1; level <= usable_level; level++) { struct intel_wm_level *wm = &pipe_wm->wm[level]; ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, pristate, sprstate, curstate, wm); /* * Disable any watermark level that exceeds the * register maximums since such watermarks are * always invalid. */ if (!ilk_validate_wm_level(level, &max, wm)) { memset(wm, 0, sizeof(*wm)); break; } } return 0; } /* * Build a set of 'intermediate' watermark values that satisfy both the old * state and the new state. These can be programmed to the hardware * immediately. */ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; int level; /* * Start with the final, target watermarks, then combine with the * currently active watermarks to get values that are safe both before * and after the vblank. 
*/ *a = new_crtc_state->wm.ilk.optimal; if (!new_crtc_state->hw.active || intel_crtc_needs_modeset(new_crtc_state) || state->skip_intermediate_wm) return 0; a->pipe_enabled |= b->pipe_enabled; a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { struct intel_wm_level *a_wm = &a->wm[level]; const struct intel_wm_level *b_wm = &b->wm[level]; a_wm->enable &= b_wm->enable; a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); } /* * We need to make sure that these merged watermark values are * actually a valid configuration themselves. If they're not, * there's no safe way to transition from the old state to * the new state, so we need to fail the atomic transaction. */ if (!ilk_validate_pipe_wm(dev_priv, a)) return -EINVAL; /* * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. */ if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) new_crtc_state->wm.need_postvbl_update = true; return 0; } /* * Merge the watermarks from all active pipes for a specific level. */ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, int level, struct intel_wm_level *ret_wm) { const struct intel_crtc *crtc; ret_wm->enable = true; for_each_intel_crtc(&dev_priv->drm, crtc) { const struct intel_pipe_wm *active = &crtc->wm.active.ilk; const struct intel_wm_level *wm = &active->wm[level]; if (!active->pipe_enabled) continue; /* * The watermark values may have been used in the past, * so we must maintain them in the registers for some * time even if the level is now disabled. */ if (!wm->enable) ret_wm->enable = false; ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); } } /* * Merge all low power watermarks for all active pipes. */ static void ilk_wm_merge(struct drm_i915_private *dev_priv, const struct intel_wm_config *config, const struct ilk_wm_maximums *max, struct intel_pipe_wm *merged) { int level, num_levels = dev_priv->display.wm.num_levels; int last_enabled_level = num_levels - 1; /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && config->num_pipes_active > 1) last_enabled_level = 0; /* ILK: FBC WM must be disabled always */ merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; /* merge each WM1+ level */ for (level = 1; level < num_levels; level++) { struct intel_wm_level *wm = &merged->wm[level]; ilk_merge_wm_level(dev_priv, level, wm); if (level > last_enabled_level) wm->enable = false; else if (!ilk_validate_wm_level(level, max, wm)) /* make sure all following levels get disabled */ last_enabled_level = level - 1; /* * The spec says it is preferred to disable * FBC WMs instead of disabling a WM level. 
*/ if (wm->fbc_val > max->fbc) { if (wm->enable) merged->fbc_wm_enabled = false; wm->fbc_val = 0; } } /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { for (level = 2; level < num_levels; level++) { struct intel_wm_level *wm = &merged->wm[level]; wm->enable = false; } } } static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) { /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); } /* The value we need to program into the WM_LPx latency field */ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, int level) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) return 2 * level; else return dev_priv->display.wm.pri_latency[level]; } static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, const struct intel_pipe_wm *merged, enum intel_ddb_partitioning partitioning, struct ilk_wm_values *results) { struct intel_crtc *crtc; int level, wm_lp; results->enable_fbc_wm = merged->fbc_wm_enabled; results->partitioning = partitioning; /* LP1+ register values */ for (wm_lp = 1; wm_lp <= 3; wm_lp++) { const struct intel_wm_level *r; level = ilk_wm_lp_to_level(wm_lp, merged); r = &merged->wm[level]; /* * Maintain the watermark values even if the level is * disabled. Doing otherwise could cause underruns. */ results->wm_lp[wm_lp - 1] = WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | WM_LP_PRIMARY(r->pri_val) | WM_LP_CURSOR(r->cur_val); if (r->enable) results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; if (DISPLAY_VER(dev_priv) >= 8) results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); else results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); /* * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the * level is disabled. Doing otherwise could cause underruns. */ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { drm_WARN_ON(&dev_priv->drm, wm_lp != 1); results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; } } /* LP0 register values */ for_each_intel_crtc(&dev_priv->drm, crtc) { enum pipe pipe = crtc->pipe; const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; const struct intel_wm_level *r = &pipe_wm->wm[0]; if (drm_WARN_ON(&dev_priv->drm, !r->enable)) continue; results->wm_pipe[pipe] = WM0_PIPE_PRIMARY(r->pri_val) | WM0_PIPE_SPRITE(r->spr_val) | WM0_PIPE_CURSOR(r->cur_val); } } /* * Find the result with the highest level enabled. Check for enable_fbc_wm in * case both are at the same level. Prefer r1 in case they're the same. 
*/ static struct intel_pipe_wm * ilk_find_best_result(struct drm_i915_private *dev_priv, struct intel_pipe_wm *r1, struct intel_pipe_wm *r2) { int level, level1 = 0, level2 = 0; for (level = 1; level < dev_priv->display.wm.num_levels; level++) { if (r1->wm[level].enable) level1 = level; if (r2->wm[level].enable) level2 = level; } if (level1 == level2) { if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) return r2; else return r1; } else if (level1 > level2) { return r1; } else { return r2; } } /* dirty bits used to track which watermarks need changes */ #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) #define WM_DIRTY_FBC (1 << 24) #define WM_DIRTY_DDB (1 << 25) static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, const struct ilk_wm_values *old, const struct ilk_wm_values *new) { unsigned int dirty = 0; enum pipe pipe; int wm_lp; for_each_pipe(dev_priv, pipe) { if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { dirty |= WM_DIRTY_PIPE(pipe); /* Must disable LP1+ watermarks too */ dirty |= WM_DIRTY_LP_ALL; } } if (old->enable_fbc_wm != new->enable_fbc_wm) { dirty |= WM_DIRTY_FBC; /* Must disable LP1+ watermarks too */ dirty |= WM_DIRTY_LP_ALL; } if (old->partitioning != new->partitioning) { dirty |= WM_DIRTY_DDB; /* Must disable LP1+ watermarks too */ dirty |= WM_DIRTY_LP_ALL; } /* LP1+ watermarks already deemed dirty, no need to continue */ if (dirty & WM_DIRTY_LP_ALL) return dirty; /* Find the lowest numbered LP1+ watermark in need of an update... */ for (wm_lp = 1; wm_lp <= 3; wm_lp++) { if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) break; } /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ for (; wm_lp <= 3; wm_lp++) dirty |= WM_DIRTY_LP(wm_lp); return dirty; } static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, unsigned int dirty) { struct ilk_wm_values *previous = &dev_priv->display.wm.hw; bool changed = false; if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { previous->wm_lp[2] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); changed = true; } if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { previous->wm_lp[1] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); changed = true; } if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { previous->wm_lp[0] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); changed = true; } /* * Don't touch WM_LP_SPRITE_ENABLE here. * Doing so could cause underruns. */ return changed; } /* * The spec says we shouldn't write when we don't need, because every write * causes WMs to be re-evaluated, expending some power. 
*/ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { struct ilk_wm_values *previous = &dev_priv->display.wm.hw; unsigned int dirty; dirty = ilk_compute_wm_dirty(dev_priv, previous, results); if (!dirty) return; _ilk_disable_lp_wm(dev_priv, dirty); if (dirty & WM_DIRTY_PIPE(PIPE_A)) intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); if (dirty & WM_DIRTY_PIPE(PIPE_B)) intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); if (dirty & WM_DIRTY_PIPE(PIPE_C)) intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); if (dirty & WM_DIRTY_DDB) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6, results->partitioning == INTEL_DDB_PART_1_2 ? 0 : WM_MISC_DATA_PARTITION_5_6); else intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6, results->partitioning == INTEL_DDB_PART_1_2 ? 0 : DISP_DATA_PARTITION_5_6); } if (dirty & WM_DIRTY_FBC) intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS, results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS); if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); if (DISPLAY_VER(dev_priv) >= 7) { if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); } if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); dev_priv->display.wm.hw = *results; } bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) { return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, struct intel_wm_config *config) { struct intel_crtc *crtc; /* Compute the currently _active_ config */ for_each_intel_crtc(&dev_priv->drm, crtc) { const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; if (!wm->pipe_enabled) continue; config->sprites_enabled |= wm->sprites_enabled; config->sprites_scaled |= wm->sprites_scaled; config->num_pipes_active++; } } static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; struct intel_wm_config config = {}; struct ilk_wm_values results = {}; enum intel_ddb_partitioning partitioning; ilk_compute_wm_config(dev_priv, &config); ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ if (DISPLAY_VER(dev_priv) >= 7 && config.num_pipes_active == 1 && config.sprites_enabled) { ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); } else { best_lp_wm = &lp_wm_1_2; } partitioning = (best_lp_wm == &lp_wm_1_2) ? 
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); ilk_write_wm_values(dev_priv, &results); } static void ilk_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); memset(active, 0, sizeof(*active)); active->pipe_enabled = crtc->active; if (active->pipe_enabled) { u32 tmp = hw->wm_pipe[pipe]; /* * For active pipes LP0 watermark is marked as * enabled, and LP1+ watermaks as disabled since * we can't really reverse compute them in case * multiple pipes are active. */ active->wm[0].enable = true; active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); } else { int level; /* * For inactive pipes, all watermark levels * should be marked as enabled but zeroed, * which is what we'd compute them to. */ for (level = 0; level < dev_priv->display.wm.num_levels; level++) active->wm[level].enable = true; } crtc->wm.active.ilk = *active; } static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state) { struct drm_plane *plane; struct intel_crtc *crtc; for_each_intel_crtc(state->dev, crtc) { struct intel_crtc_state *crtc_state; crtc_state = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); if (crtc_state->hw.active) { /* * Preserve the inherited flag to avoid * taking the full modeset path. */ crtc_state->inherited = true; } } drm_for_each_plane(plane, state->dev) { struct drm_plane_state *plane_state; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); } return 0; } /* * Calculate what we think the watermarks should be for the state we've read * out of the hardware and then immediately program those watermarks so that * we ensure the hardware settings match our internal state. * * We can calculate what we think WM's should be by creating a duplicate of the * current state (which was constructed during hardware readout) and running it * through the atomic check code to calculate new watermark values in the * state object. 
*/ void ilk_wm_sanitize(struct drm_i915_private *dev_priv) { struct drm_atomic_state *state; struct intel_atomic_state *intel_state; struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; struct drm_modeset_acquire_ctx ctx; int ret; int i; /* Only supported on platforms that use atomic watermark design */ if (!dev_priv->display.funcs.wm->optimize_watermarks) return; if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9)) return; state = drm_atomic_state_alloc(&dev_priv->drm); if (drm_WARN_ON(&dev_priv->drm, !state)) return; intel_state = to_intel_atomic_state(state); drm_modeset_acquire_init(&ctx, 0); state->acquire_ctx = &ctx; to_intel_atomic_state(state)->internal = true; retry: /* * Hardware readout is the only time we don't want to calculate * intermediate watermarks (since we don't trust the current * watermarks). */ if (!HAS_GMCH(dev_priv)) intel_state->skip_intermediate_wm = true; ret = ilk_sanitize_watermarks_add_affected(state); if (ret) goto fail; ret = intel_atomic_check(&dev_priv->drm, state); if (ret) goto fail; /* Write calculated watermark values back */ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { crtc_state->wm.need_postvbl_update = true; intel_optimize_watermarks(intel_state, crtc); to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } fail: if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } /* * If we fail here, it means that the hardware appears to be * programmed in a way that shouldn't be possible, given our * understanding of watermark requirements. This might mean a * mistake in the hardware readout code or a mistake in the * watermark calculations for a given platform. Raise a WARN * so that this is noticeable. * * If this actually happens, we'll have to just leave the * BIOS-programmed watermarks untouched and hope for the best. 
*/ drm_WARN(&dev_priv->drm, ret, "Could not determine valid watermarks for inherited state\n"); drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } #define _FW_WM(value, plane) \ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) #define _FW_WM_VLV(value, plane) \ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) static void g4x_read_wm_values(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { u32 tmp; tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); wm->fbc_en = tmp & DSPFW_FBC_SR_EN; wm->sr.fbc = _FW_WM(tmp, FBC_SR); wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); wm->hpll.plane = _FW_WM(tmp, HPLL_SR); } static void vlv_read_wm_values(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { enum pipe pipe; u32 tmp; for_each_pipe(dev_priv, pipe) { tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); wm->ddl[pipe].plane[PLANE_PRIMARY] = (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); wm->ddl[pipe].plane[PLANE_CURSOR] = (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); wm->ddl[pipe].plane[PLANE_SPRITE0] = (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); wm->ddl[pipe].plane[PLANE_SPRITE1] = (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); } tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); if (IS_CHERRYVIEW(dev_priv)) { tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; } else { tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; } } #undef _FW_WM #undef _FW_WM_VLV static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) { struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; struct intel_crtc *crtc; g4x_read_wm_values(dev_priv, wm); wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct g4x_wm_state *active = &crtc->wm.active.g4x; struct g4x_pipe_wm *raw; enum pipe pipe = crtc->pipe; enum plane_id plane_id; int level, max_level; active->cxsr = wm->cxsr; active->hpll_en = wm->hpll_en; active->fbc_en = wm->fbc_en; active->sr = wm->sr; active->hpll = wm->hpll; for_each_plane_id_on_crtc(crtc, plane_id) { active->wm.plane[plane_id] = wm->pipe[pipe].plane[plane_id]; } if (wm->cxsr && wm->hpll_en) max_level = G4X_WM_LEVEL_HPLL; else if (wm->cxsr) max_level = G4X_WM_LEVEL_SR; else max_level = G4X_WM_LEVEL_NORMAL; level = G4X_WM_LEVEL_NORMAL; raw = &crtc_state->wm.g4x.raw[level]; for_each_plane_id_on_crtc(crtc, plane_id) raw->plane[plane_id] = active->wm.plane[plane_id]; level = G4X_WM_LEVEL_SR; if (level > max_level) goto out; raw = &crtc_state->wm.g4x.raw[level]; raw->plane[PLANE_PRIMARY] = active->sr.plane; raw->plane[PLANE_CURSOR] = active->sr.cursor; raw->plane[PLANE_SPRITE0] = 0; raw->fbc = active->sr.fbc; level = G4X_WM_LEVEL_HPLL; if (level > max_level) goto out; raw = &crtc_state->wm.g4x.raw[level]; raw->plane[PLANE_PRIMARY] = active->hpll.plane; raw->plane[PLANE_CURSOR] = active->hpll.cursor; raw->plane[PLANE_SPRITE0] = 0; raw->fbc = active->hpll.fbc; level++; out: for_each_plane_id_on_crtc(crtc, plane_id) g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); g4x_invalidate_wms(crtc, active, level); crtc_state->wm.g4x.optimal = *active; crtc_state->wm.g4x.intermediate = *active; drm_dbg_kms(&dev_priv->drm, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", pipe_name(pipe), wm->pipe[pipe].plane[PLANE_PRIMARY], wm->pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0]); } drm_dbg_kms(&dev_priv->drm, "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", wm->sr.plane, wm->sr.cursor, wm->sr.fbc); drm_dbg_kms(&dev_priv->drm, "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", str_yes_no(wm->cxsr), 
str_yes_no(wm->hpll_en), str_yes_no(wm->fbc_en)); } static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) { struct intel_plane *plane; struct intel_crtc *crtc; mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); enum plane_id plane_id = plane->id; int level; if (plane_state->uapi.visible) continue; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; raw->plane[plane_id] = 0; if (plane_id == PLANE_PRIMARY) raw->fbc = 0; } } for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); int ret; ret = _g4x_compute_pipe_wm(crtc_state); drm_WARN_ON(&dev_priv->drm, ret); crtc_state->wm.g4x.intermediate = crtc_state->wm.g4x.optimal; crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; } g4x_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) { g4x_wm_get_hw_state(i915); g4x_wm_sanitize(i915); } static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) { struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; struct intel_crtc *crtc; u32 val; vlv_read_wm_values(dev_priv, wm); wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; wm->level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (val & DSP_MAXFIFO_PM5_ENABLE) wm->level = VLV_WM_LEVEL_PM5; /* * If DDR DVFS is disabled in the BIOS, Punit * will never ack the request. So if that happens * assume we don't have to enable/disable DDR DVFS * dynamically. To test that just set the REQ_ACK * bit to poke the Punit, but don't change the * HIGH/LOW bits so that we don't actually change * the current state. 
*/ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); val |= FORCE_DDR_FREQ_REQ_ACK; vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { drm_dbg_kms(&dev_priv->drm, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if ((val & FORCE_DDR_HIGH_FREQ) == 0) wm->level = VLV_WM_LEVEL_DDR_DVFS; } vlv_punit_put(dev_priv); } for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct vlv_wm_state *active = &crtc->wm.active.vlv; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; enum pipe pipe = crtc->pipe; enum plane_id plane_id; int level; vlv_get_fifo_size(crtc_state); active->num_levels = wm->level + 1; active->cxsr = wm->cxsr; for (level = 0; level < active->num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; active->sr[level].plane = wm->sr.plane; active->sr[level].cursor = wm->sr.cursor; for_each_plane_id_on_crtc(crtc, plane_id) { active->wm[level].plane[plane_id] = wm->pipe[pipe].plane[plane_id]; raw->plane[plane_id] = vlv_invert_wm_value(active->wm[level].plane[plane_id], fifo_state->plane[plane_id]); } } for_each_plane_id_on_crtc(crtc, plane_id) vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); vlv_invalidate_wms(crtc, active, level); crtc_state->wm.vlv.optimal = *active; crtc_state->wm.vlv.intermediate = *active; drm_dbg_kms(&dev_priv->drm, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", pipe_name(pipe), wm->pipe[pipe].plane[PLANE_PRIMARY], wm->pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0], wm->pipe[pipe].plane[PLANE_SPRITE1]); } drm_dbg_kms(&dev_priv->drm, "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); } static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) { struct intel_plane *plane; struct intel_crtc *crtc; mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); enum plane_id plane_id = plane->id; int level; if (plane_state->uapi.visible) continue; for (level = 0; level < dev_priv->display.wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; raw->plane[plane_id] = 0; } } for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); int ret; ret = _vlv_compute_pipe_wm(crtc_state); drm_WARN_ON(&dev_priv->drm, ret); crtc_state->wm.vlv.intermediate = crtc_state->wm.vlv.optimal; crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; } vlv_program_watermarks(dev_priv); mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) { vlv_wm_get_hw_state(i915); vlv_wm_sanitize(i915); } /* * FIXME should probably kill this and improve * the real watermark readout/sanitation instead */ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); 
intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); /* * Don't touch WM_LP_SPRITE_ENABLE here. * Doing so could cause underruns. */ } static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) { struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc *crtc; ilk_init_lp_watermarks(dev_priv); for_each_intel_crtc(&dev_priv->drm, crtc) ilk_pipe_wm_get_hw_state(crtc); hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); if (DISPLAY_VER(dev_priv) >= 7) { hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; else if (IS_IVYBRIDGE(dev_priv)) hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; hw->enable_fbc_wm = !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); } static const struct intel_wm_funcs ilk_wm_funcs = { .compute_pipe_wm = ilk_compute_pipe_wm, .compute_intermediate_wm = ilk_compute_intermediate_wm, .initial_watermarks = ilk_initial_watermarks, .optimize_watermarks = ilk_optimize_watermarks, .get_hw_state = ilk_wm_get_hw_state, }; static const struct intel_wm_funcs vlv_wm_funcs = { .compute_pipe_wm = vlv_compute_pipe_wm, .compute_intermediate_wm = vlv_compute_intermediate_wm, .initial_watermarks = vlv_initial_watermarks, .optimize_watermarks = vlv_optimize_watermarks, .atomic_update_watermarks = vlv_atomic_update_fifo, .get_hw_state = vlv_wm_get_hw_state_and_sanitize, }; static const struct intel_wm_funcs g4x_wm_funcs = { .compute_pipe_wm = g4x_compute_pipe_wm, .compute_intermediate_wm = g4x_compute_intermediate_wm, .initial_watermarks = g4x_initial_watermarks, .optimize_watermarks = g4x_optimize_watermarks, .get_hw_state = g4x_wm_get_hw_state_and_sanitize, }; static const struct intel_wm_funcs pnv_wm_funcs = { .update_wm = pnv_update_wm, }; static const struct intel_wm_funcs i965_wm_funcs = { .update_wm = i965_update_wm, }; static const struct intel_wm_funcs i9xx_wm_funcs = { .update_wm = i9xx_update_wm, }; static const struct intel_wm_funcs i845_wm_funcs = { .update_wm = i845_update_wm, }; static const struct intel_wm_funcs nop_funcs = { }; void i9xx_wm_init(struct drm_i915_private *dev_priv) { /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); dev_priv->display.funcs.wm = &ilk_wm_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); dev_priv->display.funcs.wm = &vlv_wm_funcs; } else if (IS_G4X(dev_priv)) { g4x_setup_wm_latency(dev_priv); dev_priv->display.funcs.wm = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq)) { drm_info(&dev_priv->drm, "failed to find known CxSR latency " "(found ddr%s fsb freq %d, mem freq %d), " "disabling CxSR\n", (dev_priv->is_ddr3 == 1) ? 
"3" : "2", dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ intel_set_memory_cxsr(dev_priv, false); dev_priv->display.funcs.wm = &nop_funcs; } else { dev_priv->display.funcs.wm = &pnv_wm_funcs; } } else if (DISPLAY_VER(dev_priv) == 4) { dev_priv->display.funcs.wm = &i965_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 3) { dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 2) { if (INTEL_NUM_PIPES(dev_priv) == 1) dev_priv->display.funcs.wm = &i845_wm_funcs; else dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else { drm_err(&dev_priv->drm, "unexpected fall-through in %s\n", __func__); dev_priv->display.funcs.wm = &nop_funcs; } }
linux-master
drivers/gpu/drm/i915/display/i9xx_wm.c
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_pcode.h" static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 val; if (!crtc_state->ips_enabled) return; /* * We can only enable IPS after we enable a plane and wait for a vblank * This function is called from post_plane_update, which is run after * a vblank wait. */ drm_WARN_ON(&i915->drm, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); val = IPS_ENABLE; if (i915->display.ips.false_color) val |= IPS_FALSE_COLOR; if (IS_BROADWELL(i915)) { drm_WARN_ON(&i915->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, val | IPS_PCODE_CONTROL)); /* * Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the * mailbox." Moreover, the mailbox may return a bogus state, * so we need to just enable it and continue on. */ } else { intel_de_write(i915, IPS_CTL, val); /* * The bit only becomes 1 in the next vblank, so this wait here * is essentially intel_wait_for_vblank. If we don't have this * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50)) drm_err(&i915->drm, "Timed out waiting for IPS enable\n"); } } bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); bool need_vblank_wait = false; if (!crtc_state->ips_enabled) return need_vblank_wait; if (IS_BROADWELL(i915)) { drm_WARN_ON(&i915->drm, snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100)) drm_err(&i915->drm, "Timed out waiting for IPS disable\n"); } else { intel_de_write(i915, IPS_CTL, 0); intel_de_posting_read(i915, IPS_CTL); } /* We need to wait for a vblank before we can disable the plane. */ need_vblank_wait = true; return need_vblank_wait; } static bool hsw_ips_need_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!old_crtc_state->ips_enabled) return false; if (intel_crtc_needs_modeset(new_crtc_state)) return true; /* * Workaround : Do not read or write the pipe palette/gamma data while * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. * * Disable IPS before we program the LUT. 
*/ if (IS_HASWELL(i915) && intel_crtc_needs_color_update(new_crtc_state) && new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return true; return !new_crtc_state->ips_enabled; } bool hsw_ips_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); if (!hsw_ips_need_disable(state, crtc)) return false; return hsw_ips_disable(old_crtc_state); } static bool hsw_ips_need_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state->ips_enabled) return false; if (intel_crtc_needs_modeset(new_crtc_state)) return true; /* * Workaround : Do not read or write the pipe palette/gamma data while * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. * * Re-enable IPS after the LUT has been programmed. */ if (IS_HASWELL(i915) && intel_crtc_needs_color_update(new_crtc_state) && new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return true; /* * We can't read out IPS on broadwell, assume the worst and * forcibly enable IPS on the first fastset. */ if (intel_crtc_needs_fastset(new_crtc_state) && old_crtc_state->inherited) return true; return !old_crtc_state->ips_enabled; } void hsw_ips_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!hsw_ips_need_enable(state, crtc)) return; hsw_ips_enable(new_crtc_state); } /* IPS only exists on ULT machines and is tied to pipe A. */ bool hsw_crtc_supports_ips(struct intel_crtc *crtc) { return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; } bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* IPS only exists on ULT machines and is tied to pipe A. */ if (!hsw_crtc_supports_ips(crtc)) return false; if (!i915->params.enable_ips) return false; if (crtc_state->pipe_bpp > 24) return false; /* * We compare against max which means we must take * the increased cdclk requirement into account when * calculating the new cdclk. * * Should measure whether using a lower cdclk w/o IPS */ if (IS_BROADWELL(i915) && crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100) return false; return true; } int hsw_ips_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); crtc_state->ips_enabled = false; if (!hsw_crtc_state_ips_capable(crtc_state)) return 0; /* * When IPS gets enabled, the pipe CRC changes. Since IPS gets * enabled and disabled dynamically based on package C states, * user space can't make reliable use of the CRCs, so let's just * completely disable it. */ if (crtc_state->crc_enabled) return 0; /* IPS should be fine as long as at least one plane is enabled. 
*/ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) return 0; if (IS_BROADWELL(i915)) { const struct intel_cdclk_state *cdclk_state; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) return 0; } crtc_state->ips_enabled = true; return 0; } void hsw_ips_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (!hsw_crtc_supports_ips(crtc)) return; if (IS_HASWELL(i915)) { crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE; } else { /* * We cannot readout IPS state on broadwell, set to * true so we can set it to a defined state on first * commit. */ crtc_state->ips_enabled = true; } } static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) { struct intel_crtc *crtc = data; struct drm_i915_private *i915 = to_i915(crtc->base.dev); *val = i915->display.ips.false_color; return 0; } static int hsw_ips_debugfs_false_color_set(void *data, u64 val) { struct intel_crtc *crtc = data; struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state; int ret; ret = drm_modeset_lock(&crtc->base.mutex, NULL); if (ret) return ret; i915->display.ips.false_color = val; crtc_state = to_intel_crtc_state(crtc->base.state); if (!crtc_state->hw.active) goto unlock; if (crtc_state->uapi.commit && !try_wait_for_completion(&crtc_state->uapi.commit->hw_done)) goto unlock; hsw_ips_enable(crtc_state); unlock: drm_modeset_unlock(&crtc->base.mutex); return ret; } DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops, hsw_ips_debugfs_false_color_get, hsw_ips_debugfs_false_color_set, "%llu\n"); static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_wakeref_t wakeref; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", str_yes_no(i915->params.enable_ips)); if (DISPLAY_VER(i915) >= 8) { seq_puts(m, "Currently: unknown\n"); } else { if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE) seq_puts(m, "Currently: enabled\n"); else seq_puts(m, "Currently: disabled\n"); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); return 0; } DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status); void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc) { if (!hsw_crtc_supports_ips(crtc)) return; debugfs_create_file("i915_ips_false_color", 0644, crtc->base.debugfs_entry, crtc, &hsw_ips_debugfs_false_color_fops); debugfs_create_file("i915_ips_status", 0444, crtc->base.debugfs_entry, crtc, &hsw_ips_debugfs_status_fops); }
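/*
 * Illustrative sketch only (not part of the driver): on Broadwell the
 * IPS decision in hsw_ips_compute_config() above reduces to keeping the
 * pipe's pixel rate within 95% of the logical cdclk. A standalone
 * restatement of that check; the helper name is made up for illustration.
 */
static bool __maybe_unused
example_bdw_ips_pixel_rate_ok(unsigned int pixel_rate, unsigned int cdclk)
{
	/* same 95% guard band as used when computing ips_enabled above */
	return pixel_rate <= cdclk * 95 / 100;
}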
linux-master
drivers/gpu/drm/i915/display/hsw_ips.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include "i915_drv.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_cx0_phy.h" /* HDMI/DVI modes ignore everything but the last 2 items. So we share * them for both DP and FDI transports, allowing those ports to * automatically adapt to HDMI connections as well */ static const union intel_ddi_buf_trans_entry _hsw_trans_dp[] = { { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00040006, 0x0 } }, { .hsw = { 0x80AAAFFF, 0x000B0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } }, { .hsw = { 0x80C30FFF, 0x000B0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, { .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } }, }; static const struct intel_ddi_buf_trans hsw_trans_dp = { .entries = _hsw_trans_dp, .num_entries = ARRAY_SIZE(_hsw_trans_dp), }; static const union intel_ddi_buf_trans_entry _hsw_trans_fdi[] = { { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00060006, 0x0 } }, { .hsw = { 0x00AAAFFF, 0x001E0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x000F000A, 0x0 } }, { .hsw = { 0x00D75FFF, 0x00160004, 0x0 } }, { .hsw = { 0x00C30FFF, 0x001E0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x00060006, 0x0 } }, { .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } }, }; static const struct intel_ddi_buf_trans hsw_trans_fdi = { .entries = _hsw_trans_fdi, .num_entries = ARRAY_SIZE(_hsw_trans_fdi), }; static const union intel_ddi_buf_trans_entry _hsw_trans_hdmi[] = { /* Idx NT mV d T mV d db */ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */ { .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */ { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, /* 2: 400 600 3.5 */ { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, /* 3: 600 600 0 */ { .hsw = { 0x00E79FFF, 0x001D0007, 0x0 } }, /* 4: 600 750 2 */ { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } }, /* 5: 600 900 3.5 */ { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, /* 6: 800 800 0 */ { .hsw = { 0x80E79FFF, 0x00030002, 0x0 } }, /* 7: 800 1000 2 */ { .hsw = { 0x00FFFFFF, 0x00140005, 0x0 } }, /* 8: 850 850 0 */ { .hsw = { 0x00FFFFFF, 0x000C0004, 0x0 } }, /* 9: 900 900 0 */ { .hsw = { 0x00FFFFFF, 0x001C0003, 0x0 } }, /* 10: 950 950 0 */ { .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */ }; static const struct intel_ddi_buf_trans hsw_trans_hdmi = { .entries = _hsw_trans_hdmi, .num_entries = ARRAY_SIZE(_hsw_trans_hdmi), .hdmi_default_entry = 6, }; static const union intel_ddi_buf_trans_entry _bdw_trans_edp[] = { { .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } }, { .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } }, { .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } }, { .hsw = { 0x00AAAFFF, 0x000E000A, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x00020011, 0x0 } }, { .hsw = { 0x00DB6FFF, 0x0005000F, 0x0 } }, { .hsw = { 0x00BEEFFF, 0x000A000C, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x0005000F, 0x0 } }, { .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } }, }; static const struct intel_ddi_buf_trans bdw_trans_edp = { .entries = _bdw_trans_edp, .num_entries = ARRAY_SIZE(_bdw_trans_edp), }; static const union intel_ddi_buf_trans_entry _bdw_trans_dp[] = { { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, { .hsw = { 0x80B2CFFF, 0x001B0002, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, { .hsw = { 
0x00DB6FFF, 0x00160005, 0x0 } }, { .hsw = { 0x80C71FFF, 0x001A0002, 0x0 } }, { .hsw = { 0x00F7DFFF, 0x00180004, 0x0 } }, { .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } }, }; static const struct intel_ddi_buf_trans bdw_trans_dp = { .entries = _bdw_trans_dp, .num_entries = ARRAY_SIZE(_bdw_trans_dp), }; static const union intel_ddi_buf_trans_entry _bdw_trans_fdi[] = { { .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00070006, 0x0 } }, { .hsw = { 0x00AAAFFF, 0x000C0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x0004000A, 0x0 } }, { .hsw = { 0x00D75FFF, 0x00090004, 0x0 } }, { .hsw = { 0x00C30FFF, 0x000C0000, 0x0 } }, { .hsw = { 0x00FFFFFF, 0x00070006, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } }, }; static const struct intel_ddi_buf_trans bdw_trans_fdi = { .entries = _bdw_trans_fdi, .num_entries = ARRAY_SIZE(_bdw_trans_fdi), }; static const union intel_ddi_buf_trans_entry _bdw_trans_hdmi[] = { /* Idx NT mV d T mV df db */ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */ { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, /* 2: 400 800 6 */ { .hsw = { 0x00FFFFFF, 0x0009000D, 0x0 } }, /* 3: 450 450 0 */ { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, /* 4: 600 600 0 */ { .hsw = { 0x00D7FFFF, 0x00140006, 0x0 } }, /* 5: 600 800 2.5 */ { .hsw = { 0x80CB2FFF, 0x001B0002, 0x0 } }, /* 6: 600 1000 4.5 */ { .hsw = { 0x00FFFFFF, 0x00140006, 0x0 } }, /* 7: 800 800 0 */ { .hsw = { 0x80E79FFF, 0x001B0002, 0x0 } }, /* 8: 800 1000 2 */ { .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */ }; static const struct intel_ddi_buf_trans bdw_trans_hdmi = { .entries = _bdw_trans_hdmi, .num_entries = ARRAY_SIZE(_bdw_trans_hdmi), .hdmi_default_entry = 7, }; /* Skylake H and S */ static const union intel_ddi_buf_trans_entry _skl_trans_dp[] = { { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, { .hsw = { 0x00002016, 0x0000009B, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, { .hsw = { 0x00002016, 0x000000DF, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; static const struct intel_ddi_buf_trans skl_trans_dp = { .entries = _skl_trans_dp, .num_entries = ARRAY_SIZE(_skl_trans_dp), }; /* Skylake U */ static const union intel_ddi_buf_trans_entry _skl_u_trans_dp[] = { { .hsw = { 0x0000201B, 0x000000A2, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x1 } }, { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, { .hsw = { 0x0000201B, 0x0000009D, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, { .hsw = { 0x00002016, 0x00000088, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; static const struct intel_ddi_buf_trans skl_u_trans_dp = { .entries = _skl_u_trans_dp, .num_entries = ARRAY_SIZE(_skl_u_trans_dp), }; /* Skylake Y */ static const union intel_ddi_buf_trans_entry _skl_y_trans_dp[] = { { .hsw = { 0x00000018, 0x000000A2, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, { .hsw = { 0x80009010, 0x000000C0, 0x3 } }, { .hsw = { 0x00000018, 0x0000009D, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, { .hsw = { 0x00000018, 0x00000088, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; static const struct intel_ddi_buf_trans 
skl_y_trans_dp = { .entries = _skl_y_trans_dp, .num_entries = ARRAY_SIZE(_skl_y_trans_dp), }; /* Kabylake H and S */ static const union intel_ddi_buf_trans_entry _kbl_trans_dp[] = { { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, { .hsw = { 0x80009010, 0x000000C0, 0x1 } }, { .hsw = { 0x00002016, 0x0000009B, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000C0, 0x1 } }, { .hsw = { 0x00002016, 0x00000097, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; static const struct intel_ddi_buf_trans kbl_trans_dp = { .entries = _kbl_trans_dp, .num_entries = ARRAY_SIZE(_kbl_trans_dp), }; /* Kabylake U */ static const union intel_ddi_buf_trans_entry _kbl_u_trans_dp[] = { { .hsw = { 0x0000201B, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, { .hsw = { 0x80009010, 0x000000C0, 0x3 } }, { .hsw = { 0x0000201B, 0x0000009D, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, { .hsw = { 0x00002016, 0x0000004F, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; static const struct intel_ddi_buf_trans kbl_u_trans_dp = { .entries = _kbl_u_trans_dp, .num_entries = ARRAY_SIZE(_kbl_u_trans_dp), }; /* Kabylake Y */ static const union intel_ddi_buf_trans_entry _kbl_y_trans_dp[] = { { .hsw = { 0x00001017, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, { .hsw = { 0x8000800F, 0x000000C0, 0x3 } }, { .hsw = { 0x00001017, 0x0000009D, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, { .hsw = { 0x80007011, 0x000000C0, 0x3 } }, { .hsw = { 0x00001017, 0x0000004C, 0x0 } }, { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; static const struct intel_ddi_buf_trans kbl_y_trans_dp = { .entries = _kbl_y_trans_dp, .num_entries = ARRAY_SIZE(_kbl_y_trans_dp), }; /* * Skylake/Kabylake H and S * eDP 1.4 low vswing translation parameters */ static const union intel_ddi_buf_trans_entry _skl_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, { .hsw = { 0x00009010, 0x0000009C, 0x0 } }, { .hsw = { 0x00000018, 0x000000A9, 0x0 } }, { .hsw = { 0x00006013, 0x000000A2, 0x0 } }, { .hsw = { 0x00007011, 0x000000A6, 0x0 } }, { .hsw = { 0x00000018, 0x000000AB, 0x0 } }, { .hsw = { 0x00007013, 0x0000009F, 0x0 } }, { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, }; static const struct intel_ddi_buf_trans skl_trans_edp = { .entries = _skl_trans_edp, .num_entries = ARRAY_SIZE(_skl_trans_edp), }; /* * Skylake/Kabylake U * eDP 1.4 low vswing translation parameters */ static const union intel_ddi_buf_trans_entry _skl_u_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, { .hsw = { 0x00009010, 0x0000009C, 0x0 } }, { .hsw = { 0x00000018, 0x000000A9, 0x0 } }, { .hsw = { 0x00006013, 0x000000A2, 0x0 } }, { .hsw = { 0x00007011, 0x000000A6, 0x0 } }, { .hsw = { 0x00002016, 0x000000AB, 0x0 } }, { .hsw = { 0x00005013, 0x0000009F, 0x0 } }, { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, }; static const struct intel_ddi_buf_trans skl_u_trans_edp = { .entries = _skl_u_trans_edp, .num_entries = ARRAY_SIZE(_skl_u_trans_edp), }; /* * Skylake/Kabylake Y * eDP 1.4 low vswing translation parameters */ static const union intel_ddi_buf_trans_entry _skl_y_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 
0x0 } }, { .hsw = { 0x00004013, 0x000000AB, 0x0 } }, { .hsw = { 0x00007011, 0x000000A4, 0x0 } }, { .hsw = { 0x00009010, 0x000000DF, 0x0 } }, { .hsw = { 0x00000018, 0x000000AA, 0x0 } }, { .hsw = { 0x00006013, 0x000000A4, 0x0 } }, { .hsw = { 0x00007011, 0x0000009D, 0x0 } }, { .hsw = { 0x00000018, 0x000000A0, 0x0 } }, { .hsw = { 0x00006012, 0x000000DF, 0x0 } }, { .hsw = { 0x00000018, 0x0000008A, 0x0 } }, }; static const struct intel_ddi_buf_trans skl_y_trans_edp = { .entries = _skl_y_trans_edp, .num_entries = ARRAY_SIZE(_skl_y_trans_edp), }; /* Skylake/Kabylake U, H and S */ static const union intel_ddi_buf_trans_entry _skl_trans_hdmi[] = { { .hsw = { 0x00000018, 0x000000AC, 0x0 } }, { .hsw = { 0x00005012, 0x0000009D, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, { .hsw = { 0x00000018, 0x000000A1, 0x0 } }, { .hsw = { 0x00000018, 0x00000098, 0x0 } }, { .hsw = { 0x00004013, 0x00000088, 0x0 } }, { .hsw = { 0x80006012, 0x000000CD, 0x1 } }, { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, { .hsw = { 0x80003015, 0x000000CD, 0x1 } }, /* Default */ { .hsw = { 0x80003015, 0x000000C0, 0x1 } }, { .hsw = { 0x80000018, 0x000000C0, 0x1 } }, }; static const struct intel_ddi_buf_trans skl_trans_hdmi = { .entries = _skl_trans_hdmi, .num_entries = ARRAY_SIZE(_skl_trans_hdmi), .hdmi_default_entry = 8, }; /* Skylake/Kabylake Y */ static const union intel_ddi_buf_trans_entry _skl_y_trans_hdmi[] = { { .hsw = { 0x00000018, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x000000DF, 0x0 } }, { .hsw = { 0x80007011, 0x000000CB, 0x3 } }, { .hsw = { 0x00000018, 0x000000A4, 0x0 } }, { .hsw = { 0x00000018, 0x0000009D, 0x0 } }, { .hsw = { 0x00004013, 0x00000080, 0x0 } }, { .hsw = { 0x80006013, 0x000000C0, 0x3 } }, { .hsw = { 0x00000018, 0x0000008A, 0x0 } }, { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, /* Default */ { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, { .hsw = { 0x80000018, 0x000000C0, 0x3 } }, }; static const struct intel_ddi_buf_trans skl_y_trans_hdmi = { .entries = _skl_y_trans_hdmi, .num_entries = ARRAY_SIZE(_skl_y_trans_hdmi), .hdmi_default_entry = 8, }; static const union intel_ddi_buf_trans_entry _bxt_trans_dp[] = { /* Idx NT mV diff db */ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ { .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ { .bxt = { 104, 0x9A, 0, 64, } }, /* 2: 400 6 */ { .bxt = { 154, 0x9A, 0, 43, } }, /* 3: 400 9.5 */ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */ { .bxt = { 116, 0x9A, 0, 85, } }, /* 5: 600 3.5 */ { .bxt = { 154, 0x9A, 0, 64, } }, /* 6: 600 6 */ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */ { .bxt = { 154, 0x9A, 0, 85, } }, /* 8: 800 3.5 */ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ }; static const struct intel_ddi_buf_trans bxt_trans_dp = { .entries = _bxt_trans_dp, .num_entries = ARRAY_SIZE(_bxt_trans_dp), }; static const union intel_ddi_buf_trans_entry _bxt_trans_edp[] = { /* Idx NT mV diff db */ { .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */ { .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */ { .bxt = { 48, 0, 0, 96, } }, /* 2: 200 4 */ { .bxt = { 54, 0, 0, 69, } }, /* 3: 200 6 */ { .bxt = { 32, 0, 0, 128, } }, /* 4: 250 0 */ { .bxt = { 48, 0, 0, 104, } }, /* 5: 250 1.5 */ { .bxt = { 54, 0, 0, 85, } }, /* 6: 250 4 */ { .bxt = { 43, 0, 0, 128, } }, /* 7: 300 0 */ { .bxt = { 54, 0, 0, 101, } }, /* 8: 300 1.5 */ { .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */ }; static const struct intel_ddi_buf_trans bxt_trans_edp = { .entries = _bxt_trans_edp, .num_entries = ARRAY_SIZE(_bxt_trans_edp), }; /* BSpec has 2 recommended values - entries 0 and 8. 
* Using the entry with higher vswing. */ static const union intel_ddi_buf_trans_entry _bxt_trans_hdmi[] = { /* Idx NT mV diff db */ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ { .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ { .bxt = { 52, 0x9A, 0, 64, } }, /* 2: 400 6 */ { .bxt = { 42, 0x9A, 0, 43, } }, /* 3: 400 9.5 */ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */ { .bxt = { 77, 0x9A, 0, 85, } }, /* 5: 600 3.5 */ { .bxt = { 77, 0x9A, 0, 64, } }, /* 6: 600 6 */ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */ { .bxt = { 102, 0x9A, 0, 85, } }, /* 8: 800 3.5 */ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ }; static const struct intel_ddi_buf_trans bxt_trans_hdmi = { .entries = _bxt_trans_hdmi, .num_entries = ARRAY_SIZE(_bxt_trans_hdmi), .hdmi_default_entry = ARRAY_SIZE(_bxt_trans_hdmi) - 1, }; /* icl_combo_phy_trans */ static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_dp_hbr2_edp_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans icl_combo_phy_trans_dp_hbr2_edp_hbr3 = { .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3, .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3), }; static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */ { .icl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } }, /* 200 350 4.9 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ { .icl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ { .icl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ { .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; static const struct intel_ddi_buf_trans icl_combo_phy_trans_edp_hbr2 = { .entries = _icl_combo_phy_trans_edp_hbr2, .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2), }; static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_hdmi[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ { .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */ { .icl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */ { .icl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ }; static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = { .entries = _icl_combo_phy_trans_hdmi, .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_hdmi), .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_trans_hdmi) - 1, }; static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = { /* NT mV Trans mV db 
*/ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans ehl_combo_phy_trans_dp = { .entries = _ehl_combo_phy_trans_dp, .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_dp), }; static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */ { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 200 350 4.9 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ { .icl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } }, /* 250 300 1.6 */ { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 250 350 2.9 */ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; static const struct intel_ddi_buf_trans ehl_combo_phy_trans_edp_hbr2 = { .entries = _ehl_combo_phy_trans_edp_hbr2, .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_edp_hbr2), }; static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */ { .icl = { 0xA, 0x35, 0x36, 0x00, 0x09 } }, /* 200 350 4.9 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ { .icl = { 0xA, 0x35, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr = { .entries = _jsl_combo_phy_trans_edp_hbr, .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr), }; static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 200 350 4.9 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 300 1.6 */ { .icl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } }, /* 250 350 2.9 */ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr2 = { .entries = _jsl_combo_phy_trans_edp_hbr2, .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr2), }; static const union intel_ddi_buf_trans_entry 
_dg1_combo_phy_trans_dp_rbr_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_rbr_hbr = { .entries = _dg1_combo_phy_trans_dp_rbr_hbr, .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_rbr_hbr), }; static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_hbr2_hbr3 = { .entries = _dg1_combo_phy_trans_dp_hbr2_hbr3, .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_hbr2_hbr3), }; static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_rbr_hbr[] = { /* Voltage swing pre-emphasis */ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */ { .mg = { 0x21, 0x00, 0x00 } }, /* 1 0 */ { .mg = { 0x2B, 0x00, 0x08 } }, /* 1 1 */ { .mg = { 0x30, 0x00, 0x0F } }, /* 1 2 */ { .mg = { 0x31, 0x00, 0x03 } }, /* 2 0 */ { .mg = { 0x34, 0x00, 0x0B } }, /* 2 1 */ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ }; static const struct intel_ddi_buf_trans icl_mg_phy_trans_rbr_hbr = { .entries = _icl_mg_phy_trans_rbr_hbr, .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_rbr_hbr), }; static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hbr2_hbr3[] = { /* Voltage swing pre-emphasis */ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */ { .mg = { 0x26, 0x00, 0x00 } }, /* 1 0 */ { .mg = { 0x2C, 0x00, 0x07 } }, /* 1 1 */ { .mg = { 0x33, 0x00, 0x0C } }, /* 1 2 */ { .mg = { 0x2E, 0x00, 0x00 } }, /* 2 0 */ { .mg = { 0x36, 0x00, 0x09 } }, /* 2 1 */ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ }; static const struct intel_ddi_buf_trans icl_mg_phy_trans_hbr2_hbr3 = { .entries = _icl_mg_phy_trans_hbr2_hbr3, .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hbr2_hbr3), }; static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hdmi[] = { /* HDMI Preset VS Pre-emph */ { .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */ { .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */ { .mg = { 0x29, 0x0, 0x0 } }, /* 3 650mV 0dB */ { .mg = { 0x32, 0x0, 0x0 } }, /* 4 800mV 0dB */ { .mg = { 0x3F, 0x0, 0x0 } }, /* 5 1000mV 0dB */ { .mg = { 0x3A, 0x0, 
0x5 } }, /* 6 Full -1.5 dB */ { .mg = { 0x39, 0x0, 0x6 } }, /* 7 Full -1.8 dB */ { .mg = { 0x38, 0x0, 0x7 } }, /* 8 Full -2 dB */ { .mg = { 0x37, 0x0, 0x8 } }, /* 9 Full -2.5 dB */ { .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */ }; static const struct intel_ddi_buf_trans icl_mg_phy_trans_hdmi = { .entries = _icl_mg_phy_trans_hdmi, .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hdmi), .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_trans_hdmi) - 1, }; static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ }; static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr = { .entries = _tgl_dkl_phy_trans_dp_hbr, .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr), }; static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr2[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ { .dkl = { 0x0, 0x0, 0x19 } }, /* 0 3 400mV 9.5 dB */ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ }; static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr2 = { .entries = _tgl_dkl_phy_trans_dp_hbr2, .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr2), }; static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_hdmi[] = { /* HDMI Preset VS Pre-emph */ { .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */ { .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */ { .dkl = { 0x4, 0x0, 0x0 } }, /* 3 650mV 0dB */ { .dkl = { 0x2, 0x0, 0x0 } }, /* 4 800mV 0dB */ { .dkl = { 0x0, 0x0, 0x0 } }, /* 5 1000mV 0dB */ { .dkl = { 0x0, 0x0, 0x5 } }, /* 6 Full -1.5 dB */ { .dkl = { 0x0, 0x0, 0x6 } }, /* 7 Full -1.8 dB */ { .dkl = { 0x0, 0x0, 0x7 } }, /* 8 Full -2 dB */ { .dkl = { 0x0, 0x0, 0x8 } }, /* 9 Full -2.5 dB */ { .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */ }; static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_hdmi = { .entries = _tgl_dkl_phy_trans_hdmi, .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi), .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi) - 1, }; static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 
0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr = { .entries = _tgl_combo_phy_trans_dp_hbr, .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr), }; static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x63, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr2 = { .entries = _tgl_combo_phy_trans_dp_hbr2, .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr2), }; static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_trans_dp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x60, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */ { .icl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */ { .icl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans tgl_uy_combo_phy_trans_dp_hbr2 = { .entries = _tgl_uy_combo_phy_trans_dp_hbr2, .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_trans_dp_hbr2), }; /* * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries * that DisplayPort specification requires */ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_edp_hbr2_hobl[] = { /* VS pre-emp */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 2 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 3 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 0 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 1 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 2 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 0 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */ }; static const struct intel_ddi_buf_trans tgl_combo_phy_trans_edp_hbr2_hobl = { .entries = _tgl_combo_phy_trans_edp_hbr2_hobl, .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_edp_hbr2_hobl), }; static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x6E, 
0x3E, 0x00, 0x01 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr = { .entries = _rkl_combo_phy_trans_dp_hbr, .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr), }; static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x61, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr2_hbr3 = { .entries = _rkl_combo_phy_trans_dp_hbr2_hbr3, .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr2_hbr3), }; static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans adls_combo_phy_trans_dp_hbr2_hbr3 = { .entries = _adls_combo_phy_trans_dp_hbr2_hbr3, .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_dp_hbr2_hbr3), }; static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } }, /* 200 200 0.0 */ { .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } }, /* 200 250 1.9 */ { .icl = { 0x9, 0x7F, 0x3B, 0x00, 0x04 } }, /* 200 300 3.5 */ { .icl = { 0x4, 0x6C, 0x33, 0x00, 0x0C } }, /* 200 350 4.9 */ { .icl = { 0x2, 0x73, 0x3A, 0x00, 0x05 } }, /* 250 250 0.0 */ { .icl = { 0x2, 0x7C, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */ { .icl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } }, /* 250 350 2.9 */ { .icl = { 0x4, 0x57, 0x3D, 0x00, 0x02 } }, /* 300 300 0.0 */ { .icl = { 0x4, 0x65, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */ { .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */ }; static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr2 = { .entries = _adls_combo_phy_trans_edp_hbr2, .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr2), }; static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 
500 700 2.9 */ { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = { .entries = _adls_combo_phy_trans_edp_hbr3, .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3), }; static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x71, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x7C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr = { .entries = _adlp_combo_phy_trans_dp_hbr, .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr), }; static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x4, 0x50, 0x38, 0x00, 0x07 } }, /* 200 200 0.0 */ { .icl = { 0x4, 0x58, 0x35, 0x00, 0x0A } }, /* 200 250 1.9 */ { .icl = { 0x4, 0x60, 0x34, 0x00, 0x0B } }, /* 200 300 3.5 */ { .icl = { 0x4, 0x6A, 0x32, 0x00, 0x0D } }, /* 200 350 4.9 */ { .icl = { 0x4, 0x5E, 0x38, 0x00, 0x07 } }, /* 250 250 0.0 */ { .icl = { 0x4, 0x61, 0x36, 0x00, 0x09 } }, /* 250 300 1.6 */ { .icl = { 0x4, 0x6B, 0x34, 0x00, 0x0B } }, /* 250 350 2.9 */ { .icl = { 0x4, 0x69, 0x39, 0x00, 0x06 } }, /* 300 300 0.0 */ { .icl = { 0x4, 0x73, 0x37, 0x00, 0x08 } }, /* 300 350 1.3 */ { .icl = { 0x4, 0x7A, 0x38, 0x00, 0x07 } }, /* 350 350 0.0 */ }; static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_edp_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0f } }, /* 350 700 6.0 */ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; static 
const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = { .entries = _adlp_combo_phy_trans_dp_hbr2_hbr3, .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3), }; static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = { .entries = _adlp_combo_phy_trans_dp_hbr2_edp_hbr3, .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_edp_hbr3), }; static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = { .entries = _adlp_combo_phy_trans_edp_hbr2, .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_edp_hbr2), }; static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */ { .dkl = { 0x0, 0x0, 0x17 } }, /* 0 3 400mV 9.5 dB */ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ }; static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr = { .entries = _adlp_dkl_phy_trans_dp_hbr, .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr), }; static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr2_hbr3[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */ { .dkl = { 0x2, 0x0, 0x0A } }, /* 0 2 400mV 6 dB */ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */ { .dkl = { 0x2, 0x0, 0x06 } }, /* 1 1 600mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */ { .dkl = { 0x0, 0x0, 0x09 } }, /* 2 1 800mV 3.5 dB */ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ }; static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = { .entries = _adlp_dkl_phy_trans_dp_hbr2_hbr3, .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr2_hbr3), }; static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = { { .snps = { 25, 0, 0 } }, /* VS 0, pre-emph 0 */ { .snps = { 32, 0, 6 } }, /* VS 0, pre-emph 1 */ { .snps = { 35, 0, 10 } }, /* VS 0, pre-emph 2 */ { .snps = { 43, 0, 17 } }, /* VS 0, pre-emph 3 */ { .snps = { 35, 0, 0 } }, /* VS 1, pre-emph 0 */ { .snps = { 45, 0, 8 } }, /* VS 1, pre-emph 1 */ { .snps = { 48, 0, 14 } }, /* VS 1, pre-emph 2 */ { .snps = { 47, 0, 0 } }, /* VS 2, pre-emph 0 */ { .snps = { 55, 0, 7 } }, /* VS 2, pre-emph 1 */ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */ }; static const struct intel_ddi_buf_trans dg2_snps_trans = { .entries = _dg2_snps_trans, .num_entries = ARRAY_SIZE(_dg2_snps_trans), .hdmi_default_entry = ARRAY_SIZE(_dg2_snps_trans) - 1, }; static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = { { .snps = { 62, 0, 0 } }, /* preset 0 */ { .snps = { 55, 0, 7 } }, /* preset 1 */ { .snps = { 50, 0, 12 } }, /* preset 2 */ { .snps = { 44, 0, 18 } }, /* preset 3 */ { .snps = { 35, 0, 21 } }, /* preset 4 */ { .snps = { 59, 3, 0 } }, /* preset 5 */ { .snps = { 53, 3, 6 } }, /* preset 6 */ { .snps = { 48, 3, 11 } }, /* preset 7 */ { .snps = { 42, 5, 15 } }, /* preset 8 */ { .snps = { 37, 5, 20 } }, /* preset 9 */ { .snps = { 56, 6, 0 } }, /* preset 10 */ { .snps = { 48, 7, 7 } }, /* preset 11 */ { 
.snps = { 45, 7, 10 } }, /* preset 12 */ { .snps = { 39, 8, 15 } }, /* preset 13 */ { .snps = { 48, 14, 0 } }, /* preset 14 */ { .snps = { 45, 4, 4 } }, /* preset 15 */ }; static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = { .entries = _dg2_snps_trans_uhbr, .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr), }; static const union intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = { { .snps = { 26, 0, 0 } }, /* preset 0 */ { .snps = { 33, 0, 6 } }, /* preset 1 */ { .snps = { 38, 0, 11 } }, /* preset 2 */ { .snps = { 43, 0, 19 } }, /* preset 3 */ { .snps = { 39, 0, 0 } }, /* preset 4 */ { .snps = { 45, 0, 7 } }, /* preset 5 */ { .snps = { 46, 0, 13 } }, /* preset 6 */ { .snps = { 46, 0, 0 } }, /* preset 7 */ { .snps = { 55, 0, 7 } }, /* preset 8 */ { .snps = { 62, 0, 0 } }, /* preset 9 */ }; static const struct intel_ddi_buf_trans mtl_c10_trans_dp14 = { .entries = _mtl_c10_trans_dp14, .num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14), .hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1, }; /* DP1.4 */ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_dp14[] = { { .snps = { 20, 0, 0 } }, /* preset 0 */ { .snps = { 24, 0, 4 } }, /* preset 1 */ { .snps = { 30, 0, 9 } }, /* preset 2 */ { .snps = { 34, 0, 14 } }, /* preset 3 */ { .snps = { 29, 0, 0 } }, /* preset 4 */ { .snps = { 34, 0, 5 } }, /* preset 5 */ { .snps = { 38, 0, 10 } }, /* preset 6 */ { .snps = { 36, 0, 0 } }, /* preset 7 */ { .snps = { 40, 0, 6 } }, /* preset 8 */ { .snps = { 48, 0, 0 } }, /* preset 9 */ }; /* DP2.0 */ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = { { .snps = { 48, 0, 0 } }, /* preset 0 */ { .snps = { 43, 0, 5 } }, /* preset 1 */ { .snps = { 40, 0, 8 } }, /* preset 2 */ { .snps = { 37, 0, 11 } }, /* preset 3 */ { .snps = { 33, 0, 15 } }, /* preset 4 */ { .snps = { 46, 2, 0 } }, /* preset 5 */ { .snps = { 42, 2, 4 } }, /* preset 6 */ { .snps = { 38, 2, 8 } }, /* preset 7 */ { .snps = { 35, 2, 11 } }, /* preset 8 */ { .snps = { 33, 2, 13 } }, /* preset 9 */ { .snps = { 44, 4, 0 } }, /* preset 10 */ { .snps = { 40, 4, 4 } }, /* preset 11 */ { .snps = { 37, 4, 7 } }, /* preset 12 */ { .snps = { 33, 4, 11 } }, /* preset 13 */ { .snps = { 40, 8, 0 } }, /* preset 14 */ { .snps = { 30, 2, 2 } }, /* preset 15 */ }; /* HDMI2.0 */ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_hdmi[] = { { .snps = { 48, 0, 0 } }, /* preset 0 */ { .snps = { 38, 4, 6 } }, /* preset 1 */ { .snps = { 36, 4, 8 } }, /* preset 2 */ { .snps = { 34, 4, 10 } }, /* preset 3 */ { .snps = { 32, 4, 12 } }, /* preset 4 */ }; static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = { .entries = _mtl_c20_trans_hdmi, .num_entries = ARRAY_SIZE(_mtl_c20_trans_hdmi), .hdmi_default_entry = 0, }; static const struct intel_ddi_buf_trans mtl_c20_trans_dp14 = { .entries = _mtl_c20_trans_dp14, .num_entries = ARRAY_SIZE(_mtl_c20_trans_dp14), .hdmi_default_entry = ARRAY_SIZE(_mtl_c20_trans_dp14) - 1, }; static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = { .entries = _mtl_c20_trans_uhbr, .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr), }; bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) { return table == &tgl_combo_phy_trans_edp_hbr2_hobl; } static bool use_edp_hobl(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed; } static bool use_edp_low_vswing(struct intel_encoder *encoder) { struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; return connector->panel.vbt.edp.low_vswing; } static const struct intel_ddi_buf_trans * intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries) { *num_entries = trans->num_entries; return trans; } static const struct intel_ddi_buf_trans * hsw_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) return intel_get_buf_trans(&hsw_trans_fdi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&hsw_trans_hdmi, n_entries); else return intel_get_buf_trans(&hsw_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * bdw_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) return intel_get_buf_trans(&bdw_trans_fdi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bdw_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bdw_trans_edp, n_entries); else return intel_get_buf_trans(&bdw_trans_dp, n_entries); } static int skl_buf_trans_num_entries(enum port port, int n_entries) { /* Only DDIA and DDIE can select the 10th register with DP */ if (port == PORT_A || port == PORT_E) return min(n_entries, 10); else return min(n_entries, 9); } static const struct intel_ddi_buf_trans * _skl_get_buf_trans_dp(struct intel_encoder *encoder, const struct intel_ddi_buf_trans *trans, int *n_entries) { trans = intel_get_buf_trans(trans, n_entries); *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); return trans; } static const struct intel_ddi_buf_trans * skl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * skl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * skl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * kbl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, 
INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * kbl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * kbl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * bxt_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bxt_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bxt_trans_edp, n_entries); else return intel_get_buf_trans(&bxt_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * icl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { return intel_get_buf_trans(&icl_mg_phy_trans_hbr2_hbr3, n_entries); } else { return intel_get_buf_trans(&icl_mg_phy_trans_rbr_hbr, n_entries); } } static const struct intel_ddi_buf_trans * icl_get_mg_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return 
intel_get_buf_trans(&icl_mg_phy_trans_hdmi, n_entries); else return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&ehl_combo_phy_trans_edp_hbr2, n_entries); else return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } static const struct intel_ddi_buf_trans * ehl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr2, n_entries); else return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr, n_entries); } static const struct intel_ddi_buf_trans * jsl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && use_edp_low_vswing(encoder)) return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (crtc_state->port_clock > 270000) { if (IS_TIGERLAKE_UY(dev_priv)) { return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2, n_entries); } else { return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr2, n_entries); } } else { return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries); } } static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * tgl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct 
intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&dg1_combo_phy_trans_dp_hbr2_hbr3, n_entries); else return intel_get_buf_trans(&dg1_combo_phy_trans_dp_rbr_hbr, n_entries); } static const struct intel_ddi_buf_trans * dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); else return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * dg1_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr2_hbr3, n_entries); else return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * rkl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * adls_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&adls_combo_phy_trans_dp_hbr2_hbr3, n_entries); else return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * adls_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries); else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries); else return 
adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * adls_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr2_hbr3, n_entries); else return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3, n_entries); } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2, n_entries); } return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * adlp_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr2, n_entries); } else { return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr, n_entries); } } static const struct intel_ddi_buf_trans * tgl_get_dkl_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries); else return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (crtc_state->port_clock > 270000) { return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr2_hbr3, n_entries); } else { return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr, n_entries); } } static const struct intel_ddi_buf_trans * adlp_get_dkl_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries); else return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct intel_ddi_buf_trans * dg2_get_snps_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { if (intel_crtc_has_dp_encoder(crtc_state) && 
intel_dp_is_uhbr(crtc_state)) return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries); else return intel_get_buf_trans(&dg2_snps_trans, n_entries); } static const struct intel_ddi_buf_trans * mtl_get_cx0_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000) return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy))) return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries); else if (!intel_is_c10phy(i915, phy)) return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries); else return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries); } void intel_ddi_buf_trans_init(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); if (DISPLAY_VER(i915) >= 14) { encoder->get_buf_trans = mtl_get_cx0_buf_trans; } else if (IS_DG2(i915)) { encoder->get_buf_trans = dg2_get_snps_buf_trans; } else if (IS_ALDERLAKE_P(i915)) { if (intel_phy_is_combo(i915, phy)) encoder->get_buf_trans = adlp_get_combo_buf_trans; else encoder->get_buf_trans = adlp_get_dkl_buf_trans; } else if (IS_ALDERLAKE_S(i915)) { encoder->get_buf_trans = adls_get_combo_buf_trans; } else if (IS_ROCKETLAKE(i915)) { encoder->get_buf_trans = rkl_get_combo_buf_trans; } else if (IS_DG1(i915)) { encoder->get_buf_trans = dg1_get_combo_buf_trans; } else if (DISPLAY_VER(i915) >= 12) { if (intel_phy_is_combo(i915, phy)) encoder->get_buf_trans = tgl_get_combo_buf_trans; else encoder->get_buf_trans = tgl_get_dkl_buf_trans; } else if (DISPLAY_VER(i915) == 11) { if (IS_PLATFORM(i915, INTEL_JASPERLAKE)) encoder->get_buf_trans = jsl_get_combo_buf_trans; else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE)) encoder->get_buf_trans = ehl_get_combo_buf_trans; else if (intel_phy_is_combo(i915, phy)) encoder->get_buf_trans = icl_get_combo_buf_trans; else encoder->get_buf_trans = icl_get_mg_buf_trans; } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { encoder->get_buf_trans = bxt_get_buf_trans; } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) { encoder->get_buf_trans = kbl_y_get_buf_trans; } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) { encoder->get_buf_trans = kbl_u_get_buf_trans; } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) { encoder->get_buf_trans = kbl_get_buf_trans; } else if (IS_SKYLAKE_ULX(i915)) { encoder->get_buf_trans = skl_y_get_buf_trans; } else if (IS_SKYLAKE_ULT(i915)) { encoder->get_buf_trans = skl_u_get_buf_trans; } else if (IS_SKYLAKE(i915)) { encoder->get_buf_trans = skl_get_buf_trans; } else if (IS_BROADWELL(i915)) { encoder->get_buf_trans = bdw_get_buf_trans; } else if (IS_HASWELL(i915)) { encoder->get_buf_trans = hsw_get_buf_trans; } else { MISSING_CASE(INTEL_INFO(i915)->platform); } }
linux-master
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
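A minimal usage sketch for the table-selection hook installed above by intel_ddi_buf_trans_init(). The wrapper name ddi_clamp_vswing_level() is invented for illustration; encoder->get_buf_trans() and its signature come from the code above, and the clamp is one plausible way a caller can guard the level index against the returned table size.

static int ddi_clamp_vswing_level(struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  int level)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct intel_ddi_buf_trans *trans;
	int n_entries;

	/* Pick the platform/output/port-clock specific translation table. */
	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (drm_WARN_ON_ONCE(&i915->drm, !trans))
		return 0;

	/* Never index past the end of the selected table. */
	if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
		level = n_entries - 1;

	return level;
}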
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /** * DOC: atomic plane helpers * * The functions here are used by the atomic plane helper functions to * implement legacy plane updates (i.e., drm_plane->update_plane() and * drm_plane->disable_plane()). This allows plane updates to use the * atomic state infrastructure and perform plane updates as separate * prepare/check/commit/cleanup steps. */ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include "i915_config.h" #include "i915_reg.h" #include "intel_atomic_plane.h" #include "intel_cdclk.h" #include "intel_display_rps.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "skl_scaler.h" #include "skl_watermark.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) { memset(plane_state, 0, sizeof(*plane_state)); __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base); plane_state->scaler_id = -1; } struct intel_plane *intel_plane_alloc(void) { struct intel_plane_state *plane_state; struct intel_plane *plane; plane = kzalloc(sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); if (!plane_state) { kfree(plane); return ERR_PTR(-ENOMEM); } intel_plane_state_reset(plane_state, plane); plane->base.state = &plane_state->uapi; return plane; } void intel_plane_free(struct intel_plane *plane) { intel_plane_destroy_state(&plane->base, plane->base.state); kfree(plane); } /** * intel_plane_duplicate_state - duplicate plane state * @plane: drm plane * * Allocates and returns a copy of the plane state (both common and * Intel-specific) for the specified plane. * * Returns: The newly allocated plane state, or NULL on failure. 
*/ struct drm_plane_state * intel_plane_duplicate_state(struct drm_plane *plane) { struct intel_plane_state *intel_state; intel_state = to_intel_plane_state(plane->state); intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL); if (!intel_state) return NULL; __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi); intel_state->ggtt_vma = NULL; intel_state->dpt_vma = NULL; intel_state->flags = 0; /* add reference to fb */ if (intel_state->hw.fb) drm_framebuffer_get(intel_state->hw.fb); return &intel_state->uapi; } /** * intel_plane_destroy_state - destroy plane state * @plane: drm plane * @state: state object to destroy * * Destroys the plane state (both common and Intel-specific) for the * specified plane. */ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { struct intel_plane_state *plane_state = to_intel_plane_state(state); drm_WARN_ON(plane->dev, plane_state->ggtt_vma); drm_WARN_ON(plane->dev, plane_state->dpt_vma); __drm_atomic_helper_plane_destroy_state(&plane_state->uapi); if (plane_state->hw.fb) drm_framebuffer_put(plane_state->hw.fb); kfree(plane_state); } unsigned int intel_adjusted_rate(const struct drm_rect *src, const struct drm_rect *dst, unsigned int rate) { unsigned int src_w, src_h, dst_w, dst_h; src_w = drm_rect_width(src) >> 16; src_h = drm_rect_height(src) >> 16; dst_w = drm_rect_width(dst); dst_h = drm_rect_height(dst); /* Downscaling limits the maximum pixel rate */ dst_w = min(src_w, dst_w); dst_h = min(src_h, dst_h); return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h), dst_w * dst_h); } unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { /* * Note we don't check for plane visibility here as * we want to use this when calculating the cursor * watermarks even if the cursor is fully offscreen. * That depends on the src/dst rectangles being * correctly populated whenever the watermark code * considers the cursor to be visible, whether or not * it is actually visible. * * See: intel_wm_plane_visible() and intel_check_cursor() */ return intel_adjusted_rate(&plane_state->uapi.src, &plane_state->uapi.dst, crtc_state->pixel_rate); } unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; if (!plane_state->uapi.visible) return 0; return intel_plane_pixel_rate(crtc_state, plane_state) * fb->format->cpp[color_plane]; } static bool use_min_ddb(const struct intel_crtc_state *crtc_state, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); return DISPLAY_VER(i915) >= 13 && crtc_state->uapi.async_flip && plane->async_flip; } static unsigned int intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int width, height; unsigned int rel_data_rate; if (plane->id == PLANE_CURSOR) return 0; if (!plane_state->uapi.visible) return 0; /* * We calculate extra ddb based on ratio plane rate/total data rate * in case, in some cases we should not allocate extra ddb for the plane, * so do not count its data rate, if this is the case. 
*/ if (use_min_ddb(crtc_state, plane)) return 0; /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ width = drm_rect_width(&plane_state->uapi.src) >> 16; height = drm_rect_height(&plane_state->uapi.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ if (color_plane == 1) { width /= 2; height /= 2; } rel_data_rate = width * height * fb->format->cpp[color_plane]; return intel_adjusted_rate(&plane_state->uapi.src, &plane_state->uapi.dst, rel_data_rate); } int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, struct intel_plane *plane, bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); const struct intel_cdclk_state *cdclk_state; const struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; if (!plane_state->uapi.visible || !plane->min_cdclk) return 0; old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); new_crtc_state->min_cdclk[plane->id] = plane->min_cdclk(new_crtc_state, plane_state); /* * No need to check against the cdclk state if * the min cdclk for the plane doesn't increase. * * Ie. we only ever increase the cdclk due to plane * requirements. This can reduce back and forth * display blinking due to constant cdclk changes. */ if (new_crtc_state->min_cdclk[plane->id] <= old_crtc_state->min_cdclk[plane->id]) return 0; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); /* * No need to recalculate the cdclk state if * the min cdclk for the pipe doesn't increase. * * Ie. we only ever increase the cdclk due to plane * requirements. This can reduce back and forth * display blinking due to constant cdclk changes. */ if (new_crtc_state->min_cdclk[plane->id] <= cdclk_state->min_cdclk[crtc->pipe]) return 0; drm_dbg_kms(&dev_priv->drm, "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n", plane->base.base.id, plane->base.name, new_crtc_state->min_cdclk[plane->id], crtc->base.base.id, crtc->base.name, cdclk_state->min_cdclk[crtc->pipe]); *need_cdclk_calc = true; return 0; } static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) { if (plane_state->hw.fb) drm_framebuffer_put(plane_state->hw.fb); memset(&plane_state->hw, 0, sizeof(plane_state->hw)); } void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state, struct intel_crtc *crtc) { intel_plane_clear_hw_state(plane_state); /* * For the bigjoiner slave uapi.crtc will point at * the master crtc. So we explicitly assign the right * slave crtc to hw.crtc. uapi.crtc!=NULL simply indicates * the plane is logically enabled on the uapi level. */ plane_state->hw.crtc = from_plane_state->uapi.crtc ? 
&crtc->base : NULL; plane_state->hw.fb = from_plane_state->uapi.fb; if (plane_state->hw.fb) drm_framebuffer_get(plane_state->hw.fb); plane_state->hw.alpha = from_plane_state->uapi.alpha; plane_state->hw.pixel_blend_mode = from_plane_state->uapi.pixel_blend_mode; plane_state->hw.rotation = from_plane_state->uapi.rotation; plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding; plane_state->hw.color_range = from_plane_state->uapi.color_range; plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter; plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi); plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi); } void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state) { intel_plane_clear_hw_state(plane_state); memcpy(&plane_state->hw, &from_plane_state->hw, sizeof(plane_state->hw)); if (plane_state->hw.fb) drm_framebuffer_get(plane_state->hw.fb); } void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); crtc_state->active_planes &= ~BIT(plane->id); crtc_state->scaled_planes &= ~BIT(plane->id); crtc_state->nv12_planes &= ~BIT(plane->id); crtc_state->c8_planes &= ~BIT(plane->id); crtc_state->async_flip_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; crtc_state->data_rate_y[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; crtc_state->rel_data_rate_y[plane->id] = 0; crtc_state->min_cdclk[plane->id] = 0; plane_state->uapi.visible = false; } /* FIXME nuke when all wm code is atomic */ static bool intel_wm_need_update(const struct intel_plane_state *cur, struct intel_plane_state *new) { /* Update watermarks on tiling or size changes. */ if (new->uapi.visible != cur->uapi.visible) return true; if (!cur->hw.fb || !new->hw.fb) return false; if (cur->hw.fb->modifier != new->hw.fb->modifier || cur->hw.rotation != new->hw.rotation || drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) return true; return false; } static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state) { int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; int dst_w = drm_rect_width(&plane_state->uapi.dst); int dst_h = drm_rect_height(&plane_state->uapi.dst); return src_w != dst_w || src_h != dst_h; } static bool intel_plane_do_async_flip(struct intel_plane *plane, const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); if (!plane->async_flip) return false; if (!new_crtc_state->uapi.async_flip) return false; /* * In platforms after DISPLAY13, we might need to override * first async flip in order to change watermark levels * as part of optimization. * So for those, we are checking if this is a first async flip. * For platforms earlier than DISPLAY13 we always do async flip. 
*/ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip; } static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, const struct intel_plane_state *new_plane_state) { struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); bool old_visible = old_plane_state->uapi.visible; bool new_visible = new_plane_state->uapi.visible; u32 old_ctl = old_plane_state->ctl; u32 new_ctl = new_plane_state->ctl; bool modeset, turn_on, turn_off; if (plane->id == PLANE_CURSOR) return false; modeset = intel_crtc_needs_modeset(new_crtc_state); turn_off = old_visible && (!new_visible || modeset); turn_on = new_visible && (!old_visible || modeset); /* Must disable CxSR around plane enable/disable */ if (turn_on || turn_off) return true; if (!old_visible || !new_visible) return false; /* * Most plane control register updates are blocked while in CxSR. * * Tiling mode is one exception where the primary plane can * apparently handle it, whereas the sprites can not (the * sprite issue being only relevant on VLV/CHV where CxSR * is actually possible with a sprite enabled). */ if (plane->id == PLANE_PRIMARY) { old_ctl &= ~DISP_TILED; new_ctl &= ~DISP_TILED; } return old_ctl != new_ctl; } static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *new_plane_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); bool mode_changed = intel_crtc_needs_modeset(new_crtc_state); bool was_crtc_enabled = old_crtc_state->hw.active; bool is_crtc_enabled = new_crtc_state->hw.active; bool turn_off, turn_on, visible, was_visible; int ret; if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { ret = skl_update_scaler_plane(new_crtc_state, new_plane_state); if (ret) return ret; } was_visible = old_plane_state->uapi.visible; visible = new_plane_state->uapi.visible; if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) was_visible = false; /* * Visibility is calculated as if the crtc was on, but * after scaler setup everything depends on it being off * when the crtc isn't active. * * FIXME this is wrong for watermarks. Watermarks should also * be computed as if the pipe would be active. Perhaps move * per-plane wm computation to the .check_plane() hook, and * only combine the results from all planes in the current place? 
*/ if (!is_crtc_enabled) { intel_plane_set_invisible(new_crtc_state, new_plane_state); visible = false; } if (!was_visible && !visible) return 0; turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); drm_dbg_atomic(&dev_priv->drm, "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", crtc->base.base.id, crtc->base.name, plane->base.base.id, plane->base.name, was_visible, visible, turn_off, turn_on, mode_changed); if (turn_on) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) new_crtc_state->update_wm_pre = true; } else if (turn_off) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) new_crtc_state->update_wm_post = true; } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { /* FIXME bollocks */ new_crtc_state->update_wm_pre = true; new_crtc_state->update_wm_post = true; } } if (visible || was_visible) new_crtc_state->fb_bits |= plane->frontbuffer_bit; if (HAS_GMCH(dev_priv) && i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) new_crtc_state->disable_cxsr = true; /* * ILK/SNB DVSACNTR/Sprite Enable * IVB SPR_CTL/Sprite Enable * "When in Self Refresh Big FIFO mode, a write to enable the * plane will be internally buffered and delayed while Big FIFO * mode is exiting." * * Which means that enabling the sprite can take an extra frame * when we start in big FIFO mode (LP1+). Thus we need to drop * down to LP0 and wait for vblank in order to make sure the * sprite gets enabled on the next vblank after the register write. * Doing otherwise would risk enabling the sprite one frame after * we've already signalled flip completion. We can resume LP1+ * once the sprite has been enabled. * * * WaCxSRDisabledForSpriteScaling:ivb * IVB SPR_SCALE/Scaling Enable * "Low Power watermarks must be disabled for at least one * frame before enabling sprite scaling, and kept disabled * until sprite scaling is disabled." * * ILK/SNB DVSASCALE/Scaling Enable * "When in Self Refresh Big FIFO mode, scaling enable will be * masked off while Big FIFO mode is exiting." * * Despite the w/a only being listed for IVB we assume that * the ILK/SNB note has similar ramifications, hence we apply * the w/a on all three platforms. * * With experimental results seems this is needed also for primary * plane, not only sprite plane. 
*/ if (plane->id != PLANE_CURSOR && (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!intel_plane_is_scaled(old_plane_state) && intel_plane_is_scaled(new_plane_state)))) new_crtc_state->disable_lp_wm = true; if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { new_crtc_state->do_async_flip = true; new_crtc_state->async_flip_planes |= BIT(plane->id); } return 0; } int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *new_plane_state) { struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); const struct drm_framebuffer *fb = new_plane_state->hw.fb; int ret; intel_plane_set_invisible(new_crtc_state, new_plane_state); new_crtc_state->enabled_planes &= ~BIT(plane->id); if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; ret = plane->check_plane(new_crtc_state, new_plane_state); if (ret) return ret; if (fb) new_crtc_state->enabled_planes |= BIT(plane->id); /* FIXME pre-g4x don't work like this */ if (new_plane_state->uapi.visible) new_crtc_state->active_planes |= BIT(plane->id); if (new_plane_state->uapi.visible && intel_plane_is_scaled(new_plane_state)) new_crtc_state->scaled_planes |= BIT(plane->id); if (new_plane_state->uapi.visible && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) new_crtc_state->nv12_planes |= BIT(plane->id); if (new_plane_state->uapi.visible && fb->format->format == DRM_FORMAT_C8) new_crtc_state->c8_planes |= BIT(plane->id); if (new_plane_state->uapi.visible || old_plane_state->uapi.visible) new_crtc_state->update_planes |= BIT(plane->id); if (new_plane_state->uapi.visible && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { new_crtc_state->data_rate_y[plane->id] = intel_plane_data_rate(new_crtc_state, new_plane_state, 0); new_crtc_state->data_rate[plane->id] = intel_plane_data_rate(new_crtc_state, new_plane_state, 1); new_crtc_state->rel_data_rate_y[plane->id] = intel_plane_relative_data_rate(new_crtc_state, new_plane_state, 0); new_crtc_state->rel_data_rate[plane->id] = intel_plane_relative_data_rate(new_crtc_state, new_plane_state, 1); } else if (new_plane_state->uapi.visible) { new_crtc_state->data_rate[plane->id] = intel_plane_data_rate(new_crtc_state, new_plane_state, 0); new_crtc_state->rel_data_rate[plane->id] = intel_plane_relative_data_rate(new_crtc_state, new_plane_state, 0); } return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state, old_plane_state, new_plane_state); } static struct intel_plane * intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_plane *plane; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { if (plane->id == plane_id) return plane; } return NULL; } int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_master_plane_state; struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = 
intel_atomic_get_new_crtc_state(state, crtc); if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) { struct intel_crtc *master_crtc = intel_master_crtc(new_crtc_state); struct intel_plane *master_plane = intel_crtc_get_plane(master_crtc, plane->id); new_master_plane_state = intel_atomic_get_new_plane_state(state, master_plane); } else { new_master_plane_state = new_plane_state; } intel_plane_copy_uapi_to_hw_state(new_plane_state, new_master_plane_state, crtc); new_plane_state->uapi.visible = false; if (!new_crtc_state) return 0; return intel_plane_atomic_check_with_state(old_crtc_state, new_crtc_state, old_plane_state, new_plane_state); } static struct intel_plane * skl_next_plane_to_commit(struct intel_atomic_state *state, struct intel_crtc *crtc, struct skl_ddb_entry ddb[I915_MAX_PLANES], struct skl_ddb_entry ddb_y[I915_MAX_PLANES], unsigned int *update_mask) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int i; if (*update_mask == 0) return NULL; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { enum plane_id plane_id = plane->id; if (crtc->pipe != plane->pipe || !(*update_mask & BIT(plane_id))) continue; if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id], ddb, I915_MAX_PLANES, plane_id) || skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id], ddb_y, I915_MAX_PLANES, plane_id)) continue; *update_mask &= ~BIT(plane_id); ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id]; ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id]; return plane; } /* should never happen */ drm_WARN_ON(state->base.dev, 1); return NULL; } void intel_plane_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_update_noarm(plane, crtc); if (plane->update_noarm) plane->update_noarm(plane, crtc_state, plane_state); } void intel_plane_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_update_arm(plane, crtc); if (crtc_state->do_async_flip && plane->async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else plane->update_arm(plane, crtc_state, plane_state); } void intel_plane_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_disable_arm(plane, crtc); plane->disable_arm(plane, crtc_state); } void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); u32 update_mask = new_crtc_state->update_planes; struct intel_plane_state *new_plane_state; struct intel_plane *plane; int i; if (new_crtc_state->do_async_flip) return; /* * Since we only write non-arming registers here, * the order does not matter even for skl+. 
*/ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { if (crtc->pipe != plane->pipe || !(update_mask & BIT(plane->id))) continue; /* TODO: for mailbox updates this should be skipped */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); } } static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct skl_ddb_entry ddb[I915_MAX_PLANES]; struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; u32 update_mask = new_crtc_state->update_planes; struct intel_plane *plane; memcpy(ddb, old_crtc_state->wm.skl.plane_ddb, sizeof(old_crtc_state->wm.skl.plane_ddb)); memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y, sizeof(old_crtc_state->wm.skl.plane_ddb_y)); while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) { struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); /* * TODO: for mailbox updates intel_plane_update_noarm() * would have to be called here as well. */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) intel_plane_update_arm(plane, new_crtc_state, new_plane_state); else intel_plane_disable_arm(plane, new_crtc_state); } } static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); u32 update_mask = new_crtc_state->update_planes; struct intel_plane_state *new_plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { if (crtc->pipe != plane->pipe || !(update_mask & BIT(plane->id))) continue; /* * TODO: for mailbox updates intel_plane_update_noarm() * would have to be called here as well. */ if (new_plane_state->uapi.visible) intel_plane_update_arm(plane, new_crtc_state, new_plane_state); else intel_plane_disable_arm(plane, new_crtc_state); } } void intel_crtc_planes_update_arm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (DISPLAY_VER(i915) >= 9) skl_crtc_planes_update_arm(state, crtc); else i9xx_crtc_planes_update_arm(state, crtc); } int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, struct intel_crtc_state *crtc_state, int min_scale, int max_scale, bool can_position) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; struct drm_rect *dst = &plane_state->uapi.dst; const struct drm_rect *clip = &crtc_state->pipe_src; unsigned int rotation = plane_state->hw.rotation; int hscale, vscale; if (!fb) { plane_state->uapi.visible = false; return 0; } drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); /* Check scaling */ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", src, true); drm_rect_debug_print("dst: ", dst, false); return -ERANGE; } /* * FIXME: This might need further adjustment for seamless scaling * with phase information, for the 2p2 and 2p1 scenarios. 
*/ plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip); drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); if (!can_position && plane_state->uapi.visible && !drm_rect_equals(dst, clip)) { drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", clip, false); return -EINVAL; } /* final plane coordinates will be relative to the plane's pipe */ drm_rect_translate(dst, -clip->x1, -clip->y1); return 0; } int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; u32 src_x, src_y, src_w, src_h, hsub, vsub; bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); /* * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS * abuses hsub/vsub so we can't use them here. But as they * are limited to 32bpp RGB formats we don't actually need * to check anything. */ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) return 0; /* * Hardware doesn't handle subpixel coordinates. * Adjust to (macro)pixel boundary, but be careful not to * increase the source viewport size, because that could * push the downscaling factor out of bounds. */ src_x = src->x1 >> 16; src_w = drm_rect_width(src) >> 16; src_y = src->y1 >> 16; src_h = drm_rect_height(src) >> 16; drm_rect_init(src, src_x << 16, src_y << 16, src_w << 16, src_h << 16); if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { hsub = 2; vsub = 2; } else { hsub = fb->format->hsub; vsub = fb->format->vsub; } if (rotated) hsub = vsub = max(hsub, vsub); if (src_x % hsub || src_w % hsub) { drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", src_x, src_w, hsub, str_yes_no(rotated)); return -EINVAL; } if (src_y % vsub || src_h % vsub) { drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", src_y, src_h, vsub, str_yes_no(rotated)); return -EINVAL; } return 0; } /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @_plane: drm plane to prepare for * @_new_plane_state: the plane state being prepared * * Prepares a framebuffer for usage on a display plane. Generally this * involves pinning the underlying object and updating the frontbuffer tracking * bits. Some older platforms need special physical address handling for * cursor planes. * * Returns 0 on success, negative error code on failure. 
*/ static int intel_prepare_plane_fb(struct drm_plane *_plane, struct drm_plane_state *_new_plane_state) { struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY }; struct intel_plane *plane = to_intel_plane(_plane); struct intel_plane_state *new_plane_state = to_intel_plane_state(_new_plane_state); struct intel_atomic_state *state = to_intel_atomic_state(new_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); int ret; if (old_obj) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, to_intel_crtc(old_plane_state->hw.crtc)); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. Note that we rely on userspace rendering * into the buffer attached to the pipe they are waiting * on. If not, userspace generates a GPU hang with IPEHR * point to the MI_WAIT_FOR_EVENT. * * This should only fail upon a hung GPU, in which case we * can safely continue. */ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { ret = i915_sw_fence_await_reservation(&state->commit_ready, old_obj->base.resv, false, 0, GFP_KERNEL); if (ret < 0) return ret; } } if (new_plane_state->uapi.fence) { /* explicit fencing */ i915_gem_fence_wait_priority(new_plane_state->uapi.fence, &attr); ret = i915_sw_fence_await_dma_fence(&state->commit_ready, new_plane_state->uapi.fence, i915_fence_timeout(dev_priv), GFP_KERNEL); if (ret < 0) return ret; } if (!obj) return 0; ret = intel_plane_pin_fb(new_plane_state); if (ret) return ret; i915_gem_object_wait_priority(obj, 0, &attr); if (!new_plane_state->uapi.fence) { /* implicit fencing */ struct dma_resv_iter cursor; struct dma_fence *fence; ret = i915_sw_fence_await_reservation(&state->commit_ready, obj->base.resv, false, i915_fence_timeout(dev_priv), GFP_KERNEL); if (ret < 0) goto unpin_fb; dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_WRITE); dma_resv_for_each_fence_unlocked(&cursor, fence) { intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); } dma_resv_iter_end(&cursor); } else { intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, new_plane_state->uapi.fence); } /* * We declare pageflips to be interactive and so merit a small bias * towards upclocking to deliver the frame on time. By only changing * the RPS thresholds to sample more regularly and aim for higher * clocks we can hopefully deliver low power workloads (like kodi) * that are not quite steady state without resorting to forcing * maximum clocks following a vblank miss (see do_rps_boost()). */ intel_display_rps_mark_interactive(dev_priv, state, true); return 0; unpin_fb: intel_plane_unpin_fb(new_plane_state); return ret; } /** * intel_cleanup_plane_fb - Cleans up an fb after plane use * @plane: drm plane to clean up for * @_old_plane_state: the state from the previous modeset * * Cleans up a framebuffer that has just been removed from a plane. 
*/ static void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *_old_plane_state) { struct intel_plane_state *old_plane_state = to_intel_plane_state(_old_plane_state); struct intel_atomic_state *state = to_intel_atomic_state(old_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); if (!obj) return; intel_display_rps_mark_interactive(dev_priv, state, false); /* Should only be called after a successful intel_prepare_plane_fb()! */ intel_plane_unpin_fb(old_plane_state); } static const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, }; void intel_plane_helper_add(struct intel_plane *plane) { drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); }
linux-master
drivers/gpu/drm/i915/display/intel_atomic_plane.c
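A worked example, with hypothetical numbers, of the downscaling adjustment intel_adjusted_rate() performs above: src rectangles are 16.16 fixed point, dst rectangles are integer pixels, and downscaling multiplies the effective rate by the source-to-destination pixel ratio. The helper name example_downscaled_rate() is invented for illustration and assumes the declarations above are in scope.

#include <drm/drm_rect.h>

static unsigned int example_downscaled_rate(void)
{
	/* 3840x2160 source scanned out into a 1920x1080 destination. */
	struct drm_rect src = DRM_RECT_INIT(0, 0, 3840 << 16, 2160 << 16);
	struct drm_rect dst = DRM_RECT_INIT(0, 0, 1920, 1080);
	unsigned int pixel_rate = 594000; /* kHz, assumed CRTC pixel rate */

	/*
	 * Four source pixels are fetched per output pixel, so this
	 * returns 4 * 594000 = 2376000.
	 */
	return intel_adjusted_rate(&src, &dst, pixel_rate);
}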
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation * * High level crtc/connector/encoder modeset state verification. */ #include <drm/drm_atomic_state_helper.h> #include "i915_drv.h" #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_crtc_state_dump.h" #include "intel_cx0_phy.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_modeset_verify.h" #include "intel_snps_phy.h" #include "skl_watermark.h" /* * Cross check the actual hw state with our own modeset state tracking (and its * internal consistency). */ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id, connector->base.name); if (connector->get_hw_state(connector)) { struct intel_encoder *encoder = intel_attached_encoder(connector); I915_STATE_WARN(i915, !crtc_state, "connector enabled without attached crtc\n"); if (!crtc_state) return; I915_STATE_WARN(i915, !crtc_state->hw.active, "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) return; I915_STATE_WARN(i915, conn_state->best_encoder != &encoder->base, "atomic encoder doesn't match attached encoder\n"); I915_STATE_WARN(i915, conn_state->crtc != encoder->base.crtc, "attached encoder crtc differs from connector crtc\n"); } else { I915_STATE_WARN(i915, crtc_state && crtc_state->hw.active, "attached crtc is active, but connector isn't\n"); I915_STATE_WARN(i915, !crtc_state && conn_state->best_encoder, "best encoder set without crtc!\n"); } } static void verify_connector_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; int i; for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct drm_encoder *encoder = connector->encoder; struct intel_crtc_state *crtc_state = NULL; if (new_conn_state->crtc != &crtc->base) continue; if (crtc) crtc_state = intel_atomic_get_new_crtc_state(state, crtc); intel_connector_verify_state(crtc_state, new_conn_state); I915_STATE_WARN(to_i915(connector->dev), new_conn_state->best_encoder != encoder, "connector's atomic encoder doesn't match legacy encoder\n"); } } static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, const struct intel_crtc_state *pipe_config) { if (pipe_config->has_pch_encoder) { int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), &pipe_config->fdi_m_n); int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; /* * FDI already provided one idea for the dotclock. * Yell if the encoder disagrees. Allow for slight * rounding differences. 
*/ drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1, "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", fdi_dotclock, dotclock); } } static void verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) { struct intel_encoder *encoder; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; int i; for_each_intel_encoder(&dev_priv->drm, encoder) { bool enabled = false, found = false; enum pipe pipe; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name); for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { if (old_conn_state->best_encoder == &encoder->base) found = true; if (new_conn_state->best_encoder != &encoder->base) continue; found = true; enabled = true; I915_STATE_WARN(dev_priv, new_conn_state->crtc != encoder->base.crtc, "connector's crtc doesn't match encoder crtc\n"); } if (!found) continue; I915_STATE_WARN(dev_priv, !!encoder->base.crtc != enabled, "encoder's enabled state mismatch (expected %i, found %i)\n", !!encoder->base.crtc, enabled); if (!encoder->base.crtc) { bool active; active = encoder->get_hw_state(encoder, &pipe); I915_STATE_WARN(dev_priv, active, "encoder detached but still enabled on pipe %c.\n", pipe_name(pipe)); } } } static void verify_crtc_state(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; struct intel_crtc *master_crtc; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); intel_crtc_state_reset(old_crtc_state, crtc); old_crtc_state->uapi.state = state; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); pipe_config->hw.enable = new_crtc_state->hw.enable; intel_crtc_get_pipe_config(pipe_config); /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv) && pipe_config->hw.active) pipe_config->hw.active = new_crtc_state->hw.active; I915_STATE_WARN(dev_priv, new_crtc_state->hw.active != pipe_config->hw.active, "crtc active state doesn't match with hw state (expected %i, found %i)\n", new_crtc_state->hw.active, pipe_config->hw.active); I915_STATE_WARN(dev_priv, crtc->active != new_crtc_state->hw.active, "transitional active state does not match atomic hw state (expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); master_crtc = intel_master_crtc(new_crtc_state); for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { enum pipe pipe; bool active; active = encoder->get_hw_state(encoder, &pipe); I915_STATE_WARN(dev_priv, active != new_crtc_state->hw.active, "[ENCODER:%i] active %i with crtc active %i\n", encoder->base.base.id, active, new_crtc_state->hw.active); I915_STATE_WARN(dev_priv, active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); if (active) intel_encoder_get_config(encoder, pipe_config); } if (!new_crtc_state->hw.active) return; intel_pipe_config_sanity_check(dev_priv, pipe_config); if (!intel_pipe_config_compare(new_crtc_state, pipe_config, false)) { I915_STATE_WARN(dev_priv, 1, "pipe state doesn't match!\n"); intel_crtc_state_dump(pipe_config, NULL, "hw state"); intel_crtc_state_dump(new_crtc_state, NULL, "sw state"); } } void 
intel_modeset_verify_crtc(struct intel_crtc *crtc,
			  struct intel_atomic_state *state,
			  struct intel_crtc_state *old_crtc_state,
			  struct intel_crtc_state *new_crtc_state)
{
	if (!intel_crtc_needs_modeset(new_crtc_state) &&
	    !intel_crtc_needs_fastset(new_crtc_state))
		return;

	intel_wm_state_verify(crtc, new_crtc_state);
	verify_connector_state(state, crtc);
	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
	intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
	intel_mpllb_state_verify(state, new_crtc_state);
	intel_c10pll_state_verify(state, new_crtc_state);
}

void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
				   struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	intel_shared_dpll_verify_disabled(dev_priv);
}
linux-master
drivers/gpu/drm/i915/display/intel_modeset_verify.c
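A minimal sketch of the readback-and-compare pattern the verification above builds on: a hypothetical helper, example_verify_crtc_active() (not part of the driver), that programs nothing and only reads the pipe config back, warning when it disagrees with the committed software state.

static void example_verify_crtc_active(struct intel_crtc *crtc,
				       const struct intel_crtc_state *sw_state,
				       struct intel_crtc_state *hw_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* Read the current pipe configuration back from the hardware. */
	intel_crtc_get_pipe_config(hw_state);

	/* Cross-check the hardware state against the software tracking. */
	I915_STATE_WARN(i915, sw_state->hw.active != hw_state->hw.active,
			"crtc active state doesn't match hw state (sw %d, hw %d)\n",
			sw_state->hw.active, hw_state->hw.active);
}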
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ #include <linux/firmware.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" /** * DOC: DMC Firmware Support * * From gen9 onwards we have newly added DMC (Display microcontroller) in display * engine to save and restore the state of display engine when it enter into * low-power state and comes back to normal. */ enum intel_dmc_id { DMC_FW_MAIN = 0, DMC_FW_PIPEA, DMC_FW_PIPEB, DMC_FW_PIPEC, DMC_FW_PIPED, DMC_FW_MAX }; struct intel_dmc { struct drm_i915_private *i915; struct work_struct work; const char *fw_path; u32 max_fw_size; /* bytes */ u32 version; struct dmc_fw_info { u32 mmio_count; i915_reg_t mmioaddr[20]; u32 mmiodata[20]; u32 dmc_offset; u32 start_mmioaddr; u32 dmc_fw_size; /*dwords */ u32 *payload; bool present; } dmc_info[DMC_FW_MAX]; }; /* Note: This may be NULL. */ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) { return i915->display.dmc.dmc; } #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) #define DMC_VERSION_MAJOR(version) ((version) >> 16) #define DMC_VERSION_MINOR(version) ((version) & 0xffff) #define DMC_PATH(platform) \ "i915/" __stringify(platform) "_dmc.bin" /* * New DMC additions should not use this. This is used solely to remain * compatible with systems that have not yet updated DMC blobs to use * unversioned file names. 
*/ #define DMC_LEGACY_PATH(platform, major, minor) \ "i915/" \ __stringify(platform) "_dmc_ver" \ __stringify(major) "_" \ __stringify(minor) ".bin" #define XELPDP_DMC_MAX_FW_SIZE 0x7000 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE #define MTL_DMC_PATH DMC_PATH(mtl) MODULE_FIRMWARE(MTL_DMC_PATH); #define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08) MODULE_FIRMWARE(DG2_DMC_PATH); #define ADLP_DMC_PATH DMC_PATH(adlp) #define ADLP_DMC_FALLBACK_PATH DMC_LEGACY_PATH(adlp, 2, 16) MODULE_FIRMWARE(ADLP_DMC_PATH); MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH); #define ADLS_DMC_PATH DMC_LEGACY_PATH(adls, 2, 01) MODULE_FIRMWARE(ADLS_DMC_PATH); #define DG1_DMC_PATH DMC_LEGACY_PATH(dg1, 2, 02) MODULE_FIRMWARE(DG1_DMC_PATH); #define RKL_DMC_PATH DMC_LEGACY_PATH(rkl, 2, 03) MODULE_FIRMWARE(RKL_DMC_PATH); #define TGL_DMC_PATH DMC_LEGACY_PATH(tgl, 2, 12) MODULE_FIRMWARE(TGL_DMC_PATH); #define ICL_DMC_PATH DMC_LEGACY_PATH(icl, 1, 09) #define ICL_DMC_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(ICL_DMC_PATH); #define GLK_DMC_PATH DMC_LEGACY_PATH(glk, 1, 04) #define GLK_DMC_MAX_FW_SIZE 0x4000 MODULE_FIRMWARE(GLK_DMC_PATH); #define KBL_DMC_PATH DMC_LEGACY_PATH(kbl, 1, 04) #define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE MODULE_FIRMWARE(KBL_DMC_PATH); #define SKL_DMC_PATH DMC_LEGACY_PATH(skl, 1, 27) #define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE MODULE_FIRMWARE(SKL_DMC_PATH); #define BXT_DMC_PATH DMC_LEGACY_PATH(bxt, 1, 07) #define BXT_DMC_MAX_FW_SIZE 0x3000 MODULE_FIRMWARE(BXT_DMC_PATH); #define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF #define PACKAGE_MAX_FW_INFO_ENTRIES 20 #define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32 #define DMC_V1_MAX_MMIO_COUNT 8 #define DMC_V3_MAX_MMIO_COUNT 20 #define DMC_V1_MMIO_START_RANGE 0x80000 #define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A)) struct intel_css_header { /* 0x09 for DMC */ u32 module_type; /* Includes the DMC specific header in dwords */ u32 header_len; /* always value would be 0x10000 */ u32 header_ver; /* Not used */ u32 module_id; /* Not used */ u32 module_vendor; /* in YYYYMMDD format */ u32 date; /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */ u32 size; /* Not used */ u32 key_size; /* Not used */ u32 modulus_size; /* Not used */ u32 exponent_size; /* Not used */ u32 reserved1[12]; /* Major Minor */ u32 version; /* Not used */ u32 reserved2[8]; /* Not used */ u32 kernel_header_info; } __packed; struct intel_fw_info { u8 reserved1; /* reserved on package_header version 1, must be 0 on version 2 */ u8 dmc_id; /* Stepping (A, B, C, ..., *). * is a wildcard */ char stepping; /* Sub-stepping (0, 1, ..., *). * is a wildcard */ char substepping; u32 offset; u32 reserved2; } __packed; struct intel_package_header { /* DMC container header length in dwords */ u8 header_len; /* 0x01, 0x02 */ u8 header_ver; u8 reserved[10]; /* Number of valid entries in the FWInfo array below */ u32 num_entries; } __packed; struct intel_dmc_header_base { /* always value would be 0x40403E3E */ u32 signature; /* DMC binary header length */ u8 header_len; /* 0x01 */ u8 header_ver; /* Reserved */ u16 dmcc_ver; /* Major, Minor */ u32 project; /* Firmware program size (excluding header) in dwords */ u32 fw_size; /* Major Minor version */ u32 fw_version; } __packed; struct intel_dmc_header_v1 { struct intel_dmc_header_base base; /* Number of valid MMIO cycles present. 
*/ u32 mmio_count; /* MMIO address */ u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT]; /* MMIO data */ u32 mmiodata[DMC_V1_MAX_MMIO_COUNT]; /* FW filename */ char dfile[32]; u32 reserved1[2]; } __packed; struct intel_dmc_header_v3 { struct intel_dmc_header_base base; /* DMC RAM start MMIO address */ u32 start_mmioaddr; u32 reserved[9]; /* FW filename */ char dfile[32]; /* Number of valid MMIO cycles present. */ u32 mmio_count; /* MMIO address */ u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT]; /* MMIO data */ u32 mmiodata[DMC_V3_MAX_MMIO_COUNT]; } __packed; struct stepping_info { char stepping; char substepping; }; #define for_each_dmc_id(__dmc_id) \ for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++) static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) { return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; } static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) { struct intel_dmc *dmc = i915_to_dmc(i915); return dmc && dmc->dmc_info[dmc_id].payload; } bool intel_dmc_has_payload(struct drm_i915_private *i915) { return has_dmc_id_fw(i915, DMC_FW_MAIN); } static const struct stepping_info * intel_get_stepping_info(struct drm_i915_private *i915, struct stepping_info *si) { const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step); si->stepping = step_name[0]; si->substepping = step_name[1]; return si; } static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) { /* The below bit doesn't need to be cleared ever afterwards */ intel_de_rmw(i915, DC_STATE_DEBUG, 0, DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); intel_de_posting_read(i915, DC_STATE_DEBUG); } static void disable_event_handler(struct drm_i915_private *i915, i915_reg_t ctl_reg, i915_reg_t htp_reg) { intel_de_write(i915, ctl_reg, REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVT_CTL_EVENT_ID_FALSE)); intel_de_write(i915, htp_reg, 0); } static void disable_flip_queue_event(struct drm_i915_private *i915, i915_reg_t ctl_reg, i915_reg_t htp_reg) { u32 event_ctl; u32 event_htp; event_ctl = intel_de_read(i915, ctl_reg); event_htp = intel_de_read(i915, htp_reg); if (event_ctl != (DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING | REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) | REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) || !event_htp) { drm_dbg_kms(&i915->drm, "Unexpected DMC event configuration (control %08x htp %08x)\n", event_ctl, event_htp); return; } disable_event_handler(i915, ctl_reg, htp_reg); } static bool get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id, i915_reg_t *ctl_reg, i915_reg_t *htp_reg) { if (dmc_id == DMC_FW_MAIN) { if (DISPLAY_VER(i915) == 12) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3); return true; } } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) { if (IS_DG2(i915)) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2); return true; } } return false; } static void disable_all_flip_queue_events(struct drm_i915_private *i915) { enum intel_dmc_id dmc_id; /* TODO: check if the following applies to all D13+ platforms. 
*/ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915)) return; for_each_dmc_id(dmc_id) { i915_reg_t ctl_reg; i915_reg_t htp_reg; if (!has_dmc_id_fw(i915, dmc_id)) continue; if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg)) continue; disable_flip_queue_event(i915, ctl_reg, htp_reg); } } static void disable_all_event_handlers(struct drm_i915_private *i915) { enum intel_dmc_id dmc_id; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ if (DISPLAY_VER(i915) < 12) return; for_each_dmc_id(dmc_id) { int handler; if (!has_dmc_id_fw(i915, dmc_id)) continue; for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) disable_event_handler(i915, DMC_EVT_CTL(i915, dmc_id, handler), DMC_EVT_HTP(i915, dmc_id, handler)); } } static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) { enum pipe pipe; /* * Wa_16015201720:adl-p,dg2 * The WA requires clock gating to be disabled all the time * for pipe A and B. * For pipe C and D clock gating needs to be disabled only * during initializing the firmware. */ if (enable) for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), 0, PIPEDMC_GATING_DIS); else for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), PIPEDMC_GATING_DIS, 0); } static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) { /* * Wa_16015201720 * The WA requires clock gating to be disabled all the time * for pipe A and B. */ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); } static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) { if (DISPLAY_VER(i915) >= 14 && enable) mtl_pipedmc_clock_gating_wa(i915); else if (DISPLAY_VER(i915) == 13) adlp_pipedmc_clock_gating_wa(i915, enable); } void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); else intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); } void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); else intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); } /** * intel_dmc_load_program() - write the firmware from memory to register. * @i915: i915 drm device. * * DMC firmware is read from a .bin file and kept in internal memory one time. * Everytime display comes back from low power state this function is called to * copy the firmware from internal memory to registers. 
*/ void intel_dmc_load_program(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_dmc *dmc = i915_to_dmc(i915); enum intel_dmc_id dmc_id; u32 i; if (!intel_dmc_has_payload(i915)) return; pipedmc_clock_gating_wa(i915, true); disable_all_event_handlers(i915); assert_rpm_wakelock_held(&i915->runtime_pm); preempt_disable(); for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { intel_de_write_fw(i915, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), dmc->dmc_info[dmc_id].payload[i]); } } preempt_enable(); for_each_dmc_id(dmc_id) { for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], dmc->dmc_info[dmc_id].mmiodata[i]); } } power_domains->dc_state = 0; gen9_set_dc_state_debugmask(i915); /* * Flip queue events need to be disabled before enabling DC5/6. * i915 doesn't use the flip queue feature, so disable it already * here. */ disable_all_flip_queue_events(i915); pipedmc_clock_gating_wa(i915, false); } /** * intel_dmc_disable_program() - disable the firmware * @i915: i915 drm device * * Disable all event handlers in the firmware, making sure the firmware is * inactive after the display is uninitialized. */ void intel_dmc_disable_program(struct drm_i915_private *i915) { if (!intel_dmc_has_payload(i915)) return; pipedmc_clock_gating_wa(i915, true); disable_all_event_handlers(i915); pipedmc_clock_gating_wa(i915, false); } void assert_dmc_loaded(struct drm_i915_private *i915) { struct intel_dmc *dmc = i915_to_dmc(i915); drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); drm_WARN_ONCE(&i915->drm, dmc && !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL), "DMC HTP Not fine\n"); } static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info, const struct stepping_info *si) { if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) || (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) || /* * If we don't find a more specific one from above two checks, we * then check for the generic one to be sure to work even with * "broken firmware" */ (si->stepping == '*' && si->substepping == fw_info->substepping) || (fw_info->stepping == '*' && fw_info->substepping == '*')) return true; return false; } /* * Search fw_info table for dmc_offset to find firmware binary: num_entries is * already sanitized. */ static void dmc_set_fw_offset(struct intel_dmc *dmc, const struct intel_fw_info *fw_info, unsigned int num_entries, const struct stepping_info *si, u8 package_ver) { struct drm_i915_private *i915 = dmc->i915; enum intel_dmc_id dmc_id; unsigned int i; for (i = 0; i < num_entries; i++) { dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; if (!is_valid_dmc_id(dmc_id)) { drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); continue; } /* More specific versions come first, so we don't even have to * check for the stepping since we already found a previous FW * for this id. 
*/ if (dmc->dmc_info[dmc_id].present) continue; if (fw_info_matches_stepping(&fw_info[i], si)) { dmc->dmc_info[dmc_id].present = true; dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset; } } } static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, int header_ver, enum intel_dmc_id dmc_id) { struct drm_i915_private *i915 = dmc->i915; u32 start_range, end_range; int i; if (header_ver == 1) { start_range = DMC_MMIO_START_RANGE; end_range = DMC_MMIO_END_RANGE; } else if (dmc_id == DMC_FW_MAIN) { start_range = TGL_MAIN_MMIO_START; end_range = TGL_MAIN_MMIO_END; } else if (DISPLAY_VER(i915) >= 13) { start_range = ADLP_PIPE_MMIO_START; end_range = ADLP_PIPE_MMIO_END; } else if (DISPLAY_VER(i915) >= 12) { start_range = TGL_PIPE_MMIO_START(dmc_id); end_range = TGL_PIPE_MMIO_END(dmc_id); } else { drm_warn(&i915->drm, "Unknown mmio range for sanity check"); return false; } for (i = 0; i < mmio_count; i++) { if (mmioaddr[i] < start_range || mmioaddr[i] > end_range) return false; } return true; } static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, enum intel_dmc_id dmc_id) { struct drm_i915_private *i915 = dmc->i915; struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; u32 mmio_count, mmio_count_max, start_mmioaddr; u8 *payload; BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT || ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT); /* * Check if we can access common fields, we will checkc again below * after we have read the version */ if (rem_size < sizeof(struct intel_dmc_header_base)) goto error_truncated; /* Cope with small differences between v1 and v3 */ if (dmc_header->header_ver == 3) { const struct intel_dmc_header_v3 *v3 = (const struct intel_dmc_header_v3 *)dmc_header; if (rem_size < sizeof(struct intel_dmc_header_v3)) goto error_truncated; mmioaddr = v3->mmioaddr; mmiodata = v3->mmiodata; mmio_count = v3->mmio_count; mmio_count_max = DMC_V3_MAX_MMIO_COUNT; /* header_len is in dwords */ header_len_bytes = dmc_header->header_len * 4; start_mmioaddr = v3->start_mmioaddr; dmc_header_size = sizeof(*v3); } else if (dmc_header->header_ver == 1) { const struct intel_dmc_header_v1 *v1 = (const struct intel_dmc_header_v1 *)dmc_header; if (rem_size < sizeof(struct intel_dmc_header_v1)) goto error_truncated; mmioaddr = v1->mmioaddr; mmiodata = v1->mmiodata; mmio_count = v1->mmio_count; mmio_count_max = DMC_V1_MAX_MMIO_COUNT; header_len_bytes = dmc_header->header_len; start_mmioaddr = DMC_V1_MMIO_START_RANGE; dmc_header_size = sizeof(*v1); } else { drm_err(&i915->drm, "Unknown DMC fw header version: %u\n", dmc_header->header_ver); return 0; } if (header_len_bytes != dmc_header_size) { drm_err(&i915->drm, "DMC firmware has wrong dmc header length " "(%u bytes)\n", header_len_bytes); return 0; } /* Cache the dmc header info. 
*/ if (mmio_count > mmio_count_max) { drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); return 0; } if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count, dmc_header->header_ver, dmc_id)) { drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n"); return 0; } for (i = 0; i < mmio_count; i++) { dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmiodata[i] = mmiodata[i]; } dmc_info->mmio_count = mmio_count; dmc_info->start_mmioaddr = start_mmioaddr; rem_size -= header_len_bytes; /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ payload_size = dmc_header->fw_size * 4; if (rem_size < payload_size) goto error_truncated; if (payload_size > dmc->max_fw_size) { drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size); return 0; } dmc_info->dmc_fw_size = dmc_header->fw_size; dmc_info->payload = kmalloc(payload_size, GFP_KERNEL); if (!dmc_info->payload) return 0; payload = (u8 *)(dmc_header) + header_len_bytes; memcpy(dmc_info->payload, payload, payload_size); return header_len_bytes + payload_size; error_truncated: drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); return 0; } static u32 parse_dmc_fw_package(struct intel_dmc *dmc, const struct intel_package_header *package_header, const struct stepping_info *si, size_t rem_size) { struct drm_i915_private *i915 = dmc->i915; u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; if (rem_size < package_size) goto error_truncated; if (package_header->header_ver == 1) { max_entries = PACKAGE_MAX_FW_INFO_ENTRIES; } else if (package_header->header_ver == 2) { max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES; } else { drm_err(&i915->drm, "DMC firmware has unknown header version %u\n", package_header->header_ver); return 0; } /* * We should always have space for max_entries, * even if not all are used */ package_size += max_entries * sizeof(struct intel_fw_info); if (rem_size < package_size) goto error_truncated; if (package_header->header_len * 4 != package_size) { drm_err(&i915->drm, "DMC firmware has wrong package header length " "(%u bytes)\n", package_size); return 0; } num_entries = package_header->num_entries; if (WARN_ON(package_header->num_entries > max_entries)) num_entries = max_entries; fw_info = (const struct intel_fw_info *) ((u8 *)package_header + sizeof(*package_header)); dmc_set_fw_offset(dmc, fw_info, num_entries, si, package_header->header_ver); /* dmc_offset is in dwords */ return package_size; error_truncated: drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); return 0; } /* Return number of bytes parsed or 0 on error */ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { struct drm_i915_private *i915 = dmc->i915; if (rem_size < sizeof(struct intel_css_header)) { drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); return 0; } if (sizeof(struct intel_css_header) != (css_header->header_len * 4)) { drm_err(&i915->drm, "DMC firmware has wrong CSS header length " "(%u bytes)\n", (css_header->header_len * 4)); return 0; } dmc->version = css_header->version; return sizeof(struct intel_css_header); } static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { struct drm_i915_private *i915 = dmc->i915; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; struct stepping_info display_info = { '*', '*'}; const struct stepping_info *si = 
intel_get_stepping_info(i915, &display_info); enum intel_dmc_id dmc_id; u32 readcount = 0; u32 r, offset; if (!fw) return; /* Extract CSS Header information */ css_header = (struct intel_css_header *)fw->data; r = parse_dmc_fw_css(dmc, css_header, fw->size); if (!r) return; readcount += r; /* Extract Package Header information */ package_header = (struct intel_package_header *)&fw->data[readcount]; r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount); if (!r) return; readcount += r; for_each_dmc_id(dmc_id) { if (!dmc->dmc_info[dmc_id].present) continue; offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; if (offset > fw->size) { drm_err(&i915->drm, "Reading beyond the fw_size\n"); continue; } dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } } static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) { drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&i915->display.dmc.wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); } static const char *dmc_fallback_path(struct drm_i915_private *i915) { if (IS_ALDERLAKE_P(i915)) return ADLP_DMC_FALLBACK_PATH; return NULL; } static void dmc_load_work_fn(struct work_struct *work) { struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); struct drm_i915_private *i915 = dmc->i915; const struct firmware *fw = NULL; const char *fallback_path; int err; err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); if (err == -ENOENT && !i915->params.dmc_firmware_path) { fallback_path = dmc_fallback_path(i915); if (fallback_path) { drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", dmc->fw_path, fallback_path); err = request_firmware(&fw, fallback_path, i915->drm.dev); if (err == 0) dmc->fw_path = fallback_path; } } parse_dmc_fw(dmc, fw); if (intel_dmc_has_payload(i915)) { intel_dmc_load_program(i915); intel_dmc_runtime_pm_put(i915); drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } else { drm_notice(&i915->drm, "Failed to load DMC firmware %s." " Disabling runtime power management.\n", dmc->fw_path); drm_notice(&i915->drm, "DMC firmware homepage: %s", INTEL_UC_FIRMWARE_URL); } release_firmware(fw); } /** * intel_dmc_init() - initialize the firmware loading. * @i915: i915 drm device. * * This function is called at the time of loading the display driver to read * firmware from a .bin file and copied into a internal memory. */ void intel_dmc_init(struct drm_i915_private *i915) { struct intel_dmc *dmc; if (!HAS_DMC(i915)) return; /* * Obtain a runtime pm reference, until DMC is loaded, to avoid entering * runtime-suspend. * * On error, we return with the rpm wakeref held to prevent runtime * suspend as runtime suspend *requires* a working DMC for whatever * reason. 
*/ intel_dmc_runtime_pm_get(i915); dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); if (!dmc) return; dmc->i915 = i915; INIT_WORK(&dmc->work, dmc_load_work_fn); if (IS_METEORLAKE(i915)) { dmc->fw_path = MTL_DMC_PATH; dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE; } else if (IS_DG2(i915)) { dmc->fw_path = DG2_DMC_PATH; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_P(i915)) { dmc->fw_path = ADLP_DMC_PATH; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(i915)) { dmc->fw_path = ADLS_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_DG1(i915)) { dmc->fw_path = DG1_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_ROCKETLAKE(i915)) { dmc->fw_path = RKL_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_TIGERLAKE(i915)) { dmc->fw_path = TGL_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (DISPLAY_VER(i915) == 11) { dmc->fw_path = ICL_DMC_PATH; dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE; } else if (IS_GEMINILAKE(i915)) { dmc->fw_path = GLK_DMC_PATH; dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE; } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { dmc->fw_path = KBL_DMC_PATH; dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE; } else if (IS_SKYLAKE(i915)) { dmc->fw_path = SKL_DMC_PATH; dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE; } else if (IS_BROXTON(i915)) { dmc->fw_path = BXT_DMC_PATH; dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE; } if (i915->params.dmc_firmware_path) { if (strlen(i915->params.dmc_firmware_path) == 0) { drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); goto out; } dmc->fw_path = i915->params.dmc_firmware_path; } if (!dmc->fw_path) { drm_dbg_kms(&i915->drm, "No known DMC firmware for platform, disabling runtime PM\n"); goto out; } i915->display.dmc.dmc = dmc; drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); queue_work(i915->unordered_wq, &dmc->work); return; out: kfree(dmc); } /** * intel_dmc_suspend() - prepare DMC firmware before system suspend * @i915: i915 drm device * * Prepare the DMC firmware before entering system suspend. This includes * flushing pending work items and releasing any resources acquired during * init. */ void intel_dmc_suspend(struct drm_i915_private *i915) { struct intel_dmc *dmc = i915_to_dmc(i915); if (!HAS_DMC(i915)) return; if (dmc) flush_work(&dmc->work); /* Drop the reference held in case DMC isn't loaded. */ if (!intel_dmc_has_payload(i915)) intel_dmc_runtime_pm_put(i915); } /** * intel_dmc_resume() - init DMC firmware during system resume * @i915: i915 drm device * * Reinitialize the DMC firmware during system resume, reacquiring any * resources released in intel_dmc_suspend(). */ void intel_dmc_resume(struct drm_i915_private *i915) { if (!HAS_DMC(i915)) return; /* * Reacquire the reference to keep RPM disabled in case DMC isn't * loaded. */ if (!intel_dmc_has_payload(i915)) intel_dmc_runtime_pm_get(i915); } /** * intel_dmc_fini() - unload the DMC firmware. * @i915: i915 drm device. * * Firmmware unloading includes freeing the internal memory and reset the * firmware loading status. 
*/ void intel_dmc_fini(struct drm_i915_private *i915) { struct intel_dmc *dmc = i915_to_dmc(i915); enum intel_dmc_id dmc_id; if (!HAS_DMC(i915)) return; intel_dmc_suspend(i915); drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); if (dmc) { for_each_dmc_id(dmc_id) kfree(dmc->dmc_info[dmc_id].payload); kfree(dmc); i915->display.dmc.dmc = NULL; } } void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { struct intel_dmc *dmc = i915_to_dmc(i915); if (!HAS_DMC(i915)) return; i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); i915_error_printf(m, "DMC loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); if (dmc) i915_error_printf(m, "DMC fw version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = m->private; struct intel_dmc *dmc = i915_to_dmc(i915); intel_wakeref_t wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; if (!HAS_DMC(i915)) return -ENODEV; wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", str_yes_no(GRAPHICS_VER(i915) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", str_yes_no(IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)); seq_printf(m, "Pipe B fw loaded: %s\n", str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); if (!intel_dmc_has_payload(i915)) goto out; seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); if (DISPLAY_VER(i915) >= 12) { i915_reg_t dc3co_reg; if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) { dc3co_reg = DG1_DMC_DEBUG3; dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; } else { dc3co_reg = TGL_DMC_DEBUG3; dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; } seq_printf(m, "DC3CO count: %d\n", intel_de_read(i915, dc3co_reg)); } else { dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT : SKL_DMC_DC3_DC5_COUNT; if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)) dc6_reg = SKL_DMC_DC5_DC6_COUNT; } seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg)); if (i915_mmio_reg_valid(dc6_reg)) seq_printf(m, "DC5 -> DC6 count: %d\n", intel_de_read(i915, dc6_reg)); seq_printf(m, "program base: 0x%08x\n", intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); out: seq_printf(m, "ssp base: 0x%08x\n", intel_de_read(i915, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); intel_runtime_pm_put(&i915->runtime_pm, wakeref); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status); void intel_dmc_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, i915, &intel_dmc_debugfs_status_fops); }
linux-master
drivers/gpu/drm/i915/display/intel_dmc.c
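A quick standalone illustration (not part of the kernel file above) of how the DMC_LEGACY_PATH macro assembles firmware file names purely in the preprocessor: each argument is stringified and the pieces merge as adjacent string literals, so DMC_LEGACY_PATH(adls, 2, 01) yields "i915/adls_dmc_ver2_01.bin". The two-level __stringify helper mirrors the kernel's definition in linux/stringify.h; everything else here is plain C for demonstration.

/* Standalone sketch; compiles outside the kernel. */
#include <stdio.h>

/* Two-level expansion so macro arguments are expanded before being
 * stringified, mirroring the kernel's __stringify(). */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/"					\
	__stringify(platform) "_dmc_ver"	\
	__stringify(major) "_"			\
	__stringify(minor) ".bin"

int main(void)
{
	/* Adjacent string literals concatenate at compile time:
	 * prints "i915/adls_dmc_ver2_01.bin". */
	printf("%s\n", DMC_LEGACY_PATH(adls, 2, 01));
	return 0;
}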
/* * Copyright © 2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ /* * Laptops with Intel GPUs which have panels that support controlling the * backlight through DP AUX can actually use two different interfaces: Intel's * proprietary DP AUX backlight interface, and the standard VESA backlight * interface. Unfortunately, at the time of writing this a lot of laptops will * advertise support for the standard VESA backlight interface when they * don't properly support it. However, on these systems the Intel backlight * interface generally does work properly. Additionally, these systems will * usually just indicate that they use PWM backlight controls in their VBIOS * for some reason. */ #include "i915_drv.h" #include "intel_backlight.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux_backlight.h" /* TODO: * Implement HDR, right now we just implement the bare minimum to bring us back into SDR mode so we * can make people's backlights work in the mean time */ /* * DP AUX registers for Intel's proprietary HDR backlight interface. We define * them here since we'll likely be the only driver to ever use these. 
*/ #define INTEL_EDP_HDR_TCON_CAP0 0x340 #define INTEL_EDP_HDR_TCON_CAP1 0x341 # define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0) # define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1) # define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2) # define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3) # define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4) # define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5) # define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6) # define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7) #define INTEL_EDP_HDR_TCON_CAP2 0x342 # define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0) #define INTEL_EDP_HDR_TCON_CAP3 0x343 #define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344 # define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0) # define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1) # define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */ # define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3) # define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4) # define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5) /* Bit 6 is reserved */ # define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7) #define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */ #define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A #define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352 #define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354 #define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355 #define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356 #define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357 #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358 # define INTEL_EDP_TCON_USAGE_MASK GENMASK(0, 3) # define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0 # define INTEL_EDP_TCON_USAGE_DESKTOP 0x1 # define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2 # define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3 # define INTEL_EDP_TCON_POWER_MASK BIT(4) # define INTEL_EDP_TCON_POWER_DC (0 << 4) # define INTEL_EDP_TCON_POWER_AC (1 << 4) # define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(5, 7) #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 enum intel_dp_aux_backlight_modparam { INTEL_DP_AUX_BACKLIGHT_AUTO = -1, INTEL_DP_AUX_BACKLIGHT_OFF = 0, INTEL_DP_AUX_BACKLIGHT_ON = 1, INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, }; static bool is_intel_tcon_cap(const u8 tcon_cap[4]) { return tcon_cap[0] >= 1; } /* Intel EDP backlight callbacks */ static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct drm_dp_aux *aux = &intel_dp->aux; struct intel_panel *panel = &connector->panel; int ret; u8 tcon_cap[4]; intel_dp_wait_source_oui(intel_dp); ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); if (ret != sizeof(tcon_cap)) return false; if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP)) return false; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", connector->base.base.id, connector->base.name, is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]); if (!is_intel_tcon_cap(tcon_cap)) return false; /* * If we don't have HDR static metadata there is no way to * runtime detect used range for nits based control. For now * do not use Intel proprietary eDP backlight control if we * don't have this data in panel EDID. 
In case we find panel * which supports only nits based control, but doesn't provide * HDR static metadata we need to start maintaining table of * ranges for such panels. */ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))) { drm_info(&i915->drm, "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", connector->base.base.id, connector->base.name, INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); return false; } panel->backlight.edp.intel.sdr_uses_aux = tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP; return true; } static u32 intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 tmp; u8 buf[2] = { 0 }; if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", connector->base.base.id, connector->base.name); return 0; } if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) { if (!panel->backlight.edp.intel.sdr_uses_aux) { u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe); return intel_backlight_level_from_pwm(connector, pwm_level); } /* Assume 100% brightness if backlight controls aren't enabled yet */ return panel->backlight.max; } if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", connector->base.base.id, connector->base.name); return 0; } return (buf[1] << 8 | buf[0]); } static void intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_device *dev = connector->base.dev; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); u8 buf[4] = { 0 }; buf[0] = level & 0xFF; buf[1] = (level & 0xFF00) >> 8; if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n", connector->base.base.id, connector->base.name); } static void intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; if (panel->backlight.edp.intel.sdr_uses_aux) { intel_dp_aux_hdr_set_aux_backlight(conn_state, level); } else { const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); intel_backlight_set_pwm_level(conn_state, pwm_level); } } static void intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); int ret; u8 old_ctrl, ctrl; 
intel_dp_wait_source_oui(intel_dp); ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", connector->base.base.id, connector->base.name, ret); return; } ctrl = old_ctrl; if (panel->backlight.edp.intel.sdr_uses_aux) { ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; intel_dp_aux_hdr_set_aux_backlight(conn_state, level); } else { u32 pwm_level = intel_backlight_level_to_pwm(connector, level); panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; } if (ctrl != old_ctrl && drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", connector->base.base.id, connector->base.name); } static void intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; /* Nothing to do for AUX based backlight controls */ if (panel->backlight.edp.intel.sdr_uses_aux) return; /* Note we want the actual pwm_level to be 0, regardless of pwm_min */ panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0)); } static const char *dpcd_vs_pwm_str(bool aux) { return aux ? "DPCD" : "PWM"; } static int intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; struct drm_luminance_range_info *luminance_range = &connector->base.display_info.luminance_range; int ret; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux)); if (!panel->backlight.edp.intel.sdr_uses_aux) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n", connector->base.base.id, connector->base.name, ret); return ret; } } if (luminance_range->max_luminance) { panel->backlight.max = luminance_range->max_luminance; panel->backlight.min = luminance_range->min_luminance; } else { panel->backlight.max = 512; panel->backlight.min = 0; } drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", connector->base.base.id, connector->base.name, panel->backlight.min, panel->backlight.max); panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe); panel->backlight.enabled = panel->backlight.level != 0; return 0; } /* VESA backlight callbacks */ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused) { return connector->panel.backlight.level; } static void intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); if (!panel->backlight.edp.vesa.info.aux_set) { const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); intel_backlight_set_pwm_level(conn_state, pwm_level); } drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, 
level); } static void intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); if (!panel->backlight.edp.vesa.info.aux_enable) { u32 pwm_level; if (!panel->backlight.edp.vesa.info.aux_set) pwm_level = intel_backlight_level_to_pwm(connector, level); else pwm_level = intel_backlight_invert_pwm_level(connector, panel->backlight.pwm_level_max); panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); } drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); if (!panel->backlight.edp.vesa.info.aux_enable) panel->backlight.pwm_funcs->disable(old_conn_state, intel_backlight_invert_pwm_level(connector, 0)); } static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_panel *panel = &connector->panel; struct drm_i915_private *i915 = dp_to_i915(intel_dp); u16 current_level; u8 current_mode; int ret; ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info, panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, &current_level, &current_mode); if (ret < 0) return ret; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", connector->base.base.id, connector->base.name, dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", connector->base.base.id, connector->base.name, ret); return ret; } } if (panel->backlight.edp.vesa.info.aux_set) { panel->backlight.max = panel->backlight.edp.vesa.info.max; panel->backlight.min = 0; if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { panel->backlight.level = current_level; panel->backlight.enabled = panel->backlight.level != 0; } else { panel->backlight.level = panel->backlight.max; panel->backlight.enabled = false; } } else { panel->backlight.max = panel->backlight.pwm_level_max; panel->backlight.min = panel->backlight.pwm_level_min; if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) { panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe); panel->backlight.enabled = panel->backlight.pwm_enabled; } else { panel->backlight.level = panel->backlight.max; panel->backlight.enabled = false; } } drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static bool intel_dp_aux_supports_vesa_backlight(struct intel_connector 
*connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", connector->base.base.id, connector->base.name); return true; } return false; } static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = { .setup = intel_dp_aux_hdr_setup_backlight, .enable = intel_dp_aux_hdr_enable_backlight, .disable = intel_dp_aux_hdr_disable_backlight, .set = intel_dp_aux_hdr_set_backlight, .get = intel_dp_aux_hdr_get_backlight, }; static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { .setup = intel_dp_aux_vesa_setup_backlight, .enable = intel_dp_aux_vesa_enable_backlight, .disable = intel_dp_aux_vesa_disable_backlight, .set = intel_dp_aux_vesa_set_backlight, .get = intel_dp_aux_vesa_get_backlight, }; int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool try_intel_interface = false, try_vesa_interface = false; /* Check the VBT and user's module parameters to figure out which * interfaces to probe */ switch (i915->params.enable_dpcd_backlight) { case INTEL_DP_AUX_BACKLIGHT_OFF: return -ENODEV; case INTEL_DP_AUX_BACKLIGHT_AUTO: switch (panel->vbt.backlight.type) { case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE: try_vesa_interface = true; break; case INTEL_BACKLIGHT_DISPLAY_DDI: try_intel_interface = true; break; default: return -ENODEV; } break; case INTEL_DP_AUX_BACKLIGHT_ON: if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE) try_intel_interface = true; try_vesa_interface = true; break; case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA: try_vesa_interface = true; break; case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL: try_intel_interface = true; break; } /* * Since Intel has their own backlight control interface, the majority of machines out there * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA * interface. This means that there's quite a number of panels out there that will advertise * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups. * * There's a catch to this though: on many panels that advertise support for both * interfaces, the VESA backlight interface will stop working once we've programmed the * panel with Intel's OUI - which is also required for us to be able to detect Intel's * backlight interface at all. This means that the only sensible way for us to detect both * interfaces is to probe for Intel's first, and VESA's second. */ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n", connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_hdr_bl_funcs; return 0; } if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n", connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_vesa_bl_funcs; return 0; } return -ENODEV; }
linux-master
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
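A side note on the AUX HDR path in the file above: the brightness level in nits is split into a little-endian byte pair before being written starting at INTEL_EDP_BRIGHTNESS_NITS_LSB (0x354). A minimal standalone sketch of that packing and the matching read-back follows; the register offsets are only echoed in comments, and nothing here touches DPCD.

/* Standalone sketch; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static void pack_nits(uint32_t level, uint8_t buf[2])
{
	buf[0] = level & 0xff;		/* LSB, written at 0x354 */
	buf[1] = (level >> 8) & 0xff;	/* MSB, written at 0x355 */
}

static uint32_t unpack_nits(const uint8_t buf[2])
{
	return (uint32_t)buf[1] << 8 | buf[0];
}

int main(void)
{
	uint8_t buf[2];

	pack_nits(400, buf);	/* e.g. a 400 nit request */
	printf("lsb=0x%02x msb=0x%02x -> %u nits\n",
	       buf[0], buf[1], unpack_nits(buf));
	return 0;
}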
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/drm_atomic_helper.h> #include "i915_drv.h" #include "intel_clock_gating.h" #include "intel_display_driver.h" #include "intel_display_reset.h" #include "intel_display_types.h" #include "intel_hotplug.h" #include "intel_pps.h" static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) { return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && intel_has_gpu_reset(to_gt(dev_priv))); } void intel_display_reset_prepare(struct drm_i915_private *dev_priv) { struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx; struct drm_atomic_state *state; int ret; if (!HAS_DISPLAY(dev_priv)) return; /* reset doesn't touch the display */ if (!dev_priv->params.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; /* We have a modeset vs reset deadlock, defensively unbreak it. */ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); smp_mb__after_atomic(); wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET); if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { drm_dbg_kms(&dev_priv->drm, "Modeset potentially stuck, unbreaking through wedging\n"); intel_gt_set_wedged(to_gt(dev_priv)); } /* * Need mode_config.mutex so that we don't * trample ongoing ->detect() and whatnot. */ mutex_lock(&dev_priv->drm.mode_config.mutex); drm_modeset_acquire_init(ctx, 0); while (1) { ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx); if (ret != -EDEADLK) break; drm_modeset_backoff(ctx); } /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx); if (IS_ERR(state)) { ret = PTR_ERR(state); drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", ret); return; } ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx); if (ret) { drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", ret); drm_atomic_state_put(state); return; } dev_priv->display.restore.modeset_state = state; state->acquire_ctx = ctx; } void intel_display_reset_finish(struct drm_i915_private *i915) { struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx; struct drm_atomic_state *state; int ret; if (!HAS_DISPLAY(i915)) return; /* reset doesn't touch the display */ if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags)) return; state = fetch_and_zero(&i915->display.restore.modeset_state); if (!state) goto unlock; /* reset doesn't touch the display */ if (!gpu_reset_clobbers_display(i915)) { /* for testing only restore the display */ ret = drm_atomic_helper_commit_duplicated_state(state, ctx); if (ret) { drm_WARN_ON(&i915->drm, ret == -EDEADLK); drm_err(&i915->drm, "Restoring old state failed with %i\n", ret); } } else { /* * The display has been reset as well, * so need a full re-initialization. */ intel_pps_unlock_regs_wa(i915); intel_display_driver_init_hw(i915); intel_clock_gating_init(i915); intel_hpd_init(i915); ret = __intel_display_driver_resume(i915, state, ctx); if (ret) drm_err(&i915->drm, "Restoring old state failed with %i\n", ret); intel_hpd_poll_disable(i915); } drm_atomic_state_put(state); unlock: drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); mutex_unlock(&i915->drm.mode_config.mutex); clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags); }
linux-master
drivers/gpu/drm/i915/display/intel_display_reset.c
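The reset-prepare path in the file above keeps calling drm_modeset_lock_all_ctx() and backs off whenever it returns -EDEADLK, retrying until the whole lock set is acquired. Below is a minimal standalone sketch of that retry shape; try_lock_all() and backoff() are hypothetical stand-ins invented for illustration and do not correspond to real DRM APIs.

/* Standalone sketch of the acquire/backoff retry loop; not kernel code. */
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in: pretend the first two rounds hit a deadlock. */
static int try_lock_all(int attempt)
{
	return attempt < 2 ? -EDEADLK : 0;
}

/* Hypothetical stand-in for drm_modeset_backoff(): drop what we hold
 * and wait for the contended lock before retrying. */
static void backoff(void)
{
	puts("backoff: drop held locks, wait for the contended one");
}

int main(void)
{
	int attempt = 0, ret;

	for (;;) {
		ret = try_lock_all(attempt++);
		if (ret != -EDEADLK)
			break;
		backoff();
	}
	printf("locked after %d attempts (ret=%d)\n", attempt, ret);
	return 0;
}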
/* * Copyright 2006 Dave Airlie <[email protected]> * Copyright © 2006-2009 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * Jesse Barnes <[email protected]> */ #include <linux/delay.h> #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/display/drm_scdc_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/intel_lpe_audio.h> #include "g4x_hdmi.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" #include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" #include "intel_hdmi.h" #include "intel_lspcon.h" #include "intel_panel.h" #include "intel_snps_phy.h" inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi) { return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev); } static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi); u32 enabled_bits; enabled_bits = HAS_DDI(dev_priv) ? 
DDI_BUF_CTL_ENABLE : SDVO_ENABLE; drm_WARN(&dev_priv->drm, intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits, "HDMI port enabled, expecting disabled\n"); } static void assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { drm_WARN(&dev_priv->drm, intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE, "HDMI transcoder function enabled, expecting disabled\n"); } static u32 g4x_infoframe_index(unsigned int type) { switch (type) { case HDMI_PACKET_TYPE_GAMUT_METADATA: return VIDEO_DIP_SELECT_GAMUT; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_SELECT_AVI; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_SELECT_SPD; case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_SELECT_VENDOR; default: MISSING_CASE(type); return 0; } } static u32 g4x_infoframe_enable(unsigned int type) { switch (type) { case HDMI_PACKET_TYPE_GENERAL_CONTROL: return VIDEO_DIP_ENABLE_GCP; case HDMI_PACKET_TYPE_GAMUT_METADATA: return VIDEO_DIP_ENABLE_GAMUT; case DP_SDP_VSC: return 0; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD; case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VENDOR; case HDMI_INFOFRAME_TYPE_DRM: return 0; default: MISSING_CASE(type); return 0; } } static u32 hsw_infoframe_enable(unsigned int type) { switch (type) { case HDMI_PACKET_TYPE_GENERAL_CONTROL: return VIDEO_DIP_ENABLE_GCP_HSW; case HDMI_PACKET_TYPE_GAMUT_METADATA: return VIDEO_DIP_ENABLE_GMP_HSW; case DP_SDP_VSC: return VIDEO_DIP_ENABLE_VSC_HSW; case DP_SDP_PPS: return VDIP_ENABLE_PPS; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI_HSW; case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD_HSW; case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VS_HSW; case HDMI_INFOFRAME_TYPE_DRM: return VIDEO_DIP_ENABLE_DRM_GLK; default: MISSING_CASE(type); return 0; } } static i915_reg_t hsw_dip_data_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, unsigned int type, int i) { switch (type) { case HDMI_PACKET_TYPE_GAMUT_METADATA: return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i); case DP_SDP_VSC: return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); case DP_SDP_PPS: return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_AVI: return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_SPD: return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_VENDOR: return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_DRM: return GLK_TVIDEO_DIP_DRM_DATA(cpu_transcoder, i); default: MISSING_CASE(type); return INVALID_MMIO_REG; } } static int hsw_dip_data_size(struct drm_i915_private *dev_priv, unsigned int type) { switch (type) { case DP_SDP_VSC: return VIDEO_DIP_VSC_DATA_SIZE; case DP_SDP_PPS: return VIDEO_DIP_PPS_DATA_SIZE; case HDMI_PACKET_TYPE_GAMUT_METADATA: if (DISPLAY_VER(dev_priv) >= 11) return VIDEO_DIP_GMP_DATA_SIZE; else return VIDEO_DIP_DATA_SIZE; default: return VIDEO_DIP_DATA_SIZE; } } static void g4x_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); int i; drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= 
~g4x_infoframe_enable(type); intel_de_write(dev_priv, VIDEO_DIP_CTL, val); for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, VIDEO_DIP_DATA, *data); data++; } /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) intel_de_write(dev_priv, VIDEO_DIP_DATA, 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; intel_de_write(dev_priv, VIDEO_DIP_CTL, val); intel_de_posting_read(dev_priv, VIDEO_DIP_CTL); } static void g4x_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 *data = frame; int i; intel_de_rmw(dev_priv, VIDEO_DIP_CTL, VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); } static u32 g4x_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); } static void ibx_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= ~g4x_infoframe_enable(type); intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); } static void ibx_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; i915_reg_t reg = TVIDEO_DIP_CTL(pipe); u32 val = intel_de_read(dev_priv, reg); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } static void cpt_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); /* The DIP control register spec says that we need to update the AVI * infoframe without clearing its enable bit */ if (type != HDMI_INFOFRAME_TYPE_AVI) val &= ~g4x_infoframe_enable(type); intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); } static void cpt_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } static void vlv_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); int i; drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= ~g4x_infoframe_enable(type); intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, VLV_TVIDEO_DIP_DATA(crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) intel_de_write(dev_priv, VLV_TVIDEO_DIP_DATA(crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); } static void vlv_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 *data = frame; int i; intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, VLV_TVIDEO_DIP_DATA(crtc->pipe)); } static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } void hsw_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); int data_size; int i; u32 val = intel_de_read(dev_priv, ctl_reg); data_size = hsw_dip_data_size(dev_priv, type); drm_WARN_ON(&dev_priv->drm, len > data_size); val &= ~hsw_infoframe_enable(type); intel_de_write(dev_priv, ctl_reg, val); for (i = 0; i < len; i += 4) { intel_de_write(dev_priv, hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < data_size; i += 4) intel_de_write(dev_priv, hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), 0); /* Wa_14013475917 */ if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC) return; val |= hsw_infoframe_enable(type); intel_de_write(dev_priv, ctl_reg, val); intel_de_posting_read(dev_priv, ctl_reg); } void hsw_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 *data = frame; int i; for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2)); } static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); u32 mask; mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); if (DISPLAY_VER(dev_priv) >= 10) mask |= VIDEO_DIP_ENABLE_DRM_GLK; return val & mask; } static const u8 infoframe_type_to_idx[] = { HDMI_PACKET_TYPE_GENERAL_CONTROL, HDMI_PACKET_TYPE_GAMUT_METADATA, DP_SDP_VSC, HDMI_INFOFRAME_TYPE_AVI, HDMI_INFOFRAME_TYPE_SPD, HDMI_INFOFRAME_TYPE_VENDOR, HDMI_INFOFRAME_TYPE_DRM, }; u32 intel_hdmi_infoframe_enable(unsigned int type) { int i; for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { if (infoframe_type_to_idx[i] == type) return BIT(i); } return 0; } u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val, ret = 0; int i; val = dig_port->infoframes_enabled(encoder, crtc_state); /* map from hardware bits to dip idx */ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { unsigned int type = infoframe_type_to_idx[i]; if (HAS_DDI(dev_priv)) { if (val & hsw_infoframe_enable(type)) ret |= BIT(i); } else { if (val & g4x_infoframe_enable(type)) ret |= BIT(i); } } return ret; } /* * The data we write to the DIP data buffer registers is 1 byte bigger than the * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting * at 0). It's also a byte used by DisplayPort so the same DIP registers can be * used for both technologies. * * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0 * DW1: DB3 | DB2 | DB1 | DB0 * DW2: DB7 | DB6 | DB5 | DB4 * DW3: ... * * (HB is Header Byte, DB is Data Byte) * * The hdmi pack() functions don't know about that hardware specific hole so we * trick them by giving an offset into the buffer and moving back the header * bytes by one. 
*/ static void intel_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const union hdmi_infoframe *frame) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; if (drm_WARN_ON(encoder->base.dev, frame->any.type != type)) return; /* see comment above for the reason for this offset */ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1); if (drm_WARN_ON(encoder->base.dev, len < 0)) return; /* Insert the 'hole' (see big comment above) at position 3 */ memmove(&buffer[0], &buffer[1], 3); buffer[3] = 0; len++; dig_port->write_infoframe(encoder, crtc_state, type, buffer, len); } void intel_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, union hdmi_infoframe *frame) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; int ret; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; dig_port->read_infoframe(encoder, crtc_state, type, buffer, sizeof(buffer)); /* Fill the 'hole' (see big comment above) at position 3 */ memmove(&buffer[1], &buffer[0], 3); /* see comment above for the reason for this offset */ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1); if (ret) { drm_dbg_kms(encoder->base.dev, "Failed to unpack infoframe type 0x%02x\n", type); return; } if (frame->any.type != type) drm_dbg_kms(encoder->base.dev, "Found the wrong infoframe type 0x%x (expected 0x%02x)\n", frame->any.type, type); } static bool intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; int ret; if (!crtc_state->has_infoframe) return true; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, adjusted_mode); if (ret) return false; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) frame->colorspace = HDMI_COLORSPACE_YUV420; else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) frame->colorspace = HDMI_COLORSPACE_YUV444; else frame->colorspace = HDMI_COLORSPACE_RGB; drm_hdmi_avi_infoframe_colorimetry(frame, conn_state); /* nonsense combination */ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { drm_hdmi_avi_infoframe_quant_range(frame, connector, adjusted_mode, crtc_state->limited_color_range ? 
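	/*
	 * For illustration only (typical CEA-861 behaviour, values assumed):
	 * with "Broadcast RGB" left on Auto, a CEA mode such as 1920x1080@60
	 * ends up with limited_color_range set, so the AVI infoframe
	 * advertises limited range (16-235), while a PC mode like 1920x1200
	 * defaults to full range (0-255).
	 */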
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); } else { frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; } drm_hdmi_avi_infoframe_content_type(frame, conn_state); /* TODO: handle pixel repetition for YCBCR420 outputs */ ret = hdmi_avi_infoframe_check(frame); if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; } static bool intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd; int ret; if (!crtc_state->has_infoframe) return true; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD); if (IS_DGFX(i915)) ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx"); else ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx"); if (drm_WARN_ON(encoder->base.dev, ret)) return false; frame->sdi = HDMI_SPD_SDI_PC; ret = hdmi_spd_infoframe_check(frame); if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; } static bool intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct hdmi_vendor_infoframe *frame = &crtc_state->infoframes.hdmi.vendor.hdmi; const struct drm_display_info *info = &conn_state->connector->display_info; int ret; if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe) return true; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR); ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, conn_state->connector, &crtc_state->hw.adjusted_mode); if (drm_WARN_ON(encoder->base.dev, ret)) return false; ret = hdmi_vendor_infoframe_check(frame); if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; } static bool intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int ret; if (DISPLAY_VER(dev_priv) < 10) return true; if (!crtc_state->has_infoframe) return true; if (!conn_state->hdr_output_metadata) return true; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM); ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state); if (ret < 0) { drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); return false; } ret = hdmi_drm_infoframe_check(frame); if (drm_WARN_ON(&dev_priv->drm, ret)) return false; return true; } static void g4x_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); /* If the registers were not initialized yet, they might be zeroes, * which means we're selecting the AVI DIP and we're setting its * frequency to once. This seems to really confuse the HW and make * things stop working (the register spec says the AVI always needs to * be sent every VSync). 
So here we avoid writing to the register more * than we need and also explicitly select the AVI DIP and explicitly * set its frequency to every VSync. Avoiding to write it twice seems to * be enough to solve the problem, but being defensive shouldn't hurt us * either. */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; if (!enable) { if (!(val & VIDEO_DIP_ENABLE)) return; if (port != (val & VIDEO_DIP_PORT_MASK)) { drm_dbg_kms(&dev_priv->drm, "video DIP still enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); return; } val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { if (val & VIDEO_DIP_ENABLE) { drm_dbg_kms(&dev_priv->drm, "video DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); return; } val &= ~VIDEO_DIP_PORT_MASK; val |= port; } val |= VIDEO_DIP_ENABLE; val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, &crtc_state->infoframes.avi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_SPD, &crtc_state->infoframes.spd); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); } /* * Determine if default_phase=1 can be indicated in the GCP infoframe. * * From HDMI specification 1.4a: * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0 * - The first pixel following each Video Data Period shall have a pixel packing phase of 0 * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing * phase of 0 */ static bool gcp_default_phase_possible(int pipe_bpp, const struct drm_display_mode *mode) { unsigned int pixels_per_group; switch (pipe_bpp) { case 30: /* 4 pixels in 5 clocks */ pixels_per_group = 4; break; case 36: /* 2 pixels in 3 clocks */ pixels_per_group = 2; break; case 48: /* 1 pixel in 2 clocks */ pixels_per_group = 1; break; default: /* phase information not relevant for 8bpc */ return false; } return mode->crtc_hdisplay % pixels_per_group == 0 && mode->crtc_htotal % pixels_per_group == 0 && mode->crtc_hblank_start % pixels_per_group == 0 && mode->crtc_hblank_end % pixels_per_group == 0 && mode->crtc_hsync_start % pixels_per_group == 0 && mode->crtc_hsync_end % pixels_per_group == 0 && ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 || mode->crtc_htotal/2 % pixels_per_group == 0); } static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) return false; if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv)) reg = TVIDEO_DIP_GCP(crtc->pipe); else return false; intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp); return true; } void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, 
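	/*
	 * Illustration, not part of the driver logic: this helper reads the
	 * GCP payload back for hardware state readout. The value programmed by
	 * intel_hdmi_set_gcp_infoframe() above carries GCP_COLOR_INDICATION
	 * for deep colour and, when gcp_default_phase_possible() holds,
	 * GCP_DEFAULT_PHASE_ENABLE. Worked example with the CEA 1080p timings
	 * (hdisplay 1920, hsync 2008-2052, htotal 2200): at 36bpp the packing
	 * group is 2 pixels and every horizontal value is even; at 30bpp the
	 * group is 4 pixels and every value is divisible by 4, so the default
	 * phase can be signalled in both deep colour cases.
	 */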
struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) return; if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv)) reg = TVIDEO_DIP_GCP(crtc->pipe); else return; crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg); } static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (IS_G4X(dev_priv) || !crtc_state->has_infoframe) return; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL); /* Indicate color indication for deep color mode */ if (crtc_state->pipe_bpp > 24) crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION; /* Enable default_phase whenever the display mode is suitably aligned */ if (gcp_default_phase_possible(crtc_state->pipe_bpp, &crtc_state->hw.adjusted_mode)) crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE; } static void ibx_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; if (!enable) { if (!(val & VIDEO_DIP_ENABLE)) return; val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, "DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; val |= port; } val |= VIDEO_DIP_ENABLE; val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, &crtc_state->infoframes.avi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_SPD, &crtc_state->infoframes.spd); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); } static void cpt_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe); u32 val = 
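	/*
	 * Illustration, derived from the surrounding code: unlike
	 * ibx_set_infoframes() and vlv_set_infoframes(), the CPT/PPT variant
	 * never programs or checks a VIDEO_DIP_PORT field - the DIP block is
	 * addressed purely by pipe through TVIDEO_DIP_CTL(pipe), which is also
	 * why cpt_infoframes_enabled() skips the port check.
	 */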
intel_de_read(dev_priv, reg); assert_hdmi_port_disabled(intel_hdmi); /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; if (!enable) { if (!(val & VIDEO_DIP_ENABLE)) return; val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); return; } /* Set both together, unset both together: see the spec. */ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, &crtc_state->infoframes.avi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_SPD, &crtc_state->infoframes.spd); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); } static void vlv_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe); u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; if (!enable) { if (!(val & VIDEO_DIP_ENABLE)) return; val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, "DIP already enabled on port %c\n", (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; val |= port; } val |= VIDEO_DIP_ENABLE; val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, &crtc_state->infoframes.avi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_SPD, &crtc_state->infoframes.spd); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); } static void hsw_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); u32 val = intel_de_read(dev_priv, reg); assert_hdmi_transcoder_func_disabled(dev_priv, crtc_state->cpu_transcoder); val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK); if (!enable) { intel_de_write(dev_priv, reg, val); 
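		/*
		 * Sketch of the usual i915 flush idiom (not new driver logic):
		 * the posting read below simply reads the register back so the
		 * disable write is flushed out before we return, i.e.
		 *
		 *	intel_de_write(dev_priv, reg, val);
		 *	intel_de_posting_read(dev_priv, reg);
		 */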
intel_de_posting_read(dev_priv, reg); return; } if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP_HSW; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, &crtc_state->infoframes.avi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_SPD, &crtc_state->infoframes.spd); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_DRM, &crtc_state->infoframes.drm); } void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); struct i2c_adapter *adapter; if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", enable ? "Enabling" : "Disabling"); drm_dp_dual_mode_set_tmds_output(&dev_priv->drm, hdmi->dp_dual_mode.type, adapter, enable); } static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port, unsigned int offset, void *buffer, size_t size) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; u8 start = offset & 0xff; struct i2c_msg msgs[] = { { .addr = DRM_HDCP_DDC_ADDR, .flags = 0, .len = 1, .buf = &start, }, { .addr = DRM_HDCP_DDC_ADDR, .flags = I2C_M_RD, .len = size, .buf = buffer } }; ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); if (ret == ARRAY_SIZE(msgs)) return 0; return ret >= 0 ? -EIO : ret; } static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port, unsigned int offset, void *buffer, size_t size) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; u8 *write_buf; struct i2c_msg msg; write_buf = kzalloc(size + 1, GFP_KERNEL); if (!write_buf) return -ENOMEM; write_buf[0] = offset & 0xff; memcpy(&write_buf[1], buffer, size); msg.addr = DRM_HDCP_DDC_ADDR; msg.flags = 0, msg.len = size + 1, msg.buf = write_buf; ret = i2c_transfer(adapter, &msg, 1); if (ret == 1) ret = 0; else if (ret >= 0) ret = -EIO; kfree(write_buf); return ret; } static int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an, DRM_HDCP_AN_LEN); if (ret) { drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n", ret); return ret; } ret = intel_gmbus_output_aksv(adapter); if (ret < 0) { drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret); return ret; } return 0; } static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n", ret); return ret; } static int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; ret = 
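	/*
	 * Illustration of what the call below does (sketch only):
	 * intel_hdmi_hdcp_read() models the HDCP-over-DDC register protocol as
	 * two i2c messages to DRM_HDCP_DDC_ADDR - a one byte write that
	 * selects the register offset, then an I2C_M_RD transfer of the
	 * requested length. For BSTATUS that means writing the
	 * DRM_HDCP_DDC_BSTATUS offset and reading back DRM_HDCP_BSTATUS_LEN
	 * bytes into bstatus[].
	 */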
intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n", ret); return ret; } static int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; u8 val; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", ret); return ret; } *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; return 0; } static int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n", ret); return ret; } static int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; u8 val; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", ret); return ret; } *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; return 0; } static int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO, ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); if (ret) { drm_dbg_kms(&i915->drm, "Read ksv fifo over DDC failed (%d)\n", ret); return ret; } return 0; } static int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) return -EINVAL; ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n", i, ret); return ret; } static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, enum transcoder cpu_transcoder) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc); u32 scanline; int ret; for (;;) { scanline = intel_de_read(dev_priv, PIPEDSL(crtc->pipe)); if (scanline > 100 && scanline < 200) break; usleep_range(25, 50); } ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder, false, TRANS_DDI_HDCP_SIGNALLING); if (ret) { drm_err(&dev_priv->drm, "Disable HDCP signalling failed (%d)\n", ret); return ret; } ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder, true, TRANS_DDI_HDCP_SIGNALLING); if (ret) { drm_err(&dev_priv->drm, "Enable HDCP signalling failed (%d)\n", ret); return ret; } return 0; } static int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, enum transcoder cpu_transcoder, bool enable) { struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_connector *connector = hdmi->attached_connector; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int ret; if (!enable) usleep_range(6, 60); /* Bspec says >= 6us */ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder, enable, TRANS_DDI_HDCP_SIGNALLING); if (ret) { 
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", enable ? "Enable" : "Disable", ret); return ret; } /* * WA: To fix incorrect positioning of the window of * opportunity and enc_en signalling in KABYLAKE. */ if (IS_KABYLAKE(dev_priv) && enable) return kbl_repositioning_enc_en_signal(connector, cpu_transcoder); return 0; } static bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; union { u32 reg; u8 shim[DRM_HDCP_RI_LEN]; } ri; ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim); if (ret) return false; intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n", intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); return false; } return true; } static bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int retry; for (retry = 0; retry < 3; retry++) if (intel_hdmi_hdcp_check_link_once(dig_port, connector)) return true; drm_err(&i915->drm, "Link check failed\n"); return false; } struct hdcp2_hdmi_msg_timeout { u8 msg_id; u16 timeout; }; static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = { { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, }, { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, }, { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, }, { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, }, { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, }, }; static int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port, u8 *rx_status) { return intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_HDMI_RXSTATUS_LEN); } static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) { int i; if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) { if (is_paired) return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS; else return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS; } for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) { if (hdcp2_msg_timeout[i].msg_id == msg_id) return hdcp2_msg_timeout[i].timeout; } return -EINVAL; } static int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, u8 msg_id, bool *msg_ready, ssize_t *msg_sz) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status); if (ret < 0) { drm_dbg_kms(&i915->drm, "rx_status read failed. 
Err %d\n", ret); return ret; } *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) | rx_status[0]); if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) && *msg_sz); else *msg_ready = *msg_sz; return 0; } static ssize_t intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, u8 msg_id, bool paired) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); bool msg_ready = false; int timeout, ret; ssize_t msg_sz = 0; timeout = get_hdcp2_msg_timeout(msg_id, paired); if (timeout < 0) return timeout; ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port, msg_id, &msg_ready, &msg_sz), !ret && msg_ready && msg_sz, timeout * 1000, 1000, 5 * 1000); if (ret) drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n", msg_id, ret, timeout); return ret ? ret : msg_sz; } static int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port, void *buf, size_t size) { unsigned int offset; offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET; return intel_hdmi_hdcp_write(dig_port, offset, buf, size); } static int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; unsigned int offset; ssize_t ret; ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id, hdcp->is_paired); if (ret < 0) return ret; /* * Available msg size should be equal to or lesser than the * available buffer. */ if (ret > size) { drm_dbg_kms(&i915->drm, "msg_sz(%zd) is more than exp size(%zu)\n", ret, size); return -EINVAL; } offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret); if (ret) drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n", msg_id, ret); return ret; } static int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status); if (ret) return ret; /* * Re-auth request and Link Integrity Failures are represented by * same bit. i.e reauth_req. 
*/ if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1])) ret = HDCP_REAUTH_REQUEST; else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1])) ret = HDCP_TOPOLOGY_CHANGE; return ret; } static int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port, bool *capable) { u8 hdcp2_version; int ret; *capable = false; ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET, &hdcp2_version, sizeof(hdcp2_version)); if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK) *capable = true; return ret; } static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, .read_bksv = intel_hdmi_hdcp_read_bksv, .read_bstatus = intel_hdmi_hdcp_read_bstatus, .repeater_present = intel_hdmi_hdcp_repeater_present, .read_ri_prime = intel_hdmi_hdcp_read_ri_prime, .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready, .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo, .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part, .toggle_signalling = intel_hdmi_hdcp_toggle_signalling, .check_link = intel_hdmi_hdcp_check_link, .write_2_2_msg = intel_hdmi_hdcp2_write_msg, .read_2_2_msg = intel_hdmi_hdcp2_read_msg, .check_2_2_link = intel_hdmi_hdcp2_check_link, .hdcp_2_2_capable = intel_hdmi_hdcp2_capable, .protocol = HDCP_PROTOCOL_HDMI, }; static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int max_tmds_clock, vbt_max_tmds_clock; if (DISPLAY_VER(dev_priv) >= 10) max_tmds_clock = 594000; else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) max_tmds_clock = 300000; else if (DISPLAY_VER(dev_priv) >= 5) max_tmds_clock = 225000; else max_tmds_clock = 165000; vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata); if (vbt_max_tmds_clock) max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock); return max_tmds_clock; } static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, const struct drm_connector_state *conn_state) { struct intel_connector *connector = hdmi->attached_connector; return connector->base.display_info.is_hdmi && READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state) { return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420; } static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_downstream_limits, bool has_hdmi_sink) { struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder); if (respect_downstream_limits) { struct intel_connector *connector = hdmi->attached_connector; const struct drm_display_info *info = &connector->base.display_info; if (hdmi->dp_dual_mode.max_tmds_clock) max_tmds_clock = min(max_tmds_clock, hdmi->dp_dual_mode.max_tmds_clock); if (info->max_tmds_clock) max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); else if (!has_hdmi_sink) max_tmds_clock = min(max_tmds_clock, 165000); } return max_tmds_clock; } static enum drm_mode_status hdmi_port_clock_valid(struct intel_hdmi *hdmi, int clock, bool respect_downstream_limits, bool has_hdmi_sink) { struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port); if (clock < 25000) return MODE_CLOCK_LOW; if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, has_hdmi_sink)) return MODE_CLOCK_HIGH; /* GLK DPLL can't generate 446-480 MHz */ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 
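	/*
	 * Worked example (illustrative, using the intel_hdmi_tmds_clock()
	 * arithmetic found below): a 297 MHz 4K@30 mode needs 297 * 12 / 8 =
	 * 445.5 MHz of TMDS clock at 12bpc, which still fits under this
	 * 446.666-480 MHz GLK hole, whereas a port clock of, say, 450 MHz is
	 * rejected with MODE_CLOCK_RANGE on GLK while passing on platforms
	 * without this DPLL restriction.
	 */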
480000) return MODE_CLOCK_RANGE; /* BXT/GLK DPLL can't generate 223-240 MHz */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE; /* CHV DPLL can't generate 216-240 MHz */ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000) return MODE_CLOCK_RANGE; /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */ if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200) return MODE_CLOCK_RANGE; /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */ if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800) return MODE_CLOCK_RANGE; /* * SNPS PHYs' MPLLB table-based programming can only handle a fixed * set of link rates. * * FIXME: We will hopefully get an algorithmic way of programming * the MPLLB for HDMI in the future. */ if (DISPLAY_VER(dev_priv) >= 14) return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock); else if (IS_DG2(dev_priv)) return intel_snps_phy_check_hdmi_link_rate(clock); return MODE_OK; } int intel_hdmi_tmds_clock(int clock, int bpc, enum intel_output_format sink_format) { /* YCBCR420 TMDS rate requirement is half the pixel clock */ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) clock /= 2; /* * Need to adjust the port link by: * 1.5x for 12bpc * 1.25x for 10bpc */ return DIV_ROUND_CLOSEST(clock * bpc, 8); } static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) { switch (bpc) { case 12: return !HAS_GMCH(i915); case 10: return DISPLAY_VER(i915) >= 11; case 8: return true; default: MISSING_CASE(bpc); return false; } } static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, int bpc, bool has_hdmi_sink, enum intel_output_format sink_format) { const struct drm_display_info *info = &connector->display_info; const struct drm_hdmi_info *hdmi = &info->hdmi; switch (bpc) { case 12: if (!has_hdmi_sink) return false; if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36; else return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36; case 10: if (!has_hdmi_sink) return false; if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30; else return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30; case 8: return true; default: MISSING_CASE(bpc); return false; } } static enum drm_mode_status intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, bool has_hdmi_sink, enum intel_output_format sink_format) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum drm_mode_status status = MODE_OK; int bpc; /* * Try all color depths since valid port clock range * can have holes. Any mode that can be used with at * least one color depth is accepted. 
*/ for (bpc = 12; bpc >= 8; bpc -= 2) { int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); if (!intel_hdmi_source_bpc_possible(i915, bpc)) continue; if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format)) continue; status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink); if (status == MODE_OK) return MODE_OK; } /* can never happen */ drm_WARN_ON(&i915->drm, status == MODE_OK); return status; } static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); enum drm_mode_status status; int clock = mode->clock; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state); bool ycbcr_420_only; enum intel_output_format sink_format; if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) clock *= 2; if (clock > max_dotclk) return MODE_CLOCK_HIGH; if (mode->flags & DRM_MODE_FLAG_DBLCLK) { if (!has_hdmi_sink) return MODE_CLOCK_LOW; clock *= 2; } /* * HDMI2.1 requires higher resolution modes like 8k60, 4K120 to be * enumerated only if FRL is supported. Current platforms do not support * FRL so prune the higher resolution modes that require doctclock more * than 600MHz. */ if (clock > 600000) return MODE_CLOCK_HIGH; ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode); if (ycbcr_420_only) sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; else sink_format = INTEL_OUTPUT_FORMAT_RGB; status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format); if (status != MODE_OK) { if (ycbcr_420_only || !connector->ycbcr_420_allowed || !drm_mode_is_420_also(&connector->display_info, mode)) return status; sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format); if (status != MODE_OK) return status; } return intel_mode_valid_max_plane_size(dev_priv, mode, false); } bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc, bool has_hdmi_sink) { struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; int i; for_each_new_connector_in_state(state, connector, connector_state, i) { if (connector_state->crtc != crtc_state->uapi.crtc) continue; if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, crtc_state->sink_format)) return false; } return true; } static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) return false; /* Display Wa_1405510057:icl,ehl */ if (intel_hdmi_is_ycbcr420(crtc_state) && bpc == 10 && DISPLAY_VER(dev_priv) == 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) return false; return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int clock, bool respect_downstream_limits) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int bpc; /* * pipe_bpp could already be below 8bpc due to FDI * bandwidth constraints. HDMI minimum is 8bpc however. 
*/ bpc = max(crtc_state->pipe_bpp / 3, 8); /* * We will never exceed downstream TMDS clock limits while * attempting deep color. If the user insists on forcing an * out of spec mode they will have to be satisfied with 8bpc. */ if (!respect_downstream_limits) bpc = 8; for (; bpc >= 8; bpc -= 2) { int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, crtc_state->sink_format); if (hdmi_bpc_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, tmds_clock, respect_downstream_limits, crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } return -EINVAL; } static int intel_hdmi_compute_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, bool respect_downstream_limits) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock, respect_downstream_limits); if (bpc < 0) return bpc; crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, crtc_state->sink_format); /* * pipe_bpp could already be below 8bpc due to * FDI bandwidth constraints. We shouldn't bump it * back up to the HDMI minimum 8bpc in that case. */ crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3); drm_dbg_kms(&i915->drm, "picking %d bpc for HDMI output (pipe bpp: %d)\n", bpc, crtc_state->pipe_bpp); return 0; } bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. 
*/ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) return false; if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* See CEA-861-E - 5.1 Default Encoding Parameters */ return crtc_state->has_hdmi_sink && drm_default_rgb_quant_range(adjusted_mode) == HDMI_QUANTIZATION_RANGE_LIMITED; } else { return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; } } static bool intel_hdmi_has_audio(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_connector *connector = conn_state->connector; const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); if (!crtc_state->has_hdmi_sink) return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) return connector->display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } static enum intel_output_format intel_hdmi_sink_format(const struct intel_crtc_state *crtc_state, struct intel_connector *connector, bool ycbcr_420_output) { if (!crtc_state->has_hdmi_sink) return INTEL_OUTPUT_FORMAT_RGB; if (connector->base.ycbcr_420_allowed && ycbcr_420_output) return INTEL_OUTPUT_FORMAT_YCBCR420; else return INTEL_OUTPUT_FORMAT_RGB; } static enum intel_output_format intel_hdmi_output_format(const struct intel_crtc_state *crtc_state) { return crtc_state->sink_format; } static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, bool respect_downstream_limits) { struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct drm_display_info *info = &connector->base.display_info; struct drm_i915_private *i915 = to_i915(connector->base.dev); bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; crtc_state->sink_format = intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only); if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) { drm_dbg_kms(&i915->drm, "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; } crtc_state->output_format = intel_hdmi_output_format(crtc_state); ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); if (ret) { if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || !crtc_state->has_hdmi_sink || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, adjusted_mode)) return ret; crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; crtc_state->output_format = intel_hdmi_output_format(crtc_state); ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); } return ret; } static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state) { return crtc_state->uapi.encoder_mask && !is_power_of_2(crtc_state->uapi.encoder_mask); } static bool source_supports_scrambling(struct intel_encoder *encoder) { /* * Gen 10+ support HDMI 2.0 : the max tmds clock is 594MHz, and * scrambling is supported. * But there seem to be cases where certain platforms that support * HDMI 2.0, have an HDMI1.4 retimer chip, and the max tmds clock is * capped by VBT to less than 340MHz. * * In such cases when an HDMI2.0 sink is connected, it creates a * problem : the platform and the sink both support scrambling but the * HDMI 1.4 retimer chip doesn't. 
* * So go for scrambling, based on the max tmds clock taking into account, * restrictions coming from VBT. */ return intel_hdmi_source_max_tmds_clock(encoder) > 340000; } bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); return intel_has_hdmi_sink(hdmi, conn_state) && !intel_hdmi_is_cloned(crtc_state); } int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; if (!connector->interlace_allowed && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; pipe_config->has_audio = intel_hdmi_has_audio(encoder, pipe_config, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); /* * Try to respect downstream TMDS clock limits first, if * that fails assume the user might know something we don't. */ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, true); if (ret) ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false); if (ret) { drm_dbg_kms(&dev_priv->drm, "unsupported HDMI clock (%d kHz), rejecting mode\n", pipe_config->hw.adjusted_mode.crtc_clock); return ret; } if (intel_hdmi_is_ycbcr420(pipe_config)) { ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; } pipe_config->limited_color_range = intel_hdmi_limited_color_range(pipe_config, conn_state); if (conn_state->picture_aspect_ratio) adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; pipe_config->lane_count = 4; if (scdc->scrambling.supported && source_supports_scrambling(encoder)) { if (scdc->scrambling.low_rates) pipe_config->hdmi_scrambling = true; if (pipe_config->port_clock > 340000) { pipe_config->hdmi_scrambling = true; pipe_config->hdmi_high_tmds_clock_ratio = true; } } intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state); if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) { drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) { drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) { drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n"); return -EINVAL; } if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) { drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n"); return -EINVAL; } return 0; } void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); /* * Give a hand to buggy BIOSen which forget to turn * the TMDS output buffers back on after a reboot. 
*/ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); } static void intel_hdmi_unset_edid(struct drm_connector *connector) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; intel_hdmi->dp_dual_mode.max_tmds_clock = 0; drm_edid_free(to_intel_connector(connector)->detect_edid); to_intel_connector(connector)->detect_edid = NULL; } static void intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(&dev_priv->drm, adapter); /* * Type 1 DVI adaptors are not required to implement any * registers, so we can't always detect their presence. * Ideally we should be able to check the state of the * CONFIG1 pin, but no such luck on our hardware. * * The only method left to us is to check the VBT to see * if the port is a dual mode capable DP port. */ if (type == DRM_DP_DUAL_MODE_UNKNOWN) { if (!connector->force && intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { drm_dbg_kms(&dev_priv->drm, "Assuming DP dual mode adaptor presence based on VBT\n"); type = DRM_DP_DUAL_MODE_TYPE1_DVI; } else { type = DRM_DP_DUAL_MODE_NONE; } } if (type == DRM_DP_DUAL_MODE_NONE) return; hdmi->dp_dual_mode.type = type; hdmi->dp_dual_mode.max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, adapter); drm_dbg_kms(&dev_priv->drm, "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", drm_dp_get_dual_mode_type_name(type), hdmi->dp_dual_mode.max_tmds_clock); /* Older VBTs are often buggy and can't be trusted :( Play it safe. 
*/ if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && !intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) { drm_dbg_kms(&dev_priv->drm, "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n"); hdmi->dp_dual_mode.max_tmds_clock = 0; } } static bool intel_hdmi_set_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); intel_wakeref_t wakeref; const struct drm_edid *drm_edid; const struct edid *edid; bool connected = false; struct i2c_adapter *i2c; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); drm_edid = drm_edid_read_ddc(connector, i2c); if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) { drm_dbg_kms(&dev_priv->drm, "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); intel_gmbus_force_bit(i2c, true); drm_edid = drm_edid_read_ddc(connector, i2c); intel_gmbus_force_bit(i2c, false); } /* Below we depend on display info having been updated */ drm_edid_connector_update(connector, drm_edid); to_intel_connector(connector)->detect_edid = drm_edid; /* FIXME: Get rid of drm_edid_raw() */ edid = drm_edid_raw(drm_edid); if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { intel_hdmi_dp_dual_mode_detect(connector); connected = true; } intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid); return connected; } static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; intel_wakeref_t wakeref; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); if (!INTEL_DISPLAY_ENABLED(dev_priv)) return connector_status_disconnected; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (DISPLAY_VER(dev_priv) >= 11 && !intel_digital_port_connected(encoder)) goto out; intel_hdmi_unset_edid(connector); if (intel_hdmi_set_edid(connector)) status = connector_status_connected; out: intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); if (status != connector_status_connected) cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier); /* * Make sure the refs for power wells enabled during detect are * dropped to avoid a new detect cycle triggered by HPD polling. 
*/ intel_display_power_flush_work(dev_priv); return status; } static void intel_hdmi_force(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_hdmi_unset_edid(connector); if (connector->status != connector_status_connected) return; intel_hdmi_set_edid(connector); } static int intel_hdmi_get_modes(struct drm_connector *connector) { /* drm_edid_connector_update() done in ->detect() or ->force() */ return drm_edid_connector_add_modes(connector); } static struct i2c_adapter * intel_hdmi_get_i2c_adapter(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector)); return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); } static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector); struct kobject *i2c_kobj = &adapter->dev.kobj; struct kobject *connector_kobj = &connector->kdev->kobj; int ret; ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name); if (ret) drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret); } static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector) { struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector); struct kobject *i2c_kobj = &adapter->dev.kobj; struct kobject *connector_kobj = &connector->kdev->kobj; sysfs_remove_link(connector_kobj, i2c_kobj->name); } static int intel_hdmi_connector_register(struct drm_connector *connector) { int ret; ret = intel_connector_register(connector); if (ret) return ret; intel_hdmi_create_i2c_symlink(connector); return ret; } static void intel_hdmi_connector_unregister(struct drm_connector *connector) { struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); intel_hdmi_remove_i2c_symlink(connector); intel_connector_unregister(connector); } static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .detect = intel_hdmi_detect, .force = intel_hdmi_force, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_hdmi_connector_register, .early_unregister = intel_hdmi_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static int intel_hdmi_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->dev); if (HAS_DDI(i915)) return intel_digital_connector_atomic_check(connector, state); else return g4x_hdmi_connector_atomic_check(connector, state); } static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, .atomic_check = intel_hdmi_connector_atomic_check, }; static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); 
intel_attach_hdmi_colorspace_property(connector); drm_connector_attach_content_type_property(connector); if (DISPLAY_VER(dev_priv) >= 10) drm_connector_attach_hdr_output_metadata_property(connector); if (!HAS_GMCH(dev_priv)) drm_connector_attach_max_bpc_property(connector, 8, 12); } /* * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup * @encoder: intel_encoder * @connector: drm_connector * @high_tmds_clock_ratio = bool to indicate if the function needs to set * or reset the high tmds clock ratio for scrambling * @scrambling: bool to Indicate if the function needs to set or reset * sink scrambling * * This function handles scrambling on HDMI 2.0 capable sinks. * If required clock rate is > 340 Mhz && scrambling is supported by sink * it enables scrambling. This should be called before enabling the HDMI * 2.0 port, as the sink can choose to disable the scrambling if it doesn't * detect a scrambled clock within 100 ms. * * Returns: * True on success, false on failure. */ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, bool scrambling) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_scrambling *sink_scrambling = &connector->display_info.hdmi.scdc.scrambling; if (!sink_scrambling->supported) return true; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n", connector->base.id, connector->name, str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10); /* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */ return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) && drm_scdc_set_scrambling(connector, scrambling); } static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { u8 ddc_pin; switch (port) { case PORT_B: ddc_pin = GMBUS_PIN_DPB; break; case PORT_C: ddc_pin = GMBUS_PIN_DPC; break; case PORT_D: ddc_pin = GMBUS_PIN_DPD_CHV; break; default: MISSING_CASE(port); ddc_pin = GMBUS_PIN_DPB; break; } return ddc_pin; } static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { u8 ddc_pin; switch (port) { case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; case PORT_C: ddc_pin = GMBUS_PIN_2_BXT; break; default: MISSING_CASE(port); ddc_pin = GMBUS_PIN_1_BXT; break; } return ddc_pin; } static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { u8 ddc_pin; switch (port) { case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; case PORT_C: ddc_pin = GMBUS_PIN_2_BXT; break; case PORT_D: ddc_pin = GMBUS_PIN_4_CNP; break; case PORT_F: ddc_pin = GMBUS_PIN_3_BXT; break; default: MISSING_CASE(port); ddc_pin = GMBUS_PIN_1_BXT; break; } return ddc_pin; } static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { enum phy phy = intel_port_to_phy(dev_priv, port); if (intel_phy_is_combo(dev_priv, phy)) return GMBUS_PIN_1_BXT + port; else if (intel_phy_is_tc(dev_priv, phy)) return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port); drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port)); return GMBUS_PIN_2_BXT; } static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { enum phy phy = intel_port_to_phy(dev_priv, port); u8 ddc_pin; switch (phy) { case PHY_A: ddc_pin = GMBUS_PIN_1_BXT; break; case PHY_B: ddc_pin = GMBUS_PIN_2_BXT; break; case PHY_C: ddc_pin = GMBUS_PIN_9_TC1_ICP; break; default: MISSING_CASE(phy); ddc_pin = GMBUS_PIN_1_BXT; break; } return ddc_pin; } static u8 
rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { enum phy phy = intel_port_to_phy(dev_priv, port); WARN_ON(port == PORT_C); /* * Pin mapping for RKL depends on which PCH is present. With TGP, the * final two outputs use type-c pins, even though they're actually * combo outputs. With CMP, the traditional DDI A-D pins are used for * all outputs. */ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C) return GMBUS_PIN_9_TC1_ICP + phy - PHY_C; return GMBUS_PIN_1_BXT + phy; } static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port) { enum phy phy = intel_port_to_phy(i915, port); drm_WARN_ON(&i915->drm, port == PORT_A); /* * Pin mapping for GEN9 BC depends on which PCH is present. With TGP, * final two outputs use type-c pins, even though they're actually * combo outputs. With CMP, the traditional DDI A-D pins are used for * all outputs. */ if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C) return GMBUS_PIN_9_TC1_ICP + phy - PHY_C; return GMBUS_PIN_1_BXT + phy; } static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { return intel_port_to_phy(dev_priv, port) + 1; } static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { enum phy phy = intel_port_to_phy(dev_priv, port); WARN_ON(port == PORT_B || port == PORT_C); /* * Pin mapping for ADL-S requires TC pins for all combo phy outputs * except first combo output. */ if (phy == PHY_A) return GMBUS_PIN_1_BXT; return GMBUS_PIN_9_TC1_ICP + phy - PHY_B; } static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { u8 ddc_pin; switch (port) { case PORT_B: ddc_pin = GMBUS_PIN_DPB; break; case PORT_C: ddc_pin = GMBUS_PIN_DPC; break; case PORT_D: ddc_pin = GMBUS_PIN_DPD; break; default: MISSING_CASE(port); ddc_pin = GMBUS_PIN_DPB; break; } return ddc_pin; } static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u8 ddc_pin; if (IS_ALDERLAKE_S(dev_priv)) ddc_pin = adls_port_to_ddc_pin(dev_priv, port); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) ddc_pin = dg1_port_to_ddc_pin(dev_priv, port); else if (IS_ROCKETLAKE(dev_priv)) ddc_pin = rkl_port_to_ddc_pin(dev_priv, port); else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv)) ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port); else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && HAS_PCH_TGP(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); else if (IS_CHERRYVIEW(dev_priv)) ddc_pin = chv_port_to_ddc_pin(dev_priv, port); else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); return ddc_pin; } static struct intel_encoder * get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; for_each_intel_encoder(&i915->drm, other) { if (other == encoder) continue; if (!intel_encoder_is_dig_port(other)) continue; if (enc_to_dig_port(other)->hdmi.ddc_bus == ddc_pin) return other; } return NULL; } static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; const char *source; u8 ddc_pin; ddc_pin = 
intel_bios_hdmi_ddc_pin(encoder->devdata); source = "VBT"; if (!ddc_pin) { ddc_pin = intel_hdmi_default_ddc_pin(encoder); source = "platform default"; } if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n", encoder->base.base.id, encoder->base.name, ddc_pin); return 0; } other = get_encoder_by_ddc_pin(encoder, ddc_pin); if (other) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name, ddc_pin, other->base.base.id, other->base.name); return 0; } drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n", encoder->base.base.id, encoder->base.name, ddc_pin, source); return ddc_pin; } void intel_infoframe_init(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dig_port->write_infoframe = vlv_write_infoframe; dig_port->read_infoframe = vlv_read_infoframe; dig_port->set_infoframes = vlv_set_infoframes; dig_port->infoframes_enabled = vlv_infoframes_enabled; } else if (IS_G4X(dev_priv)) { dig_port->write_infoframe = g4x_write_infoframe; dig_port->read_infoframe = g4x_read_infoframe; dig_port->set_infoframes = g4x_set_infoframes; dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) { dig_port->write_infoframe = lspcon_write_infoframe; dig_port->read_infoframe = lspcon_read_infoframe; dig_port->set_infoframes = lspcon_set_infoframes; dig_port->infoframes_enabled = lspcon_infoframes_enabled; } else { dig_port->write_infoframe = hsw_write_infoframe; dig_port->read_infoframe = hsw_read_infoframe; dig_port->set_infoframes = hsw_set_infoframes; dig_port->infoframes_enabled = hsw_infoframes_enabled; } } else if (HAS_PCH_IBX(dev_priv)) { dig_port->write_infoframe = ibx_write_infoframe; dig_port->read_infoframe = ibx_read_infoframe; dig_port->set_infoframes = ibx_set_infoframes; dig_port->infoframes_enabled = ibx_infoframes_enabled; } else { dig_port->write_infoframe = cpt_write_infoframe; dig_port->read_infoframe = cpt_read_infoframe; dig_port->set_infoframes = cpt_set_infoframes; dig_port->infoframes_enabled = cpt_infoframes_enabled; } } void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i2c_adapter *ddc; enum port port = intel_encoder->port; struct cec_connector_info conn_info; drm_dbg_kms(&dev_priv->drm, "Adding HDMI connector on [ENCODER:%d:%s]\n", intel_encoder->base.base.id, intel_encoder->base.name); if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A)) return; if (drm_WARN(dev, dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", dig_port->max_lanes, intel_encoder->base.base.id, intel_encoder->base.name)) return; intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder); if (!intel_hdmi->ddc_bus) return; ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); drm_connector_init_with_ddc(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, ddc); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); if (DISPLAY_VER(dev_priv) < 12) 
connector->interlace_allowed = true; connector->stereo_allowed = true; if (DISPLAY_VER(dev_priv) >= 10) connector->ycbcr_420_allowed = true; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; if (HAS_DDI(dev_priv)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); intel_hdmi->attached_connector = intel_connector; if (is_hdcp_supported(dev_priv, port)) { int ret = intel_hdcp_init(intel_connector, dig_port, &intel_hdmi_hdcp_shim); if (ret) drm_dbg_kms(&dev_priv->drm, "HDCP init failed, skipping.\n"); } /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ if (IS_G45(dev_priv)) { u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); intel_de_write(dev_priv, PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } cec_fill_conn_info_from_drm(&conn_info, connector); intel_hdmi->cec_notifier = cec_notifier_conn_register(dev->dev, port_identifier(port), &conn_info); if (!intel_hdmi->cec_notifier) drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n"); } /* * intel_hdmi_dsc_get_slice_height - get the dsc slice_height * @vactive: Vactive of a display mode * * @return: appropriate dsc slice height for a given mode. */ int intel_hdmi_dsc_get_slice_height(int vactive) { int slice_height; /* * Slice Height determination : HDMI2.1 Section 7.7.5.2 * Select smallest slice height >=96, that results in a valid PPS and * requires minimum padding lines required for final slice. * * Assumption : Vactive is even. */ for (slice_height = 96; slice_height <= vactive; slice_height += 2) if (vactive % slice_height == 0) return slice_height; return 0; } /* * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder * and dsc decoder capabilities * * @crtc_state: intel crtc_state * @src_max_slices: maximum slices supported by the DSC encoder * @src_max_slice_width: maximum slice width supported by DSC encoder * @hdmi_max_slices: maximum slices supported by sink DSC decoder * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink * * @return: num of dsc slices that can be supported by the dsc encoder * and decoder. */ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, int src_max_slices, int src_max_slice_width, int hdmi_max_slices, int hdmi_throughput) { /* Pixel rates in KPixels/sec */ #define HDMI_DSC_PEAK_PIXEL_RATE 2720000 /* * Rates at which the source and sink are required to process pixels in each * slice, can be two levels: either atleast 340000KHz or atleast 40000KHz. */ #define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000 #define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000 /* Spec limits the slice width to 2720 pixels */ #define MAX_HDMI_SLICE_WIDTH 2720 int kslice_adjust; int adjusted_clk_khz; int min_slices; int target_slices; int max_throughput; /* max clock freq. in khz per slice */ int max_slice_width; int slice_width; int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock; if (!hdmi_throughput) return 0; /* * Slice Width determination : HDMI2.1 Section 7.7.5.1 * kslice_adjust factor for 4:2:0, and 4:2:2 formats is 0.5, where as * for 4:4:4 is 1.0. Multiplying these factors by 10 and later * dividing adjusted clock value by 10. 
*/ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 || crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) kslice_adjust = 10; else kslice_adjust = 5; /* * As per spec, the rate at which the source and the sink process * the pixels per slice are at two levels: atleast 340Mhz or 400Mhz. * This depends upon the pixel clock rate and output formats * (kslice adjust). * If pixel clock * kslice adjust >= 2720MHz slices can be processed * at max 340MHz, otherwise they can be processed at max 400MHz. */ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10); if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE) max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0; else max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1; /* * Taking into account the sink's capability for maximum * clock per slice (in MHz) as read from HF-VSDB. */ max_throughput = min(max_throughput, hdmi_throughput * 1000); min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput); max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width); /* * Keep on increasing the num of slices/line, starting from min_slices * per line till we get such a number, for which the slice_width is * just less than max_slice_width. The slices/line selected should be * less than or equal to the max horizontal slices that the combination * of PCON encoder and HDMI decoder can support. */ slice_width = max_slice_width; do { if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1) target_slices = 1; else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2) target_slices = 2; else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4) target_slices = 4; else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8) target_slices = 8; else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12) target_slices = 12; else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16) target_slices = 16; else return 0; slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices); if (slice_width >= max_slice_width) min_slices = target_slices + 1; } while (slice_width >= max_slice_width); return target_slices; } /* * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on * source and sink capabilities. 
* * @src_fraction_bpp: fractional bpp supported by the source * @slice_width: dsc slice width supported by the source and sink * @num_slices: num of slices supported by the source and sink * @output_format: video output format * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink * * @return: compressed bits_per_pixel in step of 1/16 of bits_per_pixel */ int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices, int output_format, bool hdmi_all_bpp, int hdmi_max_chunk_bytes) { int max_dsc_bpp, min_dsc_bpp; int target_bytes; bool bpp_found = false; int bpp_decrement_x16; int bpp_target; int bpp_target_x16; /* * Get min bpp and max bpp as per Table 7.23, in HDMI2.1 spec * Start with the max bpp and keep on decrementing with * fractional bpp, if supported by PCON DSC encoder * * for each bpp we check if no of bytes can be supported by HDMI sink */ /* Assuming: bpc as 8*/ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { min_dsc_bpp = 6; max_dsc_bpp = 3 * 4; /* 3*bpc/2 */ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 || output_format == INTEL_OUTPUT_FORMAT_RGB) { min_dsc_bpp = 8; max_dsc_bpp = 3 * 8; /* 3*bpc */ } else { /* Assuming 4:2:2 encoding */ min_dsc_bpp = 7; max_dsc_bpp = 2 * 8; /* 2*bpc */ } /* * Taking into account if all dsc_all_bpp supported by HDMI2.1 sink * Section 7.7.34 : Source shall not enable compressed Video * Transport with bpp_target settings above 12 bpp unless * DSC_all_bpp is set to 1. */ if (!hdmi_all_bpp) max_dsc_bpp = min(max_dsc_bpp, 12); /* * The Sink has a limit of compressed data in bytes for a scanline, * as described in max_chunk_bytes field in HFVSDB block of edid. * The no. of bytes depend on the target bits per pixel that the * source configures. So we start with the max_bpp and calculate * the target_chunk_bytes. We keep on decrementing the target_bpp, * till we get the target_chunk_bytes just less than what the sink's * max_chunk_bytes, or else till we reach the min_dsc_bpp. * * The decrement is according to the fractional support from PCON DSC * encoder. For fractional BPP we use bpp_target as a multiple of 16. * * bpp_target_x16 = bpp_target * 16 * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps * {1/16, 1/8, 1/4, 1/2, 1} respectively. */ bpp_target = max_dsc_bpp; /* src does not support fractional bpp implies decrement by 16 for bppx16 */ if (!src_fractional_bpp) src_fractional_bpp = 1; bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp); bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16; while (bpp_target_x16 > (min_dsc_bpp * 16)) { int bpp; bpp = DIV_ROUND_UP(bpp_target_x16, 16); target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8); if (target_bytes <= hdmi_max_chunk_bytes) { bpp_found = true; break; } bpp_target_x16 -= bpp_decrement_x16; } if (bpp_found) return bpp_target_x16; return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_hdmi.c
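The DSC helpers at the end of intel_hdmi.c above encode two rules from HDMI 2.1: pick the smallest even slice height >= 96 that divides vactive (Section 7.7.5.2), and step the target bpp down in fractional 1/16 increments until a scanline's worth of compressed bytes fits the sink's max chunk bytes. Below is a minimal stand-alone sketch of that arithmetic in plain user-space C; the function names and sample numbers are invented for illustration and are not part of the driver.

#include <stdio.h>

/* Smallest even slice height >= 96 that evenly divides vactive
 * (mirrors the rule described for intel_hdmi_dsc_get_slice_height). */
static int dsc_slice_height(int vactive)
{
	for (int h = 96; h <= vactive; h += 2)
		if (vactive % h == 0)
			return h;
	return 0;
}

/* Walk the target bpp (stored as bpp * 16) down in fractional steps until the
 * per-scanline compressed byte count fits the sink's max chunk bytes.
 * Purely illustrative; parameters are hypothetical. */
static int dsc_pick_bpp_x16(int max_bpp, int min_bpp, int slice_width,
			    int num_slices, int max_chunk_bytes,
			    int bpp_step_x16)
{
	for (int bpp_x16 = max_bpp * 16; bpp_x16 > min_bpp * 16;
	     bpp_x16 -= bpp_step_x16) {
		int bpp = (bpp_x16 + 15) / 16;			/* DIV_ROUND_UP */
		int bytes = (num_slices * slice_width * bpp + 7) / 8;

		if (bytes <= max_chunk_bytes)
			return bpp_x16;
	}
	return 0;
}

int main(void)
{
	/* 2160 active lines; 8 slices of 480 px; hypothetical 4500-byte chunk limit */
	printf("slice height for 2160 lines: %d\n", dsc_slice_height(2160));
	printf("chosen bpp*16: %d\n",
	       dsc_pick_bpp_x16(12, 8, 480, 8, 4500, 2 /* 1/8 bpp steps */));
	return 0;
}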
/* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2020 Google, Inc. * * Authors: * Sean Paul <[email protected]> */ #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/drm_print.h> #include "i915_reg.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_hdcp.h" #include "intel_hdcp_regs.h" static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) { switch (cpu_transcoder) { case TRANSCODER_A: return HDCP_STATUS_STREAM_A_ENC; case TRANSCODER_B: return HDCP_STATUS_STREAM_B_ENC; case TRANSCODER_C: return HDCP_STATUS_STREAM_C_ENC; case TRANSCODER_D: return HDCP_STATUS_STREAM_D_ENC; default: return 0; } } static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) { long ret; #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, msecs_to_jiffies(timeout)); if (!ret) DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); } static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u8 aksv[DRM_HDCP_KSV_LEN] = {}; ssize_t dpcd_ret; /* Output An first, that's easy */ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { drm_dbg_kms(&i915->drm, "Failed to write An over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; } /* * Since Aksv is Oh-So-Secret, we can't access it in software. So we * send an empty buffer of the correct length through the DP helpers. On * the other side, in the transfer hook, we'll generate a flag based on * the destination address which will tickle the hardware to output the * Aksv on our behalf after the header is sent. */ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV, aksv, DRM_HDCP_KSV_LEN); if (dpcd_ret != DRM_HDCP_KSV_LEN) { drm_dbg_kms(&i915->drm, "Failed to write Aksv over DP/AUX (%zd)\n", dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; } return 0; } static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { drm_dbg_kms(&i915->drm, "Read Bksv from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; } static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; /* * For some reason the HDMI and DP HDCP specs call this register * definition by different names. In the HDMI spec, it's called BSTATUS, * but in DP it's called BINFO. */ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { drm_dbg_kms(&i915->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; } static int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, u8 *bcaps) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, "Read bcaps from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? 
-EIO : ret; } return 0; } static int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { ssize_t ret; u8 bcaps; ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); if (ret) return ret; *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; return 0; } static int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; } static int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } *ksv_ready = bstatus & DP_BSTATUS_READY; return 0; } static int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; int i; /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ for (i = 0; i < num_downstream; i += 3) { size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_KSV_FIFO, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); if (ret != len) { drm_dbg_kms(&i915->drm, "Read ksv[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? -EIO : ret; } } return 0; } static int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) return -EINVAL; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { drm_dbg_kms(&i915->drm, "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? 
-EIO : ret; } return 0; } static int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, enum transcoder cpu_transcoder, bool enable) { /* Not used for single stream DisplayPort setups */ return 0; } static bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bstatus; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return false; } return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); } static int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, bool *hdcp_capable) { ssize_t ret; u8 bcaps; ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); if (ret) return ret; *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; return 0; } struct hdcp2_dp_errata_stream_type { u8 msg_id; u8 stream_type; } __packed; struct hdcp2_dp_msg_data { u8 msg_id; u32 offset; bool msg_detectable; u32 timeout; u32 timeout2; /* Added for non_paired situation */ /* Timeout to read entire msg */ u32 msg_read_timeout; }; static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0, 0}, { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, false, HDCP_2_2_CERT_TIMEOUT_MS, 0, HDCP_2_2_DP_CERT_READ_TIMEOUT_MS}, { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, false, 0, 0, 0 }, { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, false, 0, 0, 0 }, { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS, HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS}, { HDCP_2_2_AKE_SEND_PAIRING_INFO, DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, HDCP_2_2_PAIRING_TIMEOUT_MS, 0, HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS }, { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0, 0 }, { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0, 0 }, { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 0, 0, 0 }, { HDCP_2_2_REP_SEND_RECVID_LIST, DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0, 0 }, { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 0, 0, 0 }, { HDCP_2_2_REP_STREAM_MANAGE, DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 0, 0, 0}, { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0, 0 }, /* local define to shovel this through the write_2_2 interface */ #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 0, 0 }, }; static int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, u8 *rx_status) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_DP_RXSTATUS_LEN); if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { drm_dbg_kms(&i915->drm, "Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? 
-EIO : ret; } return 0; } static int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, u8 msg_id, bool *msg_ready) { u8 rx_status; int ret; *msg_ready = false; ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); if (ret < 0) return ret; switch (msg_id) { case HDCP_2_2_AKE_SEND_HPRIME: if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) *msg_ready = true; break; case HDCP_2_2_AKE_SEND_PAIRING_INFO: if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) *msg_ready = true; break; case HDCP_2_2_REP_SEND_RECVID_LIST: if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) *msg_ready = true; break; default: DRM_ERROR("Unidentified msg_id: %d\n", msg_id); return -EINVAL; } return 0; } static ssize_t intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = hdcp2_msg_data->msg_id; int ret, timeout; bool msg_ready = false; if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) timeout = hdcp2_msg_data->timeout2; else timeout = hdcp2_msg_data->timeout; /* * There is no way to detect the CERT, LPRIME and STREAM_READY * availability. So Wait for timeout and read the msg. */ if (!hdcp2_msg_data->msg_detectable) { mdelay(timeout); ret = 0; } else { /* * As we want to check the msg availability at timeout, Ignoring * the timeout at wait for CP_IRQ. */ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); ret = hdcp2_detect_msg_availability(dig_port, msg_id, &msg_ready); if (!msg_ready) ret = -ETIMEDOUT; } if (ret) drm_dbg_kms(&i915->drm, "msg_id %d, ret %d, timeout(mSec): %d\n", hdcp2_msg_data->msg_id, ret, timeout); return ret; } static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) { int i; for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) if (hdcp2_dp_msg_data[i].msg_id == msg_id) return &hdcp2_dp_msg_data[i]; return NULL; } static int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, void *buf, size_t size) { unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_write, len; const struct hdcp2_dp_msg_data *hdcp2_msg_data; hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); if (!hdcp2_msg_data) return -EINVAL; offset = hdcp2_msg_data->offset; /* No msg_id in DP HDCP2.2 msgs */ bytes_to_write = size - 1; byte++; while (bytes_to_write) { len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; ret = drm_dp_dpcd_write(&dig_port->dp.aux, offset, (void *)byte, len); if (ret < 0) return ret; bytes_to_write -= ret; byte += ret; offset += ret; } return size; } static ssize_t get_receiver_id_list_rx_info(struct intel_digital_port *dig_port, u32 *dev_cnt, u8 *byte) { ssize_t ret; u8 *rx_info = byte; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RXINFO_OFFSET, (void *)rx_info, HDCP_2_2_RXINFO_LEN); if (ret != HDCP_2_2_RXINFO_LEN) return ret >= 0 ? 
-EIO : ret; *dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); if (*dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) *dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; return ret; } static int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; const struct hdcp2_dp_msg_data *hdcp2_msg_data; ktime_t msg_end = ktime_set(0, 0); bool msg_expired; u32 dev_cnt; hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); if (!hdcp2_msg_data) return -EINVAL; offset = hdcp2_msg_data->offset; ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); if (ret < 0) return ret; hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); /* DP adaptation msgs has no msg_id */ byte++; if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { ret = get_receiver_id_list_rx_info(dig_port, &dev_cnt, byte); if (ret < 0) return ret; byte += ret; size = sizeof(struct hdcp2_rep_send_receiverid_list) - HDCP_2_2_RXINFO_LEN - HDCP_2_2_RECEIVER_IDS_MAX_LEN + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); offset += HDCP_2_2_RXINFO_LEN; } bytes_to_recv = size - 1; while (bytes_to_recv) { len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; /* Entire msg read timeout since initiate of msg read */ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) msg_end = ktime_add_ms(ktime_get_raw(), hdcp2_msg_data->msg_read_timeout); ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, (void *)byte, len); if (ret < 0) { drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", msg_id, ret); return ret; } bytes_to_recv -= ret; byte += ret; offset += ret; } if (hdcp2_msg_data->msg_read_timeout > 0) { msg_expired = ktime_after(ktime_get_raw(), msg_end); if (msg_expired) { drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n", msg_id, hdcp2_msg_data->msg_read_timeout); return -ETIMEDOUT; } } byte = buf; *byte = msg_id; return size; } static int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, bool is_repeater, u8 content_type) { int ret; struct hdcp2_dp_errata_stream_type stream_type_msg; if (is_repeater) return 0; /* * Errata for DP: As Stream type is used for encryption, Receiver * should be communicated with stream type for the decryption of the * content. * Repeater will be communicated with stream type as a part of it's * auth later in time. */ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; stream_type_msg.stream_type = content_type; ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, sizeof(stream_type_msg)); return ret < 0 ? 
ret : 0; } static int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { u8 rx_status; int ret; ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); if (ret) return ret; if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) ret = HDCP_REAUTH_REQUEST; else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) ret = HDCP_LINK_INTEGRITY_FAILURE; else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) ret = HDCP_TOPOLOGY_CHANGE; return ret; } static int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, bool *capable) { u8 rx_caps[3]; int ret; *capable = false; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RX_CAPS_OFFSET, rx_caps, HDCP_2_2_RXCAPS_LEN); if (ret != HDCP_2_2_RXCAPS_LEN) return ret >= 0 ? -EIO : ret; if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) *capable = true; return 0; } static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .write_an_aksv = intel_dp_hdcp_write_an_aksv, .read_bksv = intel_dp_hdcp_read_bksv, .read_bstatus = intel_dp_hdcp_read_bstatus, .repeater_present = intel_dp_hdcp_repeater_present, .read_ri_prime = intel_dp_hdcp_read_ri_prime, .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, .toggle_signalling = intel_dp_hdcp_toggle_signalling, .check_link = intel_dp_hdcp_check_link, .hdcp_capable = intel_dp_hdcp_capable, .write_2_2_msg = intel_dp_hdcp2_write_msg, .read_2_2_msg = intel_dp_hdcp2_read_msg, .config_stream_type = intel_dp_hdcp2_config_stream_type, .check_2_2_link = intel_dp_hdcp2_check_link, .hdcp_2_2_capable = intel_dp_hdcp2_capable, .protocol = HDCP_PROTOCOL_DP, }; static int intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector, bool enable) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, hdcp->stream_transcoder, enable, TRANS_DDI_HDCP_SELECT); if (ret) drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n", enable ? "Enable" : "Disable", ret); return ret; } static int intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, bool enable) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->stream_transcoder; u32 stream_enc_status; int ret; ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable); if (ret) return ret; stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder); if (!stream_enc_status) return -EINVAL; /* Wait for encryption confirmation */ if (intel_de_wait_for_register(i915, HDCP_STATUS(i915, cpu_transcoder, port), stream_enc_status, enable ? stream_enc_status : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), enable ? 
"enabled" : "disabled"); return -ETIMEDOUT; } return 0; } static int intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, bool enable) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->stream_transcoder; enum pipe pipe = (enum pipe)cpu_transcoder; enum port port = dig_port->base.port; int ret; drm_WARN_ON(&i915->drm, enable && !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port)) & AUTH_STREAM_TYPE) != data->streams[0].stream_type); ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable); if (ret) return ret; /* Wait for encryption confirmation */ if (intel_de_wait_for_register(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe), STREAM_ENCRYPTION_STATUS, enable ? STREAM_ENCRYPTION_STATUS : 0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n", transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled"); return -ETIMEDOUT; } return 0; } static int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; int ret; /* * We do need to do the Link Check only for the connector involved with * HDCP port authentication and encryption. * We can re-use the hdcp->is_repeater flag to know that the connector * involved with HDCP port authentication and encryption. */ if (hdcp->is_repeater) { ret = intel_dp_hdcp2_check_link(dig_port, connector); if (ret) return ret; } return 0; } static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { .write_an_aksv = intel_dp_hdcp_write_an_aksv, .read_bksv = intel_dp_hdcp_read_bksv, .read_bstatus = intel_dp_hdcp_read_bstatus, .repeater_present = intel_dp_hdcp_repeater_present, .read_ri_prime = intel_dp_hdcp_read_ri_prime, .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, .toggle_signalling = intel_dp_hdcp_toggle_signalling, .stream_encryption = intel_dp_mst_hdcp_stream_encryption, .check_link = intel_dp_hdcp_check_link, .hdcp_capable = intel_dp_hdcp_capable, .write_2_2_msg = intel_dp_hdcp2_write_msg, .read_2_2_msg = intel_dp_hdcp2_read_msg, .config_stream_type = intel_dp_hdcp2_config_stream_type, .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption, .check_2_2_link = intel_dp_mst_hdcp2_check_link, .hdcp_2_2_capable = intel_dp_hdcp2_capable, .protocol = HDCP_PROTOCOL_DP, }; int intel_dp_hdcp_init(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder = &dig_port->base; enum port port = intel_encoder->port; struct intel_dp *intel_dp = &dig_port->dp; if (!is_hdcp_supported(dev_priv, port)) return 0; if (intel_connector->mst_port) return intel_hdcp_init(intel_connector, dig_port, &intel_dp_mst_hdcp_shim); else if (!intel_dp_is_edp(intel_dp)) return intel_hdcp_init(intel_connector, dig_port, &intel_dp_hdcp_shim); return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
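intel_dp_hdcp2_write_msg() in the file above frames HDCP 2.2 messages for DP by dropping the one-byte msg_id (the per-message DPCD offset already identifies the message) and streaming the remaining body in AUX-sized chunks, advancing the offset as it goes. A hedged stand-alone sketch of that chunking loop follows; fake_dpcd_write() and the sample offset stand in for the real AUX helpers and are assumptions, not the driver's API.

#include <stdio.h>
#include <string.h>

#define AUX_MAX_PAYLOAD 16	/* a DP AUX transfer carries at most 16 data bytes */

/* Stand-in for drm_dp_dpcd_write(): pretend every transfer succeeds but is
 * capped at AUX_MAX_PAYLOAD bytes, as real AUX transactions are. */
static int fake_dpcd_write(unsigned int offset, const unsigned char *buf,
			   size_t len)
{
	if (len > AUX_MAX_PAYLOAD)
		len = AUX_MAX_PAYLOAD;
	printf("dpcd write: offset 0x%05x, %zu bytes\n", offset, len);
	return (int)len;
}

/* Skip the msg_id byte and stream the body in AUX-sized chunks, bumping the
 * DPCD offset by whatever each transfer managed to write. */
static int write_hdcp2_msg(unsigned int offset, const unsigned char *msg,
			   size_t size)
{
	const unsigned char *byte = msg + 1;	/* no msg_id in DP HDCP2.2 msgs */
	size_t remaining = size - 1;

	while (remaining) {
		int ret = fake_dpcd_write(offset, byte, remaining);

		if (ret < 0)
			return ret;
		remaining -= ret;
		byte += ret;
		offset += ret;
	}
	return (int)size;
}

int main(void)
{
	unsigned char ake_init[12] = { 2 /* msg_id, illustrative */ };

	return write_hdcp2_msg(0x69000 /* illustrative offset */,
			       ake_init, sizeof(ake_init)) < 0;
}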
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation * */ #include "gem/i915_gem_internal.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsb.h" #include "intel_dsb_regs.h" struct i915_vma; enum dsb_id { INVALID_DSB = -1, DSB1, DSB2, DSB3, MAX_DSB_PER_PIPE }; struct intel_dsb { enum dsb_id id; u32 *cmd_buf; struct i915_vma *vma; struct intel_crtc *crtc; /* * maximum number of dwords the buffer will hold. */ unsigned int size; /* * free_pos will point the first free dword and * help in calculating tail of command buffer. */ unsigned int free_pos; /* * ins_start_offset will help to store start dword of the dsb * instuction and help in identifying the batch of auto-increment * register. */ unsigned int ins_start_offset; }; /** * DOC: DSB * * A DSB (Display State Buffer) is a queue of MMIO instructions in the memory * which can be offloaded to DSB HW in Display Controller. DSB HW is a DMA * engine that can be programmed to download the DSB from memory. * It allows driver to batch submit display HW programming. This helps to * reduce loading time and CPU activity, thereby making the context switch * faster. DSB Support added from Gen12 Intel graphics based platform. * * DSB's can access only the pipe, plane, and transcoder Data Island Packet * registers. * * DSB HW can support only register writes (both indexed and direct MMIO * writes). There are no registers reads possible with DSB HW engine. */ /* DSB opcodes. */ #define DSB_OPCODE_SHIFT 24 #define DSB_OPCODE_NOOP 0x0 #define DSB_OPCODE_MMIO_WRITE 0x1 #define DSB_OPCODE_WAIT_USEC 0x2 #define DSB_OPCODE_WAIT_LINES 0x3 #define DSB_OPCODE_WAIT_VBLANKS 0x4 #define DSB_OPCODE_WAIT_DSL_IN 0x5 #define DSB_OPCODE_WAIT_DSL_OUT 0x6 #define DSB_OPCODE_INTERRUPT 0x7 #define DSB_OPCODE_INDEXED_WRITE 0x9 #define DSB_OPCODE_POLL 0xA #define DSB_BYTE_EN 0xF #define DSB_BYTE_EN_SHIFT 20 #define DSB_REG_VALUE_MASK 0xfffff static bool assert_dsb_has_room(struct intel_dsb *dsb) { struct intel_crtc *crtc = dsb->crtc; struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* each instruction is 2 dwords */ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2, "[CRTC:%d:%s] DSB %d buffer overflow\n", crtc->base.base.id, crtc->base.name, dsb->id); } static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe, enum dsb_id id) { return intel_de_read(i915, DSB_CTRL(pipe, id)) & DSB_STATUS_BUSY; } static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw) { u32 *buf = dsb->cmd_buf; if (!assert_dsb_has_room(dsb)) return; /* Every instruction should be 8 byte aligned. 
*/ dsb->free_pos = ALIGN(dsb->free_pos, 2); dsb->ins_start_offset = dsb->free_pos; buf[dsb->free_pos++] = ldw; buf[dsb->free_pos++] = udw; } static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, u32 opcode, i915_reg_t reg) { const u32 *buf = dsb->cmd_buf; u32 prev_opcode, prev_reg; prev_opcode = buf[dsb->ins_start_offset + 1] >> DSB_OPCODE_SHIFT; prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK; return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg); } static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg) { return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_MMIO_WRITE, reg); } static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg) { return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_INDEXED_WRITE, reg); } /** * intel_dsb_reg_write() - Emit register wriite to the DSB context * @dsb: DSB context * @reg: register address. * @val: value. * * This function is used for writing register-value pair in command * buffer of DSB. */ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { /* * For example the buffer will look like below for 3 dwords for auto * increment register: * +--------------------------------------------------------+ * | size = 3 | offset &| value1 | value2 | value3 | zero | * | | opcode | | | | | * +--------------------------------------------------------+ * + + + + + + + * 0 4 8 12 16 20 24 * Byte * * As every instruction is 8 byte aligned the index of dsb instruction * will start always from even number while dealing with u32 array. If * we are writing odd no of dwords, Zeros will be added in the end for * padding. */ if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) && !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) { intel_dsb_emit(dsb, val, (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) | (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | i915_mmio_reg_offset(reg)); } else { u32 *buf = dsb->cmd_buf; if (!assert_dsb_has_room(dsb)) return; /* convert to indexed write? */ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) { u32 prev_val = buf[dsb->ins_start_offset + 0]; buf[dsb->ins_start_offset + 0] = 1; /* count */ buf[dsb->ins_start_offset + 1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | i915_mmio_reg_offset(reg); buf[dsb->ins_start_offset + 2] = prev_val; dsb->free_pos++; } buf[dsb->free_pos++] = val; /* Update the count */ buf[dsb->ins_start_offset]++; /* if number of data words is odd, then the last dword should be 0.*/ if (dsb->free_pos & 0x1) buf[dsb->free_pos] = 0; } } static void intel_dsb_align_tail(struct intel_dsb *dsb) { u32 aligned_tail, tail; tail = dsb->free_pos * 4; aligned_tail = ALIGN(tail, CACHELINE_BYTES); if (aligned_tail > tail) memset(&dsb->cmd_buf[dsb->free_pos], 0, aligned_tail - tail); dsb->free_pos = aligned_tail / 4; } void intel_dsb_finish(struct intel_dsb *dsb) { intel_dsb_align_tail(dsb); } /** * intel_dsb_commit() - Trigger workload execution of DSB. * @dsb: DSB context * @wait_for_vblank: wait for vblank before executing * * This function is used to do actual write to hardware using DSB. 
*/ void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank) { struct intel_crtc *crtc = dsb->crtc; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tail; tail = dsb->free_pos * 4; if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES))) return; if (is_dsb_busy(dev_priv, pipe, dsb->id)) { drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n", crtc->base.base.id, crtc->base.name, dsb->id); return; } intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), (wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0) | DSB_ENABLE); intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); } void intel_dsb_wait(struct intel_dsb *dsb) { struct intel_crtc *crtc = dsb->crtc; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d timed out waiting for idle\n", crtc->base.base.id, crtc->base.name, dsb->id); /* Attempt to reset it */ dsb->free_pos = 0; dsb->ins_start_offset = 0; intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0); } /** * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer. * @crtc: the CRTC * @max_cmds: number of commands we need to fit into command buffer * * This function prepare the command buffer which is used to store dsb * instructions with data. * * Returns: * DSB context, NULL on failure */ struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc, unsigned int max_cmds) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct drm_i915_gem_object *obj; intel_wakeref_t wakeref; struct intel_dsb *dsb; struct i915_vma *vma; unsigned int size; u32 *buf; if (!HAS_DSB(i915)) return NULL; dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* ~1 qword per instruction, full cachelines */ size = ALIGN(max_cmds * 8, CACHELINE_BYTES); obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size)); if (IS_ERR(obj)) goto out_put_rpm; vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) { i915_gem_object_put(obj); goto out_put_rpm; } buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); if (IS_ERR(buf)) { i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); goto out_put_rpm; } intel_runtime_pm_put(&i915->runtime_pm, wakeref); dsb->id = DSB1; dsb->vma = vma; dsb->crtc = crtc; dsb->cmd_buf = buf; dsb->size = size / 4; /* in dwords */ dsb->free_pos = 0; dsb->ins_start_offset = 0; return dsb; out_put_rpm: intel_runtime_pm_put(&i915->runtime_pm, wakeref); kfree(dsb); out: drm_info_once(&i915->drm, "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", crtc->base.base.id, crtc->base.name, DSB1); return NULL; } /** * intel_dsb_cleanup() - To cleanup DSB context. * @dsb: DSB context * * This function cleanup the DSB context by unpinning and releasing * the VMA object associated with it. */ void intel_dsb_cleanup(struct intel_dsb *dsb) { i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP); kfree(dsb); }
linux-master
drivers/gpu/drm/i915/display/intel_dsb.c
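The defines near the top of intel_dsb.c above give the DSB instruction layout: opcode in bits 31:24, byte enables in 23:20, register offset in the low 20 bits, with the data dword emitted first and every instruction aligned to 8 bytes. Below is a small stand-alone sketch of packing one MMIO write that way; the buffer handling and the register offset are illustrative only, not the driver's buffer management.

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the defines in intel_dsb.c: opcode in bits 31:24,
 * byte enables in 23:20, register offset in the low 20 bits. */
#define OPCODE_MMIO_WRITE	0x1
#define OPCODE_SHIFT		24
#define BYTE_EN			0xf
#define BYTE_EN_SHIFT		20
#define REG_MASK		0xfffff

/* Emit one DSB instruction: two dwords, value first, then opcode/register.
 * 'pos' is the next free dword index, like dsb->free_pos in the driver. */
static unsigned int emit_mmio_write(uint32_t *buf, unsigned int pos,
				    uint32_t reg_offset, uint32_t val)
{
	pos = (pos + 1) & ~1u;	/* instructions are 8-byte aligned */
	buf[pos++] = val;
	buf[pos++] = (OPCODE_MMIO_WRITE << OPCODE_SHIFT) |
		     (BYTE_EN << BYTE_EN_SHIFT) |
		     (reg_offset & REG_MASK);
	return pos;
}

int main(void)
{
	uint32_t cmd_buf[8] = { 0 };
	unsigned int pos = 0;

	/* register offset is made up for illustration */
	pos = emit_mmio_write(cmd_buf, pos, 0x70180, 0x12345678);
	for (unsigned int i = 0; i < pos; i++)
		printf("dword %u: 0x%08x\n", i, cmd_buf[i]);
	return 0;
}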
/* * Copyright © 2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /** * DOC: atomic modeset support * * The functions here implement the state management and hardware programming * dispatch required by the atomic modeset infrastructure. * See intel_atomic_plane.c for the plane-specific atomic functionality. */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" #include "intel_fb.h" #include "skl_universal_plane.h" /** * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property. * @connector: Connector to get the property for. * @state: Connector state to retrieve the property from. * @property: Property to retrieve. * @val: Return value for the property. * * Returns the atomic property value for a digital connector. */ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, u64 *val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); if (property == dev_priv->display.properties.force_audio) *val = intel_conn_state->force_audio; else if (property == dev_priv->display.properties.broadcast_rgb) *val = intel_conn_state->broadcast_rgb; else { drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n", property->base.id, property->name); return -EINVAL; } return 0; } /** * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property. * @connector: Connector to set the property for. * @state: Connector state to set the property on. * @property: Property to set. * @val: New value for the property. * * Sets the atomic property value for a digital connector. 
*/ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, u64 val) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); if (property == dev_priv->display.properties.force_audio) { intel_conn_state->force_audio = val; return 0; } if (property == dev_priv->display.properties.broadcast_rgb) { intel_conn_state->broadcast_rgb = val; return 0; } drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n", property->base.id, property->name); return -EINVAL; } int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state) { struct drm_connector_state *new_state = drm_atomic_get_new_connector_state(state, conn); struct intel_digital_connector_state *new_conn_state = to_intel_digital_connector_state(new_state); struct drm_connector_state *old_state = drm_atomic_get_old_connector_state(state, conn); struct intel_digital_connector_state *old_conn_state = to_intel_digital_connector_state(old_state); struct drm_crtc_state *crtc_state; intel_hdcp_atomic_check(conn, old_state, new_state); if (!new_state->crtc) return 0; crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); /* * These properties are handled by fastset, and might not end * up in a modeset. */ if (new_conn_state->force_audio != old_conn_state->force_audio || new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || new_conn_state->base.colorspace != old_conn_state->base.colorspace || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode || new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state || !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) crtc_state->mode_changed = true; return 0; } /** * intel_digital_connector_duplicate_state - duplicate connector state * @connector: digital connector * * Allocates and returns a copy of the connector state (both common and * digital connector specific) for the specified connector. * * Returns: The newly allocated connector state, or NULL on failure. 
*/ struct drm_connector_state * intel_digital_connector_duplicate_state(struct drm_connector *connector) { struct intel_digital_connector_state *state; state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_connector_duplicate_state(connector, &state->base); return &state->base; } /** * intel_connector_needs_modeset - check if connector needs a modeset * @state: the atomic state corresponding to this modeset * @connector: the connector */ bool intel_connector_needs_modeset(struct intel_atomic_state *state, struct drm_connector *connector) { const struct drm_connector_state *old_conn_state, *new_conn_state; old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector); new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector); return old_conn_state->crtc != new_conn_state->crtc || (new_conn_state->crtc && drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base, new_conn_state->crtc))); } /** * intel_any_crtc_needs_modeset - check if any CRTC needs a modeset * @state: the atomic state corresponding to this modeset * * Returns true if any CRTC in @state needs a modeset. */ bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (intel_crtc_needs_modeset(crtc_state)) return true; } return false; } struct intel_digital_connector_state * intel_atomic_get_digital_connector_state(struct intel_atomic_state *state, struct intel_connector *connector) { struct drm_connector_state *conn_state; conn_state = drm_atomic_get_connector_state(&state->base, &connector->base); if (IS_ERR(conn_state)) return ERR_CAST(conn_state); return to_intel_digital_connector_state(conn_state); } /** * intel_crtc_duplicate_state - duplicate crtc state * @crtc: drm crtc * * Allocates and returns a copy of the crtc state (both common and * Intel-specific) for the specified crtc. * * Returns: The newly allocated crtc state, or NULL on failure. 
*/ struct drm_crtc_state * intel_crtc_duplicate_state(struct drm_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state); struct intel_crtc_state *crtc_state; crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL); if (!crtc_state) return NULL; __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi); /* copy color blobs */ if (crtc_state->hw.degamma_lut) drm_property_blob_get(crtc_state->hw.degamma_lut); if (crtc_state->hw.ctm) drm_property_blob_get(crtc_state->hw.ctm); if (crtc_state->hw.gamma_lut) drm_property_blob_get(crtc_state->hw.gamma_lut); if (crtc_state->pre_csc_lut) drm_property_blob_get(crtc_state->pre_csc_lut); if (crtc_state->post_csc_lut) drm_property_blob_get(crtc_state->post_csc_lut); crtc_state->update_pipe = false; crtc_state->disable_lp_wm = false; crtc_state->disable_cxsr = false; crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; crtc_state->fifo_changed = false; crtc_state->preload_luts = false; crtc_state->wm.need_postvbl_update = false; crtc_state->do_async_flip = false; crtc_state->fb_bits = 0; crtc_state->update_planes = 0; crtc_state->dsb = NULL; return &crtc_state->uapi; } static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state) { drm_property_blob_put(crtc_state->hw.degamma_lut); drm_property_blob_put(crtc_state->hw.gamma_lut); drm_property_blob_put(crtc_state->hw.ctm); drm_property_blob_put(crtc_state->pre_csc_lut); drm_property_blob_put(crtc_state->post_csc_lut); } void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state) { intel_crtc_put_color_blobs(crtc_state); } /** * intel_crtc_destroy_state - destroy crtc state * @crtc: drm crtc * @state: the state to destroy * * Destroys the crtc state (both common and Intel-specific) for the * specified crtc. */ void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); drm_WARN_ON(crtc->dev, crtc_state->dsb); __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); kfree(crtc_state); } struct drm_atomic_state * intel_atomic_state_alloc(struct drm_device *dev) { struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state || drm_atomic_state_init(dev, &state->base) < 0) { kfree(state); return NULL; } return &state->base; } void intel_atomic_state_free(struct drm_atomic_state *_state) { struct intel_atomic_state *state = to_intel_atomic_state(_state); drm_atomic_state_default_release(&state->base); kfree(state->global_objs); i915_sw_fence_fini(&state->commit_ready); kfree(state); } void intel_atomic_state_clear(struct drm_atomic_state *s) { struct intel_atomic_state *state = to_intel_atomic_state(s); drm_atomic_state_default_clear(&state->base); intel_atomic_clear_global_state(state); /* state->internal not reset on purpose */ state->dpll_set = state->modeset = false; } struct intel_crtc_state * intel_atomic_get_crtc_state(struct drm_atomic_state *state, struct intel_crtc *crtc) { struct drm_crtc_state *crtc_state; crtc_state = drm_atomic_get_crtc_state(state, &crtc->base); if (IS_ERR(crtc_state)) return ERR_CAST(crtc_state); return to_intel_crtc_state(crtc_state); }
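/*
 * Illustrative, standalone sketch (not driver code): the duplicate-then-reset
 * pattern used above by intel_crtc_duplicate_state(), which copies the whole
 * state with kmemdup() and then clears the per-commit transient flags so the
 * new atomic state starts clean. The struct and field names below are
 * hypothetical stand-ins for the real i915 types, shown only to make the
 * pattern visible in a self-contained userspace program.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_crtc_state {
	/* configuration that must carry over into the duplicate */
	int pixel_clock_khz;
	bool enabled;
	/* per-commit scratch state that must not leak into the copy */
	bool update_pipe;
	bool do_async_flip;
	unsigned int update_planes;
};

static struct demo_crtc_state *demo_duplicate_state(const struct demo_crtc_state *old)
{
	struct demo_crtc_state *new = malloc(sizeof(*new));

	if (!new)
		return NULL;

	/* kmemdup() equivalent: copy everything wholesale... */
	memcpy(new, old, sizeof(*new));

	/* ...then reset the transient, per-commit bookkeeping */
	new->update_pipe = false;
	new->do_async_flip = false;
	new->update_planes = 0;

	return new;
}

int main(void)
{
	struct demo_crtc_state old = {
		.pixel_clock_khz = 148500,
		.enabled = true,
		.update_pipe = true,	/* left over from the previous commit */
		.update_planes = 0x3,
	};
	struct demo_crtc_state *new = demo_duplicate_state(&old);

	if (!new)
		return 1;

	printf("clock %d kHz, enabled %d, update_pipe %d, update_planes %#x\n",
	       new->pixel_clock_khz, new->enabled, new->update_pipe,
	       new->update_planes);
	free(new);
	return 0;
}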
linux-master
drivers/gpu/drm/i915/display/intel_atomic.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020-2021 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "i915_trace.h" #include "intel_bios.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_dp_aux_regs.h" #include "intel_pps.h" #include "intel_tc.h" u32 intel_dp_aux_pack(const u8 *src, int src_bytes) { int i; u32 v = 0; if (src_bytes > 4) src_bytes = 4; for (i = 0; i < src_bytes; i++) v |= ((u32)src[i]) << ((3 - i) * 8); return v; } static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes) { int i; if (dst_bytes > 4) dst_bytes = 4; for (i = 0; i < dst_bytes; i++) dst[i] = src >> ((3 - i) * 8); } static u32 intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); const unsigned int timeout_ms = 10; u32 status; int ret; ret = __intel_de_wait_for_register(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0, 2, timeout_ms, &status); if (ret == -ETIMEDOUT) drm_err(&i915->drm, "%s: did not complete or timeout within %ums (status 0x%08x)\n", intel_dp->aux.name, timeout_ms, status); return status; } static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (index) return 0; /* * The clock divider is based off the hrawclk, and would like to run at * 2MHz. So, take the hrawclk value and divide by 2000 and use that */ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000); } static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 freq; if (index) return 0; /* * The clock divider is based off the cdclk or PCH rawclk, and would * like to run at 2MHz. So, take the cdclk or PCH rawclk value and * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) freq = dev_priv->display.cdclk.hw.cdclk; else freq = RUNTIME_INFO(dev_priv)->rawclk_freq; return DIV_ROUND_CLOSEST(freq, 2000); } static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ switch (index) { case 0: return 63; case 1: return 72; default: return 0; } } return ilk_get_aux_clock_divider(intel_dp, index); } static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { /* * SKL doesn't need us to program the AUX clock divider (Hardware will * derive the clock from CDCLK automatically). We still implement the * get_aux_clock_divider vfunc to plug-in into the existing code. */ return index ? 
0 : 1; } static int intel_dp_aux_sync_len(void) { int precharge = 16; /* 10-16 */ int preamble = 16; return precharge + preamble; } static int intel_dp_aux_fw_sync_len(void) { int precharge = 10; /* 10-16 */ int preamble = 8; return precharge + preamble; } static int g4x_dp_aux_precharge_len(void) { int precharge_min = 10; int preamble = 16; /* HW wants the length of the extra precharge in 2us units */ return (intel_dp_aux_sync_len() - precharge_min - preamble) / 2; } static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, u32 aux_clock_divider) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); u32 timeout; /* Max timeout value on G4x-BDW: 1.6ms */ if (IS_BROADWELL(dev_priv)) timeout = DP_AUX_CH_CTL_TIME_OUT_600us; else timeout = DP_AUX_CH_CTL_TIME_OUT_400us; return DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | timeout | DP_AUX_CH_CTL_RECEIVE_ERROR | DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) | DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) | DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider); } static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, u32 unused) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 ret; /* * Max timeout values: * SKL-GLK: 1.6ms * ICL+: 4ms */ ret = DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) | DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); if (intel_tc_port_in_tbt_alt_mode(dig_port)) ret |= DP_AUX_CH_CTL_TBT_IO; /* * Power request bit is already set during aux power well enable. * Preserve the bit across aux transactions. */ if (DISPLAY_VER(i915) >= 14) ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST; return ret; } static int intel_dp_aux_xfer(struct intel_dp *intel_dp, const u8 *send, int send_bytes, u8 *recv, int recv_size, u32 aux_send_ctl_flags) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain; intel_wakeref_t aux_wakeref; intel_wakeref_t pps_wakeref; int i, ret, recv_bytes; int try, clock = 0; u32 status; bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); if (is_tc_port) { intel_tc_port_lock(dig_port); /* * Abort transfers on a disconnected port as required by * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX * timeouts that would otherwise happen. * TODO: abort the transfer on non-TC ports as well. */ if (!intel_tc_port_connected_locked(&dig_port->base)) { ret = -ENXIO; goto out_unlock; } } aux_domain = intel_aux_power_domain(dig_port); aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = intel_pps_lock(intel_dp); /* * We will be called with VDD already enabled for dpcd/edid/oui reads. * In such cases we want to leave VDD enabled and it's up to upper layers * to turn it off. But for eg. i2c-dev access we need to turn it on/off * ourselves. 
*/ vdd = intel_pps_vdd_on_unlocked(intel_dp); /* * dp aux is extremely sensitive to irq latency, hence request the * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. */ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0); intel_pps_check_power_unlocked(intel_dp); /* * FIXME PSR should be disabled here to prevent * it using the same AUX CH simultaneously */ /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = intel_de_read_notrace(i915, ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; msleep(1); } /* just trace the final value */ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (try == 3) { const u32 status = intel_de_read(i915, ch_ctl); if (status != intel_dp->aux_busy_last_status) { drm_WARN(&i915->drm, 1, "%s: not started (status 0x%08x)\n", intel_dp->aux.name, status); intel_dp->aux_busy_last_status = status; } ret = -EBUSY; goto out; } /* Only 5 data registers! */ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) { ret = -E2BIG; goto out; } while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, send_bytes, aux_clock_divider); send_ctl |= aux_send_ctl_flags; /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) intel_de_write(i915, ch_data[i >> 2], intel_dp_aux_pack(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ intel_de_write(i915, ch_ctl, send_ctl); status = intel_dp_aux_wait_done(intel_dp); /* Clear done status and any errors */ intel_de_write(i915, ch_ctl, status | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); /* * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 * 400us delay required for errors and timeouts * Timeout errors from the HW already meet this * requirement so skip to next iteration */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) continue; if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { usleep_range(400, 500); continue; } if (status & DP_AUX_CH_CTL_DONE) goto done; } } if ((status & DP_AUX_CH_CTL_DONE) == 0) { drm_err(&i915->drm, "%s: not done (status 0x%08x)\n", intel_dp->aux.name, status); ret = -EBUSY; goto out; } done: /* * Check for timeout or receive error. Timeouts occur when the sink is * not connected. */ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n", intel_dp->aux.name, status); ret = -EIO; goto out; } /* * Timeouts occur when the device isn't connected, so they're "normal" * -- don't fill the kernel log with these */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n", intel_dp->aux.name, status); ret = -ETIMEDOUT; goto out; } /* Unload any bytes sent back from the other side */ recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status); /* * By BSpec: "Message sizes of 0 or >20 are not allowed." * We have no idea of what happened so we return -EBUSY so * drm layer takes care for the necessary retries. 
*/ if (recv_bytes == 0 || recv_bytes > 20) { drm_dbg_kms(&i915->drm, "%s: Forbidden recv_bytes = %d on aux transaction\n", intel_dp->aux.name, recv_bytes); ret = -EBUSY; goto out; } if (recv_bytes > recv_size) recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; out: cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) intel_pps_vdd_off_unlocked(intel_dp, false); intel_pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); out_unlock: if (is_tc_port) intel_tc_port_unlock(dig_port); return ret; } #define BARE_ADDRESS_SIZE 3 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) static void intel_dp_aux_header(u8 txbuf[HEADER_SIZE], const struct drm_dp_aux_msg *msg) { txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf); txbuf[1] = (msg->address >> 8) & 0xff; txbuf[2] = msg->address & 0xff; txbuf[3] = msg->size - 1; } static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg) { /* * If we're trying to send the HDCP Aksv, we need to set a the Aksv * select bit to inform the hardware to send the Aksv after our header * since we can't access that data from software. */ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE && msg->address == DP_AUX_HDCP_AKSV) return DP_AUX_CH_CTL_AUX_AKSV_SELECT; return 0; } static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; u32 flags = intel_dp_aux_xfer_flags(msg); int ret; intel_dp_aux_header(txbuf, msg); switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_WRITE: case DP_AUX_I2C_WRITE: case DP_AUX_I2C_WRITE_STATUS_UPDATE: txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; rxsize = 2; /* 0 or 1 data bytes */ if (drm_WARN_ON(&i915->drm, txsize > 20)) return -E2BIG; drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size); if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; if (ret > 1) { /* Number of bytes written in a short write. */ ret = clamp_t(int, rxbuf[1], 0, msg->size); } else { /* Return payload size. */ ret = msg->size; } } break; case DP_AUX_NATIVE_READ: case DP_AUX_I2C_READ: txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; rxsize = msg->size + 1; if (drm_WARN_ON(&i915->drm, rxsize > 20)) return -E2BIG; ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; /* * Assume happy day, and copy the data. The caller is * expected to check msg->reply before touching it. * * Return payload size. 
*/ ret--; memcpy(msg->buffer, rxbuf + 1, ret); } break; default: ret = -EINVAL; break; } return ret; } static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: return DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); return DP_AUX_CH_CTL(AUX_CH_B); } } static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: return DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); return DP_AUX_CH_DATA(AUX_CH_B, index); } } static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: return DP_AUX_CH_CTL(aux_ch); case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: return PCH_DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); return DP_AUX_CH_CTL(AUX_CH_A); } } static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: return DP_AUX_CH_DATA(aux_ch, index); case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: return PCH_DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); return DP_AUX_CH_DATA(AUX_CH_A, index); } } static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: return DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); return DP_AUX_CH_CTL(AUX_CH_A); } } static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: return DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); return DP_AUX_CH_DATA(AUX_CH_A, index); } } static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_C: case AUX_CH_USBC1: case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */ return DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); return DP_AUX_CH_CTL(AUX_CH_A); } } static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_C: case AUX_CH_USBC1: 
case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */ return DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); return DP_AUX_CH_DATA(AUX_CH_A, index); } } static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_USBC1: case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: return XELPDP_DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); return XELPDP_DP_AUX_CH_CTL(AUX_CH_A); } } static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: case AUX_CH_B: case AUX_CH_USBC1: case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: return XELPDP_DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index); } } void intel_dp_aux_fini(struct intel_dp *intel_dp) { if (cpu_latency_qos_request_active(&intel_dp->pm_qos)) cpu_latency_qos_remove_request(&intel_dp->pm_qos); kfree(intel_dp->aux.name); } void intel_dp_aux_init(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum aux_ch aux_ch = dig_port->aux_ch; if (DISPLAY_VER(dev_priv) >= 14) { intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; } else if (DISPLAY_VER(dev_priv) >= 12) { intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; intel_dp->aux_ch_data_reg = tgl_aux_data_reg; } else if (DISPLAY_VER(dev_priv) >= 9) { intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; intel_dp->aux_ch_data_reg = skl_aux_data_reg; } else if (HAS_PCH_SPLIT(dev_priv)) { intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; intel_dp->aux_ch_data_reg = ilk_aux_data_reg; } else { intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; intel_dp->aux_ch_data_reg = g4x_aux_data_reg; } if (DISPLAY_VER(dev_priv) >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; else if (HAS_PCH_SPLIT(dev_priv)) intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; else intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; if (DISPLAY_VER(dev_priv) >= 9) intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; else intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; intel_dp->aux.drm_dev = &dev_priv->drm; drm_dp_aux_init(&intel_dp->aux); /* Failure to allocate our preferred name is not critical */ if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD) intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s", aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D), encoder->base.name); else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1) intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s", aux_ch - AUX_CH_USBC1 + '1', encoder->base.name); else intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s", aux_ch_name(aux_ch), encoder->base.name); intel_dp->aux.transfer = intel_dp_aux_transfer; cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); } static enum aux_ch default_aux_ch(struct 
intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); /* SKL has DDI E but no AUX E */ if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E) return AUX_CH_A; return (enum aux_ch)encoder->port; } static struct intel_encoder * get_encoder_by_aux_ch(struct intel_encoder *encoder, enum aux_ch aux_ch) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; for_each_intel_encoder(&i915->drm, other) { if (other == encoder) continue; if (!intel_encoder_is_dig_port(other)) continue; if (enc_to_dig_port(other)->aux_ch == aux_ch) return other; } return NULL; } enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_encoder *other; const char *source; enum aux_ch aux_ch; aux_ch = intel_bios_dp_aux_ch(encoder->devdata); source = "VBT"; if (aux_ch == AUX_CH_NONE) { aux_ch = default_aux_ch(encoder); source = "platform default"; } if (aux_ch == AUX_CH_NONE) return AUX_CH_NONE; /* FIXME validate aux_ch against platform caps */ other = get_encoder_by_aux_ch(encoder, aux_ch); if (other) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] AUX CH %c already claimed by [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name, aux_ch_name(aux_ch), other->base.base.id, other->base.name); return AUX_CH_NONE; } drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Using AUX CH %c (%s)\n", encoder->base.base.id, encoder->base.name, aux_ch_name(aux_ch), source); return aux_ch; } void intel_dp_aux_irq_handler(struct drm_i915_private *i915) { wake_up_all(&i915->display.gmbus.wait_queue); }
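/*
 * Illustrative, standalone sketch (not driver code): how the AUX request
 * header is laid out and how payload bytes are packed big-endian into the
 * 32-bit AUX data registers, mirroring intel_dp_aux_header() and
 * intel_dp_aux_pack() above. The request nibble value follows the DP spec;
 * the address and size below are arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_AUX_NATIVE_READ 0x9	/* native AUX read request nibble */

static void aux_header(uint8_t txbuf[4], uint8_t request, uint32_t address, int size)
{
	txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
	txbuf[1] = (address >> 8) & 0xff;
	txbuf[2] = address & 0xff;
	txbuf[3] = size - 1;	/* AUX encodes the length as size - 1 */
}

static uint32_t aux_pack(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	/* byte 0 lands in the most significant byte of the register */
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	uint8_t txbuf[4];

	/* native AUX read of 16 bytes starting at DPCD address 0x00000 */
	aux_header(txbuf, DEMO_AUX_NATIVE_READ, 0x00000, 16);

	printf("header bytes: %02x %02x %02x %02x\n",
	       txbuf[0], txbuf[1], txbuf[2], txbuf[3]);
	printf("packed into the first AUX data register: 0x%08x\n",
	       aux_pack(txbuf, 4));
	return 0;
}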
linux-master
drivers/gpu/drm/i915/display/intel_dp_aux.c
/* * Copyright © 2013 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Shobhit Kumar <[email protected]> * Yogesh Mohan Marimuthu <[email protected]> */ #include <linux/kernel.h> #include <linux/string_helpers.h> #include "i915_drv.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_pll_regs.h" #include "vlv_sideband.h" static const u16 lfsr_converts[] = { 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */ }; /* Get DSI clock from pixel clock */ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt, int lane_count) { u32 dsi_clk_khz; u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt); /* DSI data rate = pixel clock * bits per pixel / lane count pixel clock is converted from KHz to Hz */ dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count); return dsi_clk_khz; } static int dsi_calc_mnp(struct drm_i915_private *dev_priv, struct intel_crtc_state *config, int target_dsi_clk) { unsigned int m_min, m_max, p_min = 2, p_max = 6; unsigned int m, n, p; unsigned int calc_m, calc_p; int delta, ref_clk; /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { drm_err(&dev_priv->drm, "DSI CLK Out of Range\n"); return -ECHRNG; } if (IS_CHERRYVIEW(dev_priv)) { ref_clk = 100000; n = 4; m_min = 70; m_max = 96; } else { ref_clk = 25000; n = 1; m_min = 62; m_max = 92; } calc_p = p_min; calc_m = m_min; delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n)); for (m = m_min; m <= m_max && delta; m++) { for (p = p_min; p <= p_max && delta; p++) { /* * Find the optimal m and p divisors with minimal delta * +/- the required clock */ int calc_dsi_clk = (m * ref_clk) / (p * n); int d = abs(target_dsi_clk - calc_dsi_clk); if (d < delta) { delta = d; calc_m = m; calc_p = p; } } } /* register has log2(N1), this works fine for powers of two */ config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); config->dsi_pll.div = (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT | (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT; return 0; } static int vlv_dsi_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = 
enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock; u32 pll_ctl, pll_div; u32 m = 0, p = 0, n; int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000; int i; pll_ctl = config->dsi_pll.ctrl; pll_div = config->dsi_pll.div; /* mask out other bits and extract the P1 divisor */ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); /* N1 divisor */ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; n = 1 << n; /* register has log2(N1) */ /* mask out the other bits and extract the M1 divisor */ pll_div &= DSI_PLL_M1_DIV_MASK; pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; while (pll_ctl) { pll_ctl = pll_ctl >> 1; p++; } p--; if (!p) { drm_err(&dev_priv->drm, "wrong P1 divisor\n"); return 0; } for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { if (lfsr_converts[i] == pll_div) break; } if (i == ARRAY_SIZE(lfsr_converts)) { drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); return 0; } m = i + 62; dsi_clock = (m * refclk) / (p * n); return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); } /* * XXX: The muxing and gating is hard coded for now. Need to add support for * sharing PLLs with two DSI outputs. */ int vlv_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int pclk, dsi_clk, ret; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); ret = dsi_calc_mnp(dev_priv, config, dsi_clk); if (ret) { drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n"); return ret; } if (intel_dsi->ports & (1 << PORT_A)) config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL; if (intel_dsi->ports & (1 << PORT_C)) config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL; config->dsi_pll.ctrl |= DSI_PLL_VCO_EN; drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", config->dsi_pll.div, config->dsi_pll.ctrl); pclk = vlv_dsi_pclk(encoder, config); config->port_clock = pclk; /* FIXME definitely not right for burst/cmd mode/pixel overlap */ config->hw.adjusted_mode.crtc_clock = pclk; if (intel_dsi->dual_link) config->hw.adjusted_mode.crtc_clock *= 2; return 0; } void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0); vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div); vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN); /* wait at least 0.5 us after ungating before enabling VCO, * allow hrtimer subsystem optimization by relaxing timing */ usleep_range(10, 50); vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl); if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_LOCK, 20)) { vlv_cck_put(dev_priv); drm_err(&dev_priv->drm, "DSI PLL lock failed\n"); return; } vlv_cck_put(dev_priv); drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); } void vlv_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); tmp &= ~DSI_PLL_VCO_EN; tmp |= DSI_PLL_LDO_GATE; vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp); vlv_cck_put(dev_priv); } bool 
bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) { bool enabled; u32 val; u32 mask; mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED; val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); enabled = (val & mask) == mask; if (!enabled) return false; /* * Dividers must be programmed with valid values. As per BSEPC, for * GEMINLAKE only PORT A divider values are checked while for BXT * both divider values are validated. Check this here for * paranoia, since BIOS is known to misconfigure PLLs in this way at * times, and since accessing DSI registers with invalid dividers * causes a system hang. */ val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); if (IS_GEMINILAKE(dev_priv)) { if (!(val & BXT_DSIA_16X_MASK)) { drm_dbg(&dev_priv->drm, "Invalid PLL divider (%08x)\n", val); enabled = false; } } else { if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) { drm_dbg(&dev_priv->drm, "Invalid PLL divider (%08x)\n", val); enabled = false; } } return enabled; } void bxt_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); drm_dbg_kms(&dev_priv->drm, "\n"); intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); /* * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. */ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) drm_err(&dev_priv->drm, "Timeout waiting for PLL lock deassertion\n"); } u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 pll_ctl, pll_div; drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER); vlv_cck_put(dev_priv); config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK; config->dsi_pll.div = pll_div; return vlv_dsi_pclk(encoder, config); } static int bxt_dsi_pclk(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_ratio, dsi_clk; dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); } u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 pclk; config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); pclk = bxt_dsi_pclk(encoder, config); drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); return pclk; } void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); temp = intel_de_read(dev_priv, MIPI_CTRL(port)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; intel_de_write(dev_priv, MIPI_CTRL(port), temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT); } static void glk_dsi_program_esc_clock(struct drm_device *dev, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(dev); u32 dsi_rate = 0; u32 pll_ratio = 0; u32 ddr_clk = 0; u32 div1_value = 0; u32 div2_value = 0; u32 txesc1_div = 0; u32 txesc2_div = 0; pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2; ddr_clk = dsi_rate / 2; /* Variable divider value */ div1_value = 
DIV_ROUND_CLOSEST(ddr_clk, 20000); /* Calculate TXESC1 divider */ if (div1_value <= 10) txesc1_div = div1_value; else if ((div1_value > 10) && (div1_value <= 20)) txesc1_div = DIV_ROUND_UP(div1_value, 2); else if ((div1_value > 20) && (div1_value <= 30)) txesc1_div = DIV_ROUND_UP(div1_value, 4); else if ((div1_value > 30) && (div1_value <= 40)) txesc1_div = DIV_ROUND_UP(div1_value, 6); else if ((div1_value > 40) && (div1_value <= 50)) txesc1_div = DIV_ROUND_UP(div1_value, 8); else txesc1_div = 10; /* Calculate TXESC2 divider */ div2_value = DIV_ROUND_UP(div1_value, txesc1_div); txesc2_div = min_t(u32, div2_value, 10); intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; u32 dsi_rate = 0; u32 pll_ratio = 0; u32 rx_div; u32 tx_div; u32 rx_div_upper; u32 rx_div_lower; u32 mipi_8by3_divider; /* Clear old configurations */ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); /* Get the current DSI rate(actual) */ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2; /* * tx clock should be <= 20MHz and the div value must be * subtracted by 1 as per bspec */ tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1; /* * rx clock should be <= 150MHz and the div value must be * subtracted by 1 as per bspec */ rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1; /* * rx divider value needs to be updated in the * two differnt bit fields in the register hence splitting the * rx divider value accordingly */ rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2; rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2; mipi_8by3_divider = 0x2; tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider); tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div); tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower); tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper); intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); } int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; int pclk; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); /* * From clock diagram, to get PLL ratio divider, divide double of DSI * link rate (i.e., 2*8x=16x frequency value) by ref clock. 
Make sure to * round 'up' the result */ dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ); if (IS_BROXTON(dev_priv)) { dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN; dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX; } else { dsi_ratio_min = GLK_DSI_PLL_RATIO_MIN; dsi_ratio_max = GLK_DSI_PLL_RATIO_MAX; } if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { drm_err(&dev_priv->drm, "Can't get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); /* * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x * Spec says both have to be programmed, even if one is not getting * used. Configure MIPI_CLOCK_CTL dividers in modeset */ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2; /* As per recommendation from hardware team, * Prog PVD ratio =1 if dsi ratio <= 50 */ if (IS_BROXTON(dev_priv) && dsi_ratio <= 50) config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; pclk = bxt_dsi_pclk(encoder, config); config->port_clock = pclk; /* FIXME definitely not right for burst/cmd mode/pixel overlap */ config->hw.adjusted_mode.crtc_clock = pclk; if (intel_dsi->dual_link) config->hw.adjusted_mode.crtc_clock *= 2; return 0; } void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(&dev_priv->drm, "\n"); /* Configure PLL vales */ intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL); /* Program TX, RX, Dphy clocks */ if (IS_BROXTON(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) bxt_dsi_program_clocks(encoder->base.dev, port, config); } else { glk_dsi_program_esc_clock(encoder->base.dev, config); } /* Enable DSI PLL */ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); /* Timeout and fail if PLL not locked */ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) { drm_err(&dev_priv->drm, "Timed out waiting for DSI PLL to lock\n"); return; } drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); } void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 tmp; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); /* Clear old configurations */ if (IS_BROXTON(dev_priv)) { tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); } else { intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0); intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0); } intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); } static void assert_dsi_pll(struct drm_i915_private *i915, bool state) { bool cur_state; vlv_cck_get(i915); cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN; vlv_cck_put(i915); I915_STATE_WARN(i915, cur_state != state, "DSI PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } void assert_dsi_pll_enabled(struct drm_i915_private *i915) { assert_dsi_pll(i915, true); } void assert_dsi_pll_disabled(struct drm_i915_private *i915) { assert_dsi_pll(i915, false); }
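/*
 * Illustrative, standalone sketch (not driver code): the same arithmetic
 * dsi_clk_from_pclk() and dsi_calc_mnp() above perform for VLV (25 MHz
 * reference clock, N = 1, M in 62..92, P in 2..6), re-implemented as a
 * userspace brute-force search. The pixel clock, bpp and lane count are
 * arbitrary example values, not taken from any particular panel.
 */
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const int pclk_khz = 148500, bpp = 24, lanes = 4;
	const int ref_khz = 25000, n = 1;
	/* DSI clock = pixel clock * bits per pixel / lane count, in kHz */
	int target = DIV_ROUND_CLOSEST(pclk_khz * bpp, lanes);
	int best_m = 62, best_p = 2, best_delta;
	int m, p;

	best_delta = abs(target - (62 * ref_khz) / (2 * n));

	/* brute-force the divisors for the smallest error, like dsi_calc_mnp() */
	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {
			int delta = abs(target - (m * ref_khz) / (p * n));

			if (delta < best_delta) {
				best_delta = delta;
				best_m = m;
				best_p = p;
			}
		}
	}

	printf("target DSI clock %d kHz -> m=%d p=%d (actual %d kHz)\n",
	       target, best_m, best_p, (best_m * ref_khz) / (best_p * n));
	return 0;
}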
linux-master
drivers/gpu/drm/i915/display/vlv_dsi_pll.c
/* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> */ #include <linux/dma-resv.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_object.h" #include "g4x_dp.h" #include "g4x_hdmi.h" #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" #include "i9xx_plane.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" #include "intel_color.h" #include "intel_crt.h" #include "intel_crtc.h" #include "intel_crtc_state_dump.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_driver.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_dpt.h" #include "intel_drrs.h" #include "intel_dsi.h" #include "intel_dvo.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_frontbuffer.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_modeset_setup.h" #include "intel_modeset_verify.h" #include "intel_overlay.h" #include "intel_panel.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pipe_crc.h" #include "intel_plane_initial.h" #include "intel_pmdemand.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_sdvo.h" #include "intel_snps_phy.h" #include "intel_tc.h" #include "intel_tv.h" #include "intel_vblank.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" #include "intel_vga.h" #include "intel_vrr.h" #include "intel_wm.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" #include "vlv_dsi.h" #include 
"vlv_dsi_pll.h" #include "vlv_dsi_regs.h" #include "vlv_sideband.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) { int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; /* Obtain SKU information */ hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK; return vco_freq[hpll_freq] * 1000; } int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq) { u32 val; int divider; val = vlv_cck_read(dev_priv, reg); divider = val & CCK_FREQUENCY_VALUES; drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != (divider << CCK_FREQUENCY_STATUS_SHIFT), "%s change in progress\n", name); return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); } int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, const char *name, u32 reg) { int hpll; vlv_cck_get(dev_priv); if (dev_priv->hpll_freq == 0) dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); vlv_cck_put(dev_priv); return hpll; } void intel_update_czclk(struct drm_i915_private *dev_priv) { if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) return; dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", CCK_CZ_CLOCK_CONTROL); drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq); } static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) { return (crtc_state->active_planes & ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; } /* WA Display #0827: Gen9:all */ static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); else intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); } /* Wa_2006604312:icl,ehl */ static void icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); else intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); } /* Wa_1604331009:icl,jsl,ehl */ static void icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS, enable ? 
CURSOR_GATING_DIS : 0); } static bool is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { return crtc_state->master_transcoder != INVALID_TRANSCODER; } bool is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) { return crtc_state->sync_mode_slaves_mask != 0; } bool is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) { return is_trans_port_sync_master(crtc_state) || is_trans_port_sync_slave(crtc_state); } static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state) { return ffs(crtc_state->bigjoiner_pipes) - 1; } u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state) { if (crtc_state->bigjoiner_pipes) return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state)); else return 0; } bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); return crtc_state->bigjoiner_pipes && crtc->pipe != bigjoiner_master_pipe(crtc_state); } bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); return crtc_state->bigjoiner_pipes && crtc->pipe == bigjoiner_master_pipe(crtc_state); } static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state) { return hweight8(crtc_state->bigjoiner_pipes); } struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (intel_crtc_is_bigjoiner_slave(crtc_state)) return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state)); else return to_intel_crtc(crtc_state->uapi.crtc); } static void intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (DISPLAY_VER(dev_priv) >= 4) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; /* Wait for the Pipe State to go off */ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder), TRANSCONF_STATE_ENABLE, 100)) drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); } } void assert_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, bool state) { bool cur_state; enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) state = true; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (wakeref) { u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); cur_state = !!(val & TRANSCONF_ENABLE); intel_display_power_put(dev_priv, power_domain, wakeref); } else { cur_state = false; } I915_STATE_WARN(dev_priv, cur_state != state, "transcoder %s assertion failure (expected %s, current %s)\n", transcoder_name(cpu_transcoder), str_on_off(state), str_on_off(cur_state)); } static void assert_plane(struct intel_plane *plane, bool state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe; bool cur_state; cur_state = plane->get_hw_state(plane, &pipe); I915_STATE_WARN(i915, cur_state != state, "%s assertion failure (expected %s, current %s)\n", plane->base.name, str_on_off(state), str_on_off(cur_state)); } #define assert_plane_enabled(p) assert_plane(p, true) #define assert_plane_disabled(p) assert_plane(p, false) static void 
assert_planes_disabled(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_plane *plane; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) assert_plane_disabled(plane); } void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask) { u32 port_mask; i915_reg_t dpll_reg; switch (dig_port->base.port) { default: MISSING_CASE(dig_port->base.port); fallthrough; case PORT_B: port_mask = DPLL_PORTB_READY_MASK; dpll_reg = DPLL(0); break; case PORT_C: port_mask = DPLL_PORTC_READY_MASK; dpll_reg = DPLL(0); expected_mask <<= 4; break; case PORT_D: port_mask = DPLL_PORTD_READY_MASK; dpll_reg = DPIO_PHY_STATUS; break; } if (intel_de_wait_for_register(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) drm_WARN(&dev_priv->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", dig_port->base.base.base.id, dig_port->base.base.name, intel_de_read(dev_priv, dpll_reg) & port_mask, expected_mask); } void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 val; drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); assert_planes_disabled(crtc); /* * A pipe without a PLL won't actually be able to drive bits from * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. */ if (HAS_GMCH(dev_priv)) { if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); } else { if (new_crtc_state->has_pch_encoder) { /* if driving the PCH, we need FDI enabled */ assert_fdi_rx_pll_enabled(dev_priv, intel_crtc_pch_transcoder(crtc)); assert_fdi_tx_pll_enabled(dev_priv, (enum pipe) cpu_transcoder); } /* FIXME: assert CPU port conditions for SNB+ */ } /* Wa_22012358565:adl-p */ if (DISPLAY_VER(dev_priv) == 13) intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 0, PIPE_ARB_USE_PROG_SLOTS); reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); return; } intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); intel_de_posting_read(dev_priv, reg); /* * Until the pipe starts PIPEDSL reads will return a stale value, * which causes an apparent vblank timestamp jump when PIPEDSL * resets to its proper value. That also messes up the frame count * when it's derived from the timestamps. So let's wait for the * pipe to start properly before we call drm_crtc_vblank_on() */ if (intel_crtc_max_vblank_count(new_crtc_state) == 0) intel_wait_for_pipe_scanline_moving(crtc); } void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 val; drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); /* * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display. 
*/ assert_planes_disabled(crtc); reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); if ((val & TRANSCONF_ENABLE) == 0) return; /* * Double wide has implications for planes * so best keep it disabled when not needed. */ if (old_crtc_state->double_wide) val &= ~TRANSCONF_DOUBLE_WIDE; /* Don't disable pipe or pipe PLLs if needed */ if (!IS_I830(dev_priv)) val &= ~TRANSCONF_ENABLE; if (DISPLAY_VER(dev_priv) >= 14) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), FECSTALL_DIS_DPTSTREAM_DPTTG, 0); else if (DISPLAY_VER(dev_priv) >= 12) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), FECSTALL_DIS_DPTSTREAM_DPTTG, 0); intel_de_write(dev_priv, reg, val); if ((val & TRANSCONF_ENABLE) == 0) intel_wait_for_pipe_off(old_crtc_state); } unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) { unsigned int size = 0; int i; for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) size += rot_info->plane[i].dst_stride * rot_info->plane[i].width; return size; } unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) { unsigned int size = 0; int i; for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { unsigned int plane_size; if (rem_info->plane[i].linear) plane_size = rem_info->plane[i].size; else plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; if (plane_size == 0) continue; if (rem_info->plane_alignment) size = ALIGN(size, rem_info->plane_alignment); size += plane_size; } return size; } bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); return DISPLAY_VER(dev_priv) < 4 || (plane->fbc && plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); } /* * Convert the x/y offsets into a linear offset. * Only valid with 0/180 degree rotation, which is fine since linear * offset is only used with linear buffers on pre-hsw and tiled buffers * with gen2/3, and 90/270 degree rotations isn't supported on any of them. */ u32 intel_fb_xy_to_linear(int x, int y, const struct intel_plane_state *state, int color_plane) { const struct drm_framebuffer *fb = state->hw.fb; unsigned int cpp = fb->format->cpp[color_plane]; unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; return y * pitch + x * cpp; } /* * Add the x/y offsets derived from fb->offsets[] to the user * specified plane src x/y offsets. The resulting x/y offsets * specify the start of scanout from the beginning of the gtt mapping. */ void intel_add_fb_offsets(int *x, int *y, const struct intel_plane_state *state, int color_plane) { *x += state->view.color_plane[color_plane].x; *y += state->view.color_plane[color_plane].y; } u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { struct intel_crtc *crtc; struct intel_plane *plane; if (!HAS_DISPLAY(dev_priv)) return 0; /* * We assume the primary plane for pipe A has * the highest stride limits of them all, * if in case pipe A is disabled, use the first pipe from pipe_mask. 
*/ crtc = intel_first_crtc(dev_priv); if (!crtc) return 0; plane = to_intel_plane(crtc->base.primary); return plane->max_stride(plane, pixel_format, modifier, DRM_MODE_ROTATE_0); } void intel_set_plane_visible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state, bool visible) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); plane_state->uapi.visible = visible; if (visible) crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); else crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); } void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct drm_plane *plane; /* * Active_planes aliases if multiple "primary" or cursor planes * have been used on the same (or wrong) pipe. plane_mask uses * unique ids, hence we can use that to reconstruct active_planes. */ crtc_state->enabled_planes = 0; crtc_state->active_planes = 0; drm_for_each_plane_mask(plane, &dev_priv->drm, crtc_state->uapi.plane_mask) { crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); } } void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); drm_dbg_kms(&dev_priv->drm, "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", plane->base.base.id, plane->base.name, crtc->base.base.id, crtc->base.name); intel_set_plane_visible(crtc_state, plane_state, false); intel_plane_fixup_bitmasks(crtc_state); crtc_state->data_rate[plane->id] = 0; crtc_state->data_rate_y[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; crtc_state->rel_data_rate_y[plane->id] = 0; crtc_state->min_cdclk[plane->id] = 0; if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 && hsw_ips_disable(crtc_state)) { crtc_state->ips_enabled = false; intel_crtc_wait_for_next_vblank(crtc); } /* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that * moment. So to make sure the plane gets truly disabled, disable * first the self-refresh mode. The self-refresh enable bit in turn * will be checked/applied by the HW only at the next frame start * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ if (HAS_GMCH(dev_priv) && intel_set_memory_cxsr(dev_priv, false)) intel_crtc_wait_for_next_vblank(crtc); /* * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled. 
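	 * Otherwise we'd get spurious underrun warnings once the last plane
	 * gets turned off.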
*/ if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); intel_plane_disable_arm(plane, crtc_state); intel_crtc_wait_for_next_vblank(crtc); } unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) { int x = 0, y = 0; intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, plane_state->view.color_plane[0].offset, 0); return y; } static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tmp; tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); /* * Display WA #1153: icl * enable hardware to bypass the alpha math * and rounding for per-pixel values 00 and 0xff */ tmp |= PER_PIXEL_ALPHA_BYPASS_EN; /* * Display WA # 1605353570: icl * Set the pixel rounding bit to 1 for allowing * passthrough of Frame buffer pixels unmodified * across pipe */ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; /* * Underrun recovery must always be disabled on display 13+. * DG2 chicken bit meaning is inverted compared to other platforms. */ if (IS_DG2(dev_priv)) tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; else if (DISPLAY_VER(dev_priv) >= 13) tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; /* Wa_14010547955:dg2 */ if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER)) tmp |= DG2_RENDER_CCSTAG_4_3_EN; intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) { struct drm_crtc *crtc; bool cleanup_done; drm_for_each_crtc(crtc, &dev_priv->drm) { struct drm_crtc_commit *commit; spin_lock(&crtc->commit_lock); commit = list_first_entry_or_null(&crtc->commit_list, struct drm_crtc_commit, commit_entry); cleanup_done = commit ? try_wait_for_completion(&commit->cleanup_done) : true; spin_unlock(&crtc->commit_lock); if (cleanup_done) continue; intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc)); return true; } return false; } /* * Finds the encoder associated with the given CRTC. This can only be * used when we know that the CRTC isn't feeding multiple encoders! */ struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { const struct drm_connector_state *connector_state; const struct drm_connector *connector; struct intel_encoder *encoder = NULL; struct intel_crtc *master_crtc; int num_encoders = 0; int i; master_crtc = intel_master_crtc(crtc_state); for_each_new_connector_in_state(&state->base, connector, connector_state, i) { if (connector_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(connector_state->best_encoder); num_encoders++; } drm_WARN(state->base.dev, num_encoders != 1, "%d encoders for pipe %c\n", num_encoders, pipe_name(master_crtc->pipe)); return encoder; } static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_rect *dst = &crtc_state->pch_pfit.dst; enum pipe pipe = crtc->pipe; int width = drm_rect_width(dst); int height = drm_rect_height(dst); int x = dst->x1; int y = dst->y1; if (!crtc_state->pch_pfit.enabled) return; /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
*/ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); else intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), PF_WIN_XPOS(x) | PF_WIN_YPOS(y)); intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height)); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) { if (crtc->overlay) (void) intel_overlay_switch_off(crtc->overlay); /* Let userspace switch the overlay on again. In most cases userspace * has to recompute where to put it anyway. */ } static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (!crtc_state->nv12_planes) return false; /* WA Display #0827: Gen9:all */ if (DISPLAY_VER(dev_priv) == 9) return true; return false; } static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* Wa_2006604312:icl,ehl */ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11) return true; return false; } static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* Wa_1604331009:icl,jsl,ehl */ if (is_hdr_mode(crtc_state) && crtc_state->active_planes & BIT(PLANE_CURSOR) && DISPLAY_VER(dev_priv) == 11) return true; return false; } static void intel_async_flip_vtd_wa(struct drm_i915_private *i915, enum pipe pipe, bool enable) { if (DISPLAY_VER(i915) == 9) { /* * "Plane N strech max must be programmed to 11b (x1) * when Async flips are enabled on that plane." */ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), SKL_PLANE1_STRETCH_MAX_MASK, enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8); } else { /* Also needed on HSW/BDW albeit undocumented */ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), HSW_PRI_STRETCH_MAX_MASK, enable ? 
HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8); } } static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); return crtc_state->uapi.async_flip && i915_vtd_active(i915) && (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); } #define is_enabling(feature, old_crtc_state, new_crtc_state) \ ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \ (new_crtc_state)->feature) #define is_disabling(feature, old_crtc_state, new_crtc_state) \ ((old_crtc_state)->feature && \ (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state))) static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { return is_enabling(active_planes, old_crtc_state, new_crtc_state); } static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { return is_disabling(active_planes, old_crtc_state, new_crtc_state); } static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { return is_enabling(vrr.enable, old_crtc_state, new_crtc_state); } static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { return is_disabling(vrr.enable, old_crtc_state, new_crtc_state); } #undef is_disabling #undef is_enabling static void intel_post_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) intel_update_watermarks(dev_priv); intel_fbc_post_update(state, crtc); if (needs_async_flip_vtd_wa(old_crtc_state) && !needs_async_flip_vtd_wa(new_crtc_state)) intel_async_flip_vtd_wa(dev_priv, pipe, false); if (needs_nv12_wa(old_crtc_state) && !needs_nv12_wa(new_crtc_state)) skl_wa_827(dev_priv, pipe, false); if (needs_scalerclk_wa(old_crtc_state) && !needs_scalerclk_wa(new_crtc_state)) icl_wa_scalerclkgating(dev_priv, pipe, false); if (needs_cursorclk_wa(old_crtc_state) && !needs_cursorclk_wa(new_crtc_state)) icl_wa_cursorclkgating(dev_priv, pipe, false); if (intel_crtc_needs_color_update(new_crtc_state)) intel_color_post_update(new_crtc_state); } static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); u8 update_planes = crtc_state->update_planes; const struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe == crtc->pipe && update_planes & BIT(plane->id)) plane->enable_flip_done(plane); } } static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); u8 update_planes = crtc_state->update_planes; const struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe == 
crtc->pipe && update_planes & BIT(plane->id)) plane->disable_flip_done(plane); } } static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); u8 disable_async_flip_planes = old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes; const struct intel_plane_state *old_plane_state; struct intel_plane *plane; bool need_vbl_wait = false; int i; for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { if (plane->need_async_flip_disable_wa && plane->pipe == crtc->pipe && disable_async_flip_planes & BIT(plane->id)) { /* * Apart from the async flip bit we want to * preserve the old state for the plane. */ plane->async_flip(plane, old_crtc_state, old_plane_state, false); need_vbl_wait = true; } } if (need_vbl_wait) intel_crtc_wait_for_next_vblank(crtc); } static void intel_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; if (vrr_disabling(old_crtc_state, new_crtc_state)) { intel_vrr_disable(old_crtc_state); intel_crtc_update_active_timings(old_crtc_state, false); } intel_drrs_deactivate(old_crtc_state); intel_psr_pre_plane_update(state, crtc); if (hsw_ips_pre_update(state, crtc)) intel_crtc_wait_for_next_vblank(crtc); if (intel_fbc_pre_update(state, crtc)) intel_crtc_wait_for_next_vblank(crtc); if (!needs_async_flip_vtd_wa(old_crtc_state) && needs_async_flip_vtd_wa(new_crtc_state)) intel_async_flip_vtd_wa(dev_priv, pipe, true); /* Display WA 827 */ if (!needs_nv12_wa(old_crtc_state) && needs_nv12_wa(new_crtc_state)) skl_wa_827(dev_priv, pipe, true); /* Wa_2006604312:icl,ehl */ if (!needs_scalerclk_wa(old_crtc_state) && needs_scalerclk_wa(new_crtc_state)) icl_wa_scalerclkgating(dev_priv, pipe, true); /* Wa_1604331009:icl,jsl,ehl */ if (!needs_cursorclk_wa(old_crtc_state) && needs_cursorclk_wa(new_crtc_state)) icl_wa_cursorclkgating(dev_priv, pipe, true); /* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that * moment. So to make sure the plane gets truly disabled, disable * first the self-refresh mode. The self-refresh enable bit in turn * will be checked/applied by the HW only at the next frame start * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) intel_crtc_wait_for_next_vblank(crtc); /* * IVB workaround: must disable low power watermarks for at least * one frame before enabling scaling. LP watermarks can be re-enabled * when scaling is disabled. * * WaCxSRDisabledForSpriteScaling:ivb */ if (old_crtc_state->hw.active && new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) intel_crtc_wait_for_next_vblank(crtc); /* * If we're doing a modeset we don't need to do any * pre-vblank watermark programming here. 
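	 * A full modeset programs the watermarks from scratch as part of
	 * enabling the pipe.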
*/ if (!intel_crtc_needs_modeset(new_crtc_state)) { /* * For platforms that support atomic watermarks, program the * 'intermediate' watermarks immediately. On pre-gen9 platforms, these * will be the intermediate values that are safe for both pre- and * post- vblank; when vblank happens, the 'active' values will be set * to the final 'target' values and we'll do this again to get the * optimal watermarks. For gen9+ platforms, the values we program here * will be the final target values which will get automatically latched * at vblank time; no further programming will be necessary. * * If a platform hasn't been transitioned to atomic watermarks yet, * we'll continue to update watermarks the old way, if flags tell * us to. */ if (!intel_initial_watermarks(state, crtc)) if (new_crtc_state->update_wm_pre) intel_update_watermarks(dev_priv); } /* * Gen2 reports pipe underruns whenever all planes are disabled. * So disable underrun reporting before all the planes get disabled. * * We do this after .initial_watermarks() so that we have a * chance of catching underruns with the intermediate watermarks * vs. the old plane configuration. */ if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); /* * WA for platforms where async address update enable bit * is double buffered and only latched at start of vblank. */ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes) intel_crtc_async_flip_disable_wa(state, crtc); } static void intel_crtc_disable_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); unsigned int update_mask = new_crtc_state->update_planes; const struct intel_plane_state *old_plane_state; struct intel_plane *plane; unsigned fb_bits = 0; int i; intel_crtc_dpms_overlay_disable(crtc); for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { if (crtc->pipe != plane->pipe || !(update_mask & BIT(plane->id))) continue; intel_plane_disable_arm(plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; } intel_frontbuffer_flip(dev_priv, fb_bits); } static void intel_encoders_update_prepare(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; int i; /* * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
*/ if (i915->display.dpll.mgr) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state)) continue; new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; } } } static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_pll_enable) encoder->pre_pll_enable(state, encoder, crtc_state, conn_state); } } static void intel_encoders_pre_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_enable) encoder->pre_enable(state, encoder, crtc_state, conn_state); } } static void intel_encoders_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); if (conn_state->crtc != &crtc->base) continue; if (encoder->enable) encoder->enable(state, encoder, crtc_state, conn_state); intel_opregion_notify_encoder(encoder, true); } } static void intel_encoders_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); if (old_conn_state->crtc != &crtc->base) continue; intel_opregion_notify_encoder(encoder, false); if (encoder->disable) encoder->disable(state, encoder, old_crtc_state, old_conn_state); } } static void intel_encoders_post_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_disable) encoder->post_disable(state, encoder, old_crtc_state, old_conn_state); } } static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; for_each_old_connector_in_state(&state->base, conn, 
old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_pll_disable) encoder->post_pll_disable(state, encoder, old_crtc_state, old_conn_state); } } static void intel_encoders_update_pipe(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); if (conn_state->crtc != &crtc->base) continue; if (encoder->update_pipe) encoder->update_pipe(state, encoder, crtc_state, conn_state); } } static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); plane->disable_arm(plane, crtc_state); } static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (crtc_state->has_pch_encoder) { intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->fdi_m_n); } else if (intel_crtc_has_dp_encoder(crtc_state)) { intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n); intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2); } intel_set_transcoder_timings(crtc_state); ilk_set_pipeconf(crtc_state); } static void ilk_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; /* * Sometimes spurious CPU pipe underruns happen during FDI * training, at least with VGA+HDMI cloning. Suppress them. * * On ILK we get an occasional spurious CPU pipe underruns * between eDP port A enable and vdd enable. Also PCH port * enable seems to result in the occasional CPU pipe underrun. * * Spurious PCH underruns also occur during PCH enabling. */ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); ilk_configure_cpu_transcoder(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); crtc->active = true; intel_encoders_pre_enable(state, crtc); if (new_crtc_state->has_pch_encoder) { ilk_pch_pre_enable(state, crtc); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); } ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ intel_color_load_luts(new_crtc_state); intel_color_commit_noarm(new_crtc_state); intel_color_commit_arm(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); if (new_crtc_state->has_pch_encoder) ilk_pch_enable(state, crtc); intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); if (HAS_PCH_CPT(dev_priv)) intel_wait_for_pipe_scanline_moving(crtc); /* * Must wait for vblank to avoid spurious PCH FIFO underruns. 
* And a second vblank wait is needed at least on ILK with * some interlaced HDMI modes. Let's do the double wait always * in case there are more corner cases we don't know about. */ if (new_crtc_state->has_pch_encoder) { intel_crtc_wait_for_next_vblank(crtc); intel_crtc_wait_for_next_vblank(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, enum pipe pipe, bool apply) { u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; if (apply) val |= mask; else val &= ~mask; intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); } static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), HSW_LINETIME(crtc_state->linetime) | HSW_IPS_LINETIME(crtc_state->ips_linetime)); } static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : CHICKEN_TRANS(transcoder); intel_de_rmw(dev_priv, reg, HSW_FRAME_START_DELAY_MASK, HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); } static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { struct intel_crtc *master_crtc = intel_master_crtc(crtc_state); /* * Enable sequence steps 1-7 on bigjoiner master */ if (intel_crtc_is_bigjoiner_slave(crtc_state)) intel_encoders_pre_pll_enable(state, master_crtc); if (crtc_state->shared_dpll) intel_enable_shared_dpll(crtc_state); if (intel_crtc_is_bigjoiner_slave(crtc_state)) intel_encoders_pre_enable(state, master_crtc); } static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (crtc_state->has_pch_encoder) { intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->fdi_m_n); } else if (intel_crtc_has_dp_encoder(crtc_state)) { intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n); intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2); } intel_set_transcoder_timings(crtc_state); if (HAS_VRR(dev_priv)) intel_vrr_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), crtc_state->pixel_multiplier - 1); hsw_set_frame_start_delay(crtc_state); hsw_set_transconf(crtc_state); } static void hsw_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; bool psl_clkgate_wa; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; intel_dmc_enable_pipe(dev_priv, crtc->pipe); if (!new_crtc_state->bigjoiner_pipes) { intel_encoders_pre_pll_enable(state, crtc); if 
(new_crtc_state->shared_dpll) intel_enable_shared_dpll(new_crtc_state); intel_encoders_pre_enable(state, crtc); } else { icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); } intel_dsc_enable(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 13) intel_uncompressed_joiner_enable(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) bdw_set_pipe_misc(new_crtc_state); if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && !transcoder_is_dsi(cpu_transcoder)) hsw_configure_cpu_transcoder(new_crtc_state); crtc->active = true; /* Display WA #1180: WaDisableScalarClockGating: glk */ psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 && new_crtc_state->pch_pfit.enabled; if (psl_clkgate_wa) glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); if (DISPLAY_VER(dev_priv) >= 9) skl_pfit_enable(new_crtc_state); else ilk_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ intel_color_load_luts(new_crtc_state); intel_color_commit_noarm(new_crtc_state); intel_color_commit_arm(new_crtc_state); /* update DSPCNTR to configure gamma/csc for pipe bottom color */ if (DISPLAY_VER(dev_priv) < 9) intel_disable_primary_plane(new_crtc_state); hsw_set_linetime_wm(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 11) icl_set_pipe_chicken(new_crtc_state); intel_initial_watermarks(state, crtc); if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { intel_crtc_wait_for_next_vblank(crtc); glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); } /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. */ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { struct intel_crtc *wa_crtc; wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); intel_crtc_wait_for_next_vblank(wa_crtc); intel_crtc_wait_for_next_vblank(wa_crtc); } } void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* To avoid upsetting the power well on haswell only disable the pfit if * it's in use. The hw state code will make sure we get this right. */ if (!old_crtc_state->pch_pfit.enabled) return; intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); } static void ilk_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * Sometimes spurious CPU pipe underruns happen when the * pipe is already disabled, but FDI RX/TX is still enabled. * Happens at least with VGA+HDMI cloning. Suppress them. 
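	 * Underrun reporting is re-enabled further down once the FDI link
	 * has been torn down as well.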
*/ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); intel_encoders_disable(state, crtc); intel_crtc_vblank_off(old_crtc_state); intel_disable_transcoder(old_crtc_state); ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) ilk_pch_disable(state, crtc); intel_encoders_post_disable(state, crtc); if (old_crtc_state->has_pch_encoder) ilk_pch_post_disable(state, crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); intel_disable_shared_dpll(old_crtc_state); } static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. */ if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); } intel_disable_shared_dpll(old_crtc_state); if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { struct intel_crtc *slave_crtc; intel_encoders_post_pll_disable(state, crtc); intel_dmc_disable_pipe(i915, crtc->pipe); for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) intel_dmc_disable_pipe(i915, slave_crtc->pipe); } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!crtc_state->gmch_pfit.control) return; /* * The panel fitter should only be adjusted whilst the pipe is disabled, * according to register description and PRM. */ drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); intel_de_write(dev_priv, PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); /* Border color in case we don't scale up to the full screen. Black by * default, change to something else for debugging. */ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); } bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; else if (IS_ALDERLAKE_S(dev_priv)) return phy <= PHY_E; else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) return phy <= PHY_C; else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) return phy <= PHY_B; else /* * DG2 outputs labelled as "combo PHY" in the bspec use * SNPS PHYs with completely different programming, * hence we always return false here. */ return false; } bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { if (IS_DG2(dev_priv)) /* DG2's "TC1" output uses a SNPS PHY */ return false; else if (IS_ALDERLAKE_P(dev_priv) || IS_METEORLAKE(dev_priv)) return phy >= PHY_F && phy <= PHY_I; else if (IS_TIGERLAKE(dev_priv)) return phy >= PHY_D && phy <= PHY_I; else if (IS_ICELAKE(dev_priv)) return phy >= PHY_C && phy <= PHY_F; else return false; } bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; else if (IS_DG2(dev_priv)) /* * All four "combo" ports and the TC1 port (PHY E) use * Synopsis PHYs. 
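	 * That covers PHY A through PHY E, hence the <= PHY_E check below.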
*/ return phy <= PHY_E; return false; } enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) return PHY_D + port - PORT_D_XELPD; else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) return PHY_F + port - PORT_TC1; else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) return PHY_B + port - PORT_TC1; else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) return PHY_C + port - PORT_TC1; else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && port == PORT_D) return PHY_A; return PHY_A + port - PORT_A; } enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) { if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) return TC_PORT_NONE; if (DISPLAY_VER(dev_priv) >= 12) return TC_PORT_1 + port - PORT_TC1; else return TC_PORT_1 + port - PORT_C; } enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); if (intel_tc_port_in_tbt_alt_mode(dig_port)) return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, struct intel_power_domain_mask *mask) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; bitmap_zero(mask->bits, POWER_DOMAIN_NUM); if (!crtc_state->hw.active) return; set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); if (crtc_state->pch_pfit.enabled || crtc_state->pch_pfit.force_thru) set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); drm_for_each_encoder_mask(encoder, &dev_priv->drm, crtc_state->uapi.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); set_bit(intel_encoder->power_domain, mask->bits); } if (HAS_DDI(dev_priv) && crtc_state->has_audio) set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); if (crtc_state->shared_dpll) set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); if (crtc_state->dsc.compression_enable) set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); } void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, struct intel_power_domain_mask *old_domains) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; struct intel_power_domain_mask domains, new_domains; get_crtc_power_domains(crtc_state, &domains); bitmap_andnot(new_domains.bits, domains.bits, crtc->enabled_power_domains.mask.bits, POWER_DOMAIN_NUM); bitmap_andnot(old_domains->bits, crtc->enabled_power_domains.mask.bits, domains.bits, POWER_DOMAIN_NUM); for_each_power_domain(domain, &new_domains) intel_display_power_get_in_set(dev_priv, &crtc->enabled_power_domains, domain); } void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, struct intel_power_domain_mask *domains) { intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), &crtc->enabled_power_domains, domains); } static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if 
(intel_crtc_has_dp_encoder(crtc_state)) { intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n); intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2); } intel_set_transcoder_timings(crtc_state); i9xx_set_pipeconf(crtc_state); } static void valleyview_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; i9xx_configure_cpu_transcoder(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); } crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_encoders_pre_pll_enable(state, crtc); if (IS_CHERRYVIEW(dev_priv)) chv_enable_pll(new_crtc_state); else vlv_enable_pll(new_crtc_state); intel_encoders_pre_enable(state, crtc); i9xx_pfit_enable(new_crtc_state); intel_color_load_luts(new_crtc_state); intel_color_commit_noarm(new_crtc_state); intel_color_commit_arm(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); } static void i9xx_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; i9xx_configure_cpu_transcoder(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); crtc->active = true; if (DISPLAY_VER(dev_priv) != 2) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_encoders_pre_enable(state, crtc); i9xx_enable_pll(new_crtc_state); i9xx_pfit_enable(new_crtc_state); intel_color_load_luts(new_crtc_state); intel_color_commit_noarm(new_crtc_state); intel_color_commit_arm(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); if (!intel_initial_watermarks(state, crtc)) intel_update_watermarks(dev_priv); intel_enable_transcoder(new_crtc_state); intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); /* prevents spurious underruns */ if (DISPLAY_VER(dev_priv) == 2) intel_crtc_wait_for_next_vblank(crtc); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!old_crtc_state->gmch_pfit.control) return; assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", intel_de_read(dev_priv, PFIT_CONTROL)); intel_de_write(dev_priv, PFIT_CONTROL, 0); } static void i9xx_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * On gen2 planes are double buffered but the pipe 
isn't, so we must * wait for planes to fully turn off before disabling the pipe. */ if (DISPLAY_VER(dev_priv) == 2) intel_crtc_wait_for_next_vblank(crtc); intel_encoders_disable(state, crtc); intel_crtc_vblank_off(old_crtc_state); intel_disable_transcoder(old_crtc_state); i9xx_pfit_disable(old_crtc_state); intel_encoders_post_disable(state, crtc); if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev_priv)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev_priv)) vlv_disable_pll(dev_priv, pipe); else i9xx_disable_pll(old_crtc_state); } intel_encoders_post_pll_disable(state, crtc); if (DISPLAY_VER(dev_priv) != 2) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); if (!dev_priv->display.funcs.wm->initial_watermarks) intel_update_watermarks(dev_priv); /* clock the pipe down to 640x480@60 to potentially save power */ if (IS_I830(dev_priv)) i830_enable_pipe(dev_priv, pipe); } void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); drm_encoder_cleanup(encoder); kfree(intel_encoder); } static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) { const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* GDG double wide on either pipe, otherwise pipe A only */ return DISPLAY_VER(dev_priv) < 4 && (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); } static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) { u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; struct drm_rect src; /* * We only use IF-ID interlacing. If we ever use * PF-ID we'll need to adjust the pixel_rate here. */ if (!crtc_state->pch_pfit.enabled) return pixel_rate; drm_rect_init(&src, 0, 0, drm_rect_width(&crtc_state->pipe_src) << 16, drm_rect_height(&crtc_state->pipe_src) << 16); return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, pixel_rate); } static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, const struct drm_display_mode *timings) { mode->hdisplay = timings->crtc_hdisplay; mode->htotal = timings->crtc_htotal; mode->hsync_start = timings->crtc_hsync_start; mode->hsync_end = timings->crtc_hsync_end; mode->vdisplay = timings->crtc_vdisplay; mode->vtotal = timings->crtc_vtotal; mode->vsync_start = timings->crtc_vsync_start; mode->vsync_end = timings->crtc_vsync_end; mode->flags = timings->flags; mode->type = DRM_MODE_TYPE_DRIVER; mode->clock = timings->crtc_clock; drm_mode_set_name(mode); } static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (HAS_GMCH(dev_priv)) /* FIXME calculate proper pipe pixel rate for GMCH pfit */ crtc_state->pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; else crtc_state->pixel_rate = ilk_pipe_pixel_rate(crtc_state); } static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state, struct drm_display_mode *mode) { int num_pipes = intel_bigjoiner_num_pipes(crtc_state); if (num_pipes < 2) return; mode->crtc_clock /= num_pipes; mode->crtc_hdisplay /= num_pipes; mode->crtc_hblank_start /= num_pipes; mode->crtc_hblank_end /= num_pipes; mode->crtc_hsync_start /= num_pipes; mode->crtc_hsync_end /= num_pipes; mode->crtc_htotal /= num_pipes; } static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, struct drm_display_mode *mode) { int overlap = crtc_state->splitter.pixel_overlap; int n = crtc_state->splitter.link_count; if (!crtc_state->splitter.enable) return; /* * eDP MSO 
uses segment timings from EDID for transcoder * timings, but full mode for everything else. * * h_full = (h_segment - pixel_overlap) * link_count */ mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; mode->crtc_clock *= n; } static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) { struct drm_display_mode *mode = &crtc_state->hw.mode; struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* * Start with the adjusted_mode crtc timings, which * have been filled with the transcoder timings. */ drm_mode_copy(pipe_mode, adjusted_mode); /* Expand MSO per-segment transcoder timings to full */ intel_splitter_adjust_timings(crtc_state, pipe_mode); /* * We want the full numbers in adjusted_mode normal timings, * adjusted_mode crtc timings are left with the raw transcoder * timings. */ intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); /* Populate the "user" mode with full numbers */ drm_mode_copy(mode, pipe_mode); intel_mode_from_crtc_timings(mode, mode); mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * (intel_bigjoiner_num_pipes(crtc_state) ?: 1); mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); /* Derive per-pipe timings in case bigjoiner is used */ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); intel_mode_from_crtc_timings(pipe_mode, pipe_mode); intel_crtc_compute_pixel_rate(crtc_state); } void intel_encoder_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { encoder->get_config(encoder, crtc_state); intel_crtc_readout_derived_state(crtc_state); } static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state) { int num_pipes = intel_bigjoiner_num_pipes(crtc_state); int width, height; if (num_pipes < 2) return; width = drm_rect_width(&crtc_state->pipe_src); height = drm_rect_height(&crtc_state->pipe_src); drm_rect_init(&crtc_state->pipe_src, 0, 0, width / num_pipes, height); } static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_bigjoiner_compute_pipe_src(crtc_state); /* * Pipe horizontal size must be even in: * - DVO ganged mode * - LVDS dual channel mode * - Double wide pipe */ if (drm_rect_width(&crtc_state->pipe_src) & 1) { if (crtc_state->double_wide) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(i915)) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } } return 0; } static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int clock_limit = i915->max_dotclk_freq; /* * Start with the 
adjusted_mode crtc timings, which * have been filled with the transcoder timings. */ drm_mode_copy(pipe_mode, adjusted_mode); /* Expand MSO per-segment transcoder timings to full */ intel_splitter_adjust_timings(crtc_state, pipe_mode); /* Derive per-pipe timings in case bigjoiner is used */ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); intel_mode_from_crtc_timings(pipe_mode, pipe_mode); if (DISPLAY_VER(i915) < 4) { clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock * is > 90% of the (display) core speed. */ if (intel_crtc_supports_double_wide(crtc) && pipe_mode->crtc_clock > clock_limit) { clock_limit = i915->max_dotclk_freq; crtc_state->double_wide = true; } } if (pipe_mode->crtc_clock > clock_limit) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", crtc->base.base.id, crtc->base.name, pipe_mode->crtc_clock, clock_limit, str_yes_no(crtc_state->double_wide)); return -EINVAL; } return 0; } static int intel_crtc_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; ret = intel_crtc_compute_pipe_src(crtc_state); if (ret) return ret; ret = intel_crtc_compute_pipe_mode(crtc_state); if (ret) return ret; intel_crtc_compute_pixel_rate(crtc_state); if (crtc_state->has_pch_encoder) return ilk_fdi_compute_config(crtc, crtc_state); return 0; } static void intel_reduce_m_n_ratio(u32 *num, u32 *den) { while (*num > DATA_LINK_M_N_MASK || *den > DATA_LINK_M_N_MASK) { *num >>= 1; *den >>= 1; } } static void compute_m_n(u32 *ret_m, u32 *ret_n, u32 m, u32 n, u32 constant_n) { if (constant_n) *ret_n = constant_n; else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); intel_reduce_m_n_ratio(ret_m, ret_n); } void intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool fec_enable) { u32 data_clock = bits_per_pixel * pixel_clock; if (fec_enable) data_clock = intel_dp_mode_to_fec_clock(data_clock); /* * Windows/BIOS uses fixed M/N values always. Follow suit. * * Also several DP dongles in particular seem to be fussy * about too large link M/N values. Presumably the 20bit * value used by Windows/BIOS is acceptable to everyone. */ m_n->tu = 64; compute_m_n(&m_n->data_m, &m_n->data_n, data_clock, link_clock * nlanes * 8, 0x8000000); compute_m_n(&m_n->link_m, &m_n->link_n, pixel_clock, link_clock, 0x80000); } void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) { /* * There may be no VBT; and if the BIOS enabled SSC we can * just keep using it to avoid unnecessary flicker. Whereas if the * BIOS isn't using it, don't assume it will work even if the VBT * indicates as much. 
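	 * The BIOS state is read back from PCH_DREF_CONTROL below and
	 * overrides the VBT setting when the two disagree.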
*/ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { bool bios_lvds_use_ssc = intel_de_read(dev_priv, PCH_DREF_CONTROL) & DREF_SSC1_ENABLE; if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { drm_dbg_kms(&dev_priv->drm, "SSC %s by BIOS, overriding VBT which says %s\n", str_enabled_disabled(bios_lvds_use_ssc), str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; } } } void intel_zero_m_n(struct intel_link_m_n *m_n) { /* corresponds to 0 register value */ memset(m_n, 0, sizeof(*m_n)); m_n->tu = 1; } void intel_set_m_n(struct drm_i915_private *i915, const struct intel_link_m_n *m_n, i915_reg_t data_m_reg, i915_reg_t data_n_reg, i915_reg_t link_m_reg, i915_reg_t link_n_reg) { intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); intel_de_write(i915, data_n_reg, m_n->data_n); intel_de_write(i915, link_m_reg, m_n->link_m); /* * On BDW+ writing LINK_N arms the double buffered update * of all the M/N registers, so it must be written last. */ intel_de_write(i915, link_n_reg, m_n->link_n); } bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, enum transcoder transcoder) { if (IS_HASWELL(dev_priv)) return transcoder == TRANSCODER_EDP; return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); } void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, enum transcoder transcoder, const struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (DISPLAY_VER(dev_priv) >= 5) intel_set_m_n(dev_priv, m_n, PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); else intel_set_m_n(dev_priv, m_n, PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, enum transcoder transcoder, const struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) return; intel_set_m_n(dev_priv, m_n, PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; int vsyncshift = 0; /* We need to be careful not to changed the adjusted mode, for otherwise * the hw state checker will get angry at the mismatch. */ crtc_vdisplay = adjusted_mode->crtc_vdisplay; crtc_vtotal = adjusted_mode->crtc_vtotal; crtc_vblank_start = adjusted_mode->crtc_vblank_start; crtc_vblank_end = adjusted_mode->crtc_vblank_end; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { /* the chip adds 2 halflines automatically */ crtc_vtotal -= 1; crtc_vblank_end -= 1; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; else vsyncshift = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal / 2; if (vsyncshift < 0) vsyncshift += adjusted_mode->crtc_htotal; } /* * VBLANK_START no longer works on ADL+, instead we must use * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 
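	 * The value programmed is the number of scanlines between the end of
	 * vertical active and the start of vblank.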
*/ if (DISPLAY_VER(dev_priv) >= 13) { intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), crtc_vblank_start - crtc_vdisplay); /* * VBLANK_START not used by hw, just clear it * to make it stand out in register dumps. */ crtc_vblank_start = 1; } if (DISPLAY_VER(dev_priv) > 3) intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), vsyncshift); intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), HACTIVE(adjusted_mode->crtc_hdisplay - 1) | HTOTAL(adjusted_mode->crtc_htotal - 1)); intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1)); intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), VBLANK_START(crtc_vblank_start - 1) | VBLANK_END(crtc_vblank_end - 1)); intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); /* Workaround: when the EDP input selection is B, the VTOTAL_B must be * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is * documented on the DDI_FUNC_CTL register description, EDP Input Select * bits. */ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && (pipe == PIPE_B || pipe == PIPE_C)) intel_de_write(dev_priv, TRANS_VTOTAL(pipe), VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1)); } static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int width = drm_rect_width(&crtc_state->pipe_src); int height = drm_rect_height(&crtc_state->pipe_src); enum pipe pipe = crtc->pipe; /* pipesrc controls the size that is scaled from, which should * always be the user's requested size. 
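	 * The panel fitter / pipe scaler then scales from this source size to
	 * the final output size.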
*/ intel_de_write(dev_priv, PIPESRC(pipe), PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); } static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) == 2) return false; if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; else return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; } static void intel_get_transcoder_timings(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u32 tmp; tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; } tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; /* FIXME TGL+ DSI transcoders have this! 
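 * (if so, the !transcoder_is_dsi() guard below is presumably stricter than necessary on those platforms and skips a readout that would otherwise be valid)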
*/ if (!transcoder_is_dsi(cpu_transcoder)) { tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; } tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; if (intel_pipe_is_interlaced(pipe_config)) { adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; adjusted_mode->crtc_vtotal += 1; adjusted_mode->crtc_vblank_end += 1; } if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay + intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); } static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); int num_pipes = intel_bigjoiner_num_pipes(crtc_state); enum pipe master_pipe, pipe = crtc->pipe; int width; if (num_pipes < 2) return; master_pipe = bigjoiner_master_pipe(crtc_state); width = drm_rect_width(&crtc_state->pipe_src); drm_rect_translate_to(&crtc_state->pipe_src, (pipe - master_pipe) * width, 0); } static void intel_get_pipe_src_size(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); drm_rect_init(&pipe_config->pipe_src, 0, 0, REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); intel_bigjoiner_adjust_pipe_src(pipe_config); } void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; /* * - We keep both pipes enabled on 830 * - During modeset the pipe is still disabled and must remain so * - During fastset the pipe is already enabled and must remain so */ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) val |= TRANSCONF_ENABLE; if (crtc_state->double_wide) val |= TRANSCONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Bspec claims that we can't use dithering for 30bpp pipes. */ if (crtc_state->dither && crtc_state->pipe_bpp != 30) val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; switch (crtc_state->pipe_bpp) { default: /* Case prevented by intel_choose_pipe_bpp_dither. 
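 * Hitting the default case here would mean a bug in the bpp computation; MISSING_CASE() just logs it and we fall back to 6 bpc below.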
*/ MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: val |= TRANSCONF_BPC_6; break; case 24: val |= TRANSCONF_BPC_8; break; case 30: val |= TRANSCONF_BPC_10; break; } } if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (DISPLAY_VER(dev_priv) < 4 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; else val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; } else { val |= TRANSCONF_INTERLACE_PROGRESSIVE; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && crtc_state->limited_color_range) val |= TRANSCONF_COLOR_RANGE_SELECT; val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); if (crtc_state->wgc_enable) val |= TRANSCONF_WGC_ENABLE; val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) { if (IS_I830(dev_priv)) return false; return DISPLAY_VER(dev_priv) >= 4 || IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe; u32 tmp; if (!i9xx_has_pfit(dev_priv)) return; tmp = intel_de_read(dev_priv, PFIT_CONTROL); if (!(tmp & PFIT_ENABLE)) return; /* Check whether the pfit is attached to our pipe. */ if (DISPLAY_VER(dev_priv) >= 4) pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); else pipe = PIPE_B; if (pipe != crtc->pipe) return; crtc_state->gmch_pfit.control = tmp; crtc_state->gmch_pfit.pgm_ratios = intel_de_read(dev_priv, PFIT_PGM_RATIOS); } static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; struct dpll clock; u32 mdiv; int refclk = 100000; /* In case of DSI, DPLL will not be used */ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) return; vlv_dpio_get(dev_priv); mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); vlv_dpio_put(dev_priv); clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; clock.m2 = mdiv & DPIO_M2DIV_MASK; clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); } static void chv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; /* In case of DSI, DPLL will not be used */ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) return; vlv_dpio_get(dev_priv); cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); vlv_dpio_put(dev_priv); clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; clock.m2 = (pll_dw0 & 0xff) << 22; if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) clock.m2 |= pll_dw2 & 0x3fffff; clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); } static enum intel_output_format bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); if (tmp & PIPE_MISC_YUV420_ENABLE) { /* We support 4:2:0 in full blend mode only */ drm_WARN_ON(&dev_priv->drm, (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); return INTEL_OUTPUT_FORMAT_YCBCR420; } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { return INTEL_OUTPUT_FORMAT_YCBCR444; } else { return INTEL_OUTPUT_FORMAT_RGB; } } static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 tmp; tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (tmp & DISP_PIPE_GAMMA_ENABLE) crtc_state->gamma_enable = true; if (!HAS_GMCH(dev_priv) && tmp & DISP_PIPE_CSC_ENABLE) crtc_state->csc_enable = true; } static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->sink_format = pipe_config->output_format; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; ret = false; tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); if (!(tmp & TRANSCONF_ENABLE)) goto out; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { switch (tmp & TRANSCONF_BPC_MASK) { case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; default: MISSING_CASE(tmp); break; } } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && (tmp & TRANSCONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && (tmp & TRANSCONF_WGC_ENABLE)) pipe_config->wgc_enable = true; if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, CGM_PIPE_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); if (DISPLAY_VER(dev_priv) < 4) pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; intel_get_transcoder_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); i9xx_get_pfit_config(pipe_config); if (DISPLAY_VER(dev_priv) >= 4) { /* No way to read it out on pipes B and C */ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; else tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 
pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & SDVO_MULTIPLIER_MASK) >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; } else { /* Note that on i915G/GM the pixel multiplier is in the sdvo * port and will be fixed up in the encoder->get_config * function. */ pipe_config->pixel_multiplier = 1; } pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, DPLL(crtc->pipe)); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, FP0(crtc->pipe)); pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, FP1(crtc->pipe)); } else { /* Mask out read-only status bits. */ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | DPLL_PORTC_READY_MASK | DPLL_PORTB_READY_MASK); } if (IS_CHERRYVIEW(dev_priv)) chv_crtc_clock_get(crtc, pipe_config); else if (IS_VALLEYVIEW(dev_priv)) vlv_crtc_clock_get(crtc, pipe_config); else i9xx_crtc_clock_get(crtc, pipe_config); /* * Normally the dotclock is filled in by the encoder .get_config() * but in case the pipe is enabled w/o any ports we need a sane * default. */ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock / pipe_config->pixel_multiplier; ret = true; out: intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; /* * - During modeset the pipe is still disabled and must remain so * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) val |= TRANSCONF_ENABLE; switch (crtc_state->pipe_bpp) { default: /* Case prevented by intel_choose_pipe_bpp_dither. */ MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: val |= TRANSCONF_BPC_6; break; case 24: val |= TRANSCONF_BPC_8; break; case 30: val |= TRANSCONF_BPC_10; break; case 36: val |= TRANSCONF_BPC_12; break; } if (crtc_state->dither) val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= TRANSCONF_INTERLACE_IF_ID_ILK; else val |= TRANSCONF_INTERLACE_PF_PD_ILK; /* * This would end up with an odd purple hue over * the entire display. Make sure we don't do it. 
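 * In other words, limited color range is only valid together with RGB output here, which is exactly what the WARN below enforces.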
*/ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->limited_color_range && !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANSCONF_COLOR_RANGE_SELECT; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; /* * - During modeset the pipe is still disabled and must remain so * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) val |= TRANSCONF_ENABLE; if (IS_HASWELL(dev_priv) && crtc_state->dither) val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= TRANSCONF_INTERLACE_IF_ID_ILK; else val |= TRANSCONF_INTERLACE_PF_PD_ILK; if (IS_HASWELL(dev_priv) && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = 0; switch (crtc_state->pipe_bpp) { case 18: val |= PIPE_MISC_BPC_6; break; case 24: val |= PIPE_MISC_BPC_8; break; case 30: val |= PIPE_MISC_BPC_10; break; case 36: /* Port output 12BPC defined for ADLP+ */ if (DISPLAY_VER(dev_priv) > 12) val |= PIPE_MISC_BPC_12_ADLP; break; default: MISSING_CASE(crtc_state->pipe_bpp); break; } if (crtc_state->dither) val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) val |= PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND; if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) val |= PIPE_MISC_HDR_MODE_PRECISION; if (DISPLAY_VER(dev_priv) >= 12) val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; /* allow PSR with sprite enabled */ if (IS_BROADWELL(dev_priv)) val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); } int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); switch (tmp & PIPE_MISC_BPC_MASK) { case PIPE_MISC_BPC_6: return 18; case PIPE_MISC_BPC_8: return 24; case PIPE_MISC_BPC_10: return 30; /* * PORT OUTPUT 12 BPC defined for ADLP+. * * TODO: * For previous platforms with DSI interface, bits 5:7 * are used for storing pipe_bpp irrespective of dithering. * Since the value of 12 BPC is not defined for these bits * on older platforms, need to find a workaround for 12 BPC * MIPI DSI HW readout. 
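 * Until that is sorted out the 12 BPC encoding below is only trusted on display version 13+ (DISPLAY_VER > 12); otherwise it falls through to MISSING_CASE().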
*/ case PIPE_MISC_BPC_12_ADLP: if (DISPLAY_VER(dev_priv) > 12) return 36; fallthrough; default: MISSING_CASE(tmp); return 0; } } int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) { /* * Account for spread spectrum to avoid * oversubscribing the link. Max center spread * is 2.5%; use 5% for safety's sake. */ u32 bps = target_clock * bpp * 21 / 20; return DIV_ROUND_UP(bps, link_bw * 8); } void intel_get_m_n(struct drm_i915_private *i915, struct intel_link_m_n *m_n, i915_reg_t data_m_reg, i915_reg_t data_n_reg, i915_reg_t link_m_reg, i915_reg_t link_n_reg) { m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, enum transcoder transcoder, struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (DISPLAY_VER(dev_priv) >= 5) intel_get_m_n(dev_priv, m_n, PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); else intel_get_m_n(dev_priv, m_n, PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, enum transcoder transcoder, struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) return; intel_get_m_n(dev_priv, m_n, PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 ctl, pos, size; enum pipe pipe; ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); if ((ctl & PF_ENABLE) == 0) return; if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); else pipe = crtc->pipe; crtc_state->pch_pfit.enabled = true; pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); drm_rect_init(&crtc_state->pch_pfit.dst, REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), REG_FIELD_GET(PF_WIN_XSIZE_MASK, size), REG_FIELD_GET(PF_WIN_YSIZE_MASK, size)); /* * We currently do not free assignments of panel fitters on * ivb/hsw (since we don't use the higher upscaling modes which * differentiate them) so just WARN about this case for now.
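 * (the WARN below fires if the pfit readout claims a different pipe than ours, i.e. a stale assignment)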
*/ drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe); } static bool ilk_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; u32 tmp; bool ret; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; ret = false; tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); if (!(tmp & TRANSCONF_ENABLE)) goto out; switch (tmp & TRANSCONF_BPC_MASK) { case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; case TRANSCONF_BPC_12: pipe_config->pipe_bpp = 36; break; default: break; } if (tmp & TRANSCONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { case TRANSCONF_OUTPUT_COLORSPACE_YUV601: case TRANSCONF_OUTPUT_COLORSPACE_YUV709: pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; break; default: pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; break; } pipe_config->sink_format = pipe_config->output_format; pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); pipe_config->pixel_multiplier = 1; ilk_pch_get_config(pipe_config); intel_get_transcoder_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); ilk_get_pfit_config(pipe_config); ret = true; out: intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static u8 bigjoiner_pipes(struct drm_i915_private *i915) { u8 pipes; if (DISPLAY_VER(i915) >= 12) pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); else if (DISPLAY_VER(i915) >= 11) pipes = BIT(PIPE_B) | BIT(PIPE_C); else pipes = 0; return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; } static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; u32 tmp = 0; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); return tmp & TRANS_DDI_FUNC_ENABLE; } static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, u8 *master_pipes, u8 *slave_pipes) { struct intel_crtc *crtc; *master_pipes = 0; *slave_pipes = 0; for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, bigjoiner_pipes(dev_priv)) { enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); if (!(tmp & BIG_JOINER_ENABLE)) continue; if (tmp & MASTER_BIG_JOINER_ENABLE) *master_pipes |= BIT(pipe); else *slave_pipes |= BIT(pipe); } if (DISPLAY_VER(dev_priv) < 13) continue; power_domain = 
POWER_DOMAIN_PIPE(pipe); with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); if (tmp & UNCOMPRESSED_JOINER_MASTER) *master_pipes |= BIT(pipe); if (tmp & UNCOMPRESSED_JOINER_SLAVE) *slave_pipes |= BIT(pipe); } } /* Bigjoiner pipes should always be consecutive master and slave */ drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", *master_pipes, *slave_pipes); } static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) { if ((slave_pipes & BIT(pipe)) == 0) return pipe; /* ignore everything above our pipe */ master_pipes &= ~GENMASK(7, pipe); /* highest remaining bit should be our master pipe */ return fls(master_pipes) - 1; } static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) { enum pipe master_pipe, next_master_pipe; master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); if ((master_pipes & BIT(master_pipe)) == 0) return 0; /* ignore our master pipe and everything below it */ master_pipes &= ~GENMASK(master_pipe, 0); /* make sure a high bit is set for the ffs() */ master_pipes |= BIT(7); /* lowest remaining bit should be the next master pipe */ next_master_pipe = ffs(master_pipes) - 1; return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); } static u8 hsw_panel_transcoders(struct drm_i915_private *i915) { u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); if (DISPLAY_VER(i915) >= 11) panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); return panel_transcoder_mask; } static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); enum transcoder cpu_transcoder; u8 master_pipes, slave_pipes; u8 enabled_transcoders = 0; /* * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). 
*/ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, panel_transcoder_mask) { enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; enum pipe trans_pipe; u32 tmp = 0; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (!(tmp & TRANS_DDI_FUNC_ENABLE)) continue; switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: drm_WARN(dev, 1, "unknown pipe linked to transcoder %s\n", transcoder_name(cpu_transcoder)); fallthrough; case TRANS_DDI_EDP_INPUT_A_ONOFF: case TRANS_DDI_EDP_INPUT_A_ON: trans_pipe = PIPE_A; break; case TRANS_DDI_EDP_INPUT_B_ONOFF: trans_pipe = PIPE_B; break; case TRANS_DDI_EDP_INPUT_C_ONOFF: trans_pipe = PIPE_C; break; case TRANS_DDI_EDP_INPUT_D_ONOFF: trans_pipe = PIPE_D; break; } if (trans_pipe == crtc->pipe) enabled_transcoders |= BIT(cpu_transcoder); } /* single pipe or bigjoiner master */ cpu_transcoder = (enum transcoder) crtc->pipe; if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); /* bigjoiner slave -> consider the master pipe's transcoder as well */ enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); if (slave_pipes & BIT(crtc->pipe)) { cpu_transcoder = (enum transcoder) get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); } return enabled_transcoders; } static bool has_edp_transcoders(u8 enabled_transcoders) { return enabled_transcoders & BIT(TRANSCODER_EDP); } static bool has_dsi_transcoders(u8 enabled_transcoders) { return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1)); } static bool has_pipe_transcoders(u8 enabled_transcoders) { return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1)); } static void assert_enabled_transcoders(struct drm_i915_private *i915, u8 enabled_transcoders) { /* Only one type of transcoder please */ drm_WARN_ON(&i915->drm, has_edp_transcoders(enabled_transcoders) + has_dsi_transcoders(enabled_transcoders) + has_pipe_transcoders(enabled_transcoders) > 1); /* Only DSI transcoders can be ganged */ drm_WARN_ON(&i915->drm, !has_dsi_transcoders(enabled_transcoders) && !is_power_of_2(enabled_transcoders)); } static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, struct intel_display_power_domain_set *power_domain_set) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); unsigned long enabled_transcoders; u32 tmp; enabled_transcoders = hsw_enabled_transcoders(crtc); if (!enabled_transcoders) return false; assert_enabled_transcoders(dev_priv, enabled_transcoders); /* * With the exception of DSI we should only ever have * a single enabled transcoder. With DSI let's just * pick the first one. 
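 * ffs() below therefore picks the lowest numbered, i.e. the first, enabled transcoder.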
*/ pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) return false; if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) pipe_config->pch_pfit.force_thru = true; } tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); return tmp & TRANSCONF_ENABLE; } static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, struct intel_display_power_domain_set *power_domain_set) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder; enum port port; u32 tmp; for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { if (port == PORT_A) cpu_transcoder = TRANSCODER_DSI_A; else cpu_transcoder = TRANSCODER_DSI_C; if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, POWER_DOMAIN_TRANSCODER(cpu_transcoder))) continue; /* * The PLL needs to be enabled with a valid divider * configuration, otherwise accessing DSI registers will hang * the machine. See BSpec North Display Engine * registers/MIPI[BXT]. We can break out here early, since we * need the same DSI PLL to be enabled for both DSI ports. */ if (!bxt_dsi_pll_is_enabled(dev_priv)) break; /* XXX: this works for video mode only */ tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); if (!(tmp & DPI_ENABLE)) continue; tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) continue; pipe_config->cpu_transcoder = cpu_transcoder; break; } return transcoder_is_dsi(pipe_config->cpu_transcoder); } static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); u8 master_pipes, slave_pipes; enum pipe pipe = crtc->pipe; enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) return; crtc_state->bigjoiner_pipes = BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); } static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); bool active; u32 tmp; if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, POWER_DOMAIN_PIPE(crtc->pipe))) return false; pipe_config->shared_dpll = NULL; active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { drm_WARN_ON(&dev_priv->drm, active); active = true; } if (!active) goto out; intel_dsc_get_config(pipe_config); intel_bigjoiner_get_config(pipe_config); if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || DISPLAY_VER(dev_priv) >= 11) intel_get_transcoder_timings(crtc, pipe_config); if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) intel_vrr_get_config(pipe_config); intel_get_pipe_src_size(crtc, pipe_config); if (IS_HASWELL(dev_priv)) { u32 tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); if (tmp & 
TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; else pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; } else { pipe_config->output_format = bdw_get_pipe_misc_output_format(crtc); } pipe_config->sink_format = pipe_config->output_format; pipe_config->gamma_mode = intel_de_read(dev_priv, GAMMA_MODE(crtc->pipe)); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); if (DISPLAY_VER(dev_priv) >= 9) { tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) pipe_config->gamma_enable = true; if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) pipe_config->csc_enable = true; } else { i9xx_get_pipe_color_config(pipe_config); } intel_color_get_config(pipe_config); tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) pipe_config->ips_linetime = REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { if (DISPLAY_VER(dev_priv) >= 9) skl_scaler_get_config(pipe_config); else ilk_get_pfit_config(pipe_config); } hsw_ips_get_config(pipe_config); if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, TRANS_MULT(pipe_config->cpu_transcoder)) + 1; } else { pipe_config->pixel_multiplier = 1; } if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : CHICKEN_TRANS(pipe_config->cpu_transcoder)); pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; } else { /* no idea if this is correct */ pipe_config->framestart_delay = 1; } out: intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains); return active; } bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) return false; crtc_state->hw.active = true; intel_crtc_readout_derived_state(crtc_state); return true; } static int i9xx_pll_refclk(struct drm_device *dev, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) return dev_priv->display.vbt.lvds_ssc_freq; else if (HAS_PCH_SPLIT(dev_priv)) return 120000; else if (DISPLAY_VER(dev_priv) != 2) return 96000; else return 48000; } /* Returns the clock of the currently programmed mode of the given pipe. 
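 * The clock is reconstructed from the FP divisors and the DPLL post dividers read out earlier; note that the resulting port_clock still includes the pixel multiplier (see the comment at the end of the function).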
*/ void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; struct dpll clock; int port_clock; int refclk = i9xx_pll_refclk(dev, pipe_config); if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = pipe_config->dpll_hw_state.fp0; else fp = pipe_config->dpll_hw_state.fp1; clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev_priv)) { clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; } else { clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } if (DISPLAY_VER(dev_priv) != 2) { if (IS_PINEVIEW(dev_priv)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); else clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> DPLL_FPA01_P1_POST_DIV_SHIFT); switch (dpll & DPLL_MODE_MASK) { case DPLLB_MODE_DAC_SERIAL: clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 5 : 10; break; case DPLLB_MODE_LVDS: clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 7 : 14; break; default: drm_dbg_kms(&dev_priv->drm, "Unknown DPLL mode %08x in programmed " "mode\n", (int)(dpll & DPLL_MODE_MASK)); return; } if (IS_PINEVIEW(dev_priv)) port_clock = pnv_calc_dpll_params(refclk, &clock); else port_clock = i9xx_calc_dpll_params(refclk, &clock); } else { enum pipe lvds_pipe; if (IS_I85X(dev_priv) && intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && lvds_pipe == crtc->pipe) { u32 lvds = intel_de_read(dev_priv, LVDS); clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); if (lvds & LVDS_CLKB_POWER_UP) clock.p2 = 7; else clock.p2 = 14; } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; else { clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; } if (dpll & PLL_P2_DIVIDE_BY_4) clock.p2 = 4; else clock.p2 = 2; } port_clock = i9xx_calc_dpll_params(refclk, &clock); } /* * This value includes pixel_multiplier. We will use * port_clock to compute adjusted_mode.crtc_clock in the * encoder's get_config() function. */ pipe_config->port_clock = port_clock; } int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n) { /* * The calculation for the data clock is: * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp * But we want to avoid losing precison if possible, so: * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) * * and the link clock is simpler: * link_clock = (m * link_clock) / n */ if (!m_n->link_n) return 0; return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) { int dotclock; if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, pipe_config->pipe_bpp); else dotclock = pipe_config->port_clock; if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && !intel_crtc_has_dp_encoder(pipe_config)) dotclock *= 2; if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; return dotclock; } /* Returns the currently programmed mode of the given encoder. 
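 * The mode is allocated with kzalloc() and must be freed by the caller; NULL is returned if the encoder is not active or an allocation fails.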
*/ struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc_state *crtc_state; struct drm_display_mode *mode; struct intel_crtc *crtc; enum pipe pipe; if (!encoder->get_hw_state(encoder, &pipe)) return NULL; crtc = intel_crtc_for_pipe(dev_priv, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return NULL; crtc_state = intel_crtc_state_alloc(crtc); if (!crtc_state) { kfree(mode); return NULL; } if (!intel_crtc_get_pipe_config(crtc_state)) { kfree(crtc_state); kfree(mode); return NULL; } intel_encoder_get_config(encoder, crtc_state); intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); kfree(crtc_state); return mode; } static bool encoders_cloneable(const struct intel_encoder *a, const struct intel_encoder *b) { /* masks could be asymmetric, so check both ways */ return a == b || (a->cloneable & BIT(b->type) && b->cloneable & BIT(a->type)); } static bool check_single_encoder_cloning(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_encoder *source_encoder; struct drm_connector *connector; struct drm_connector_state *connector_state; int i; for_each_new_connector_in_state(&state->base, connector, connector_state, i) { if (connector_state->crtc != &crtc->base) continue; source_encoder = to_intel_encoder(connector_state->best_encoder); if (!encoders_cloneable(encoder, source_encoder)) return false; } return true; } static int icl_add_linked_planes(struct intel_atomic_state *state) { struct intel_plane *plane, *linked; struct intel_plane_state *plane_state, *linked_plane_state; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { linked = plane_state->planar_linked_plane; if (!linked) continue; linked_plane_state = intel_atomic_get_plane_state(state, linked); if (IS_ERR(linked_plane_state)) return PTR_ERR(linked_plane_state); drm_WARN_ON(state->base.dev, linked_plane_state->planar_linked_plane != plane); drm_WARN_ON(state->base.dev, linked_plane_state->planar_slave == plane_state->planar_slave); } return 0; } static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct intel_plane *plane, *linked; struct intel_plane_state *plane_state; int i; if (DISPLAY_VER(dev_priv) < 11) return 0; /* * Destroy all old plane links and make the slave plane invisible * in the crtc_state->active_planes mask. 
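 * Valid links are then rebuilt in the second loop below for every plane that still has a bit set in nv12_planes.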
*/ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) continue; plane_state->planar_linked_plane = NULL; if (plane_state->planar_slave && !plane_state->uapi.visible) { crtc_state->enabled_planes &= ~BIT(plane->id); crtc_state->active_planes &= ~BIT(plane->id); crtc_state->update_planes |= BIT(plane->id); crtc_state->data_rate[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; } plane_state->planar_slave = false; } if (!crtc_state->nv12_planes) return 0; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_plane_state *linked_state = NULL; if (plane->pipe != crtc->pipe || !(crtc_state->nv12_planes & BIT(plane->id))) continue; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { if (!icl_is_nv12_y_plane(dev_priv, linked->id)) continue; if (crtc_state->active_planes & BIT(linked->id)) continue; linked_state = intel_atomic_get_plane_state(state, linked); if (IS_ERR(linked_state)) return PTR_ERR(linked_state); break; } if (!linked_state) { drm_dbg_kms(&dev_priv->drm, "Need %d free Y planes for planar YUV\n", hweight8(crtc_state->nv12_planes)); return -EINVAL; } plane_state->planar_linked_plane = linked; linked_state->planar_slave = true; linked_state->planar_linked_plane = plane; crtc_state->enabled_planes |= BIT(linked->id); crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); crtc_state->data_rate[linked->id] = crtc_state->data_rate_y[plane->id]; crtc_state->rel_data_rate[linked->id] = crtc_state->rel_data_rate_y[plane->id]; drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", linked->base.name, plane->base.name); /* Copy parameters to slave plane */ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; linked_state->color_ctl = plane_state->color_ctl; linked_state->view = plane_state->view; linked_state->decrypt = plane_state->decrypt; intel_plane_copy_hw_state(linked_state, plane_state); linked_state->uapi.src = plane_state->uapi.src; linked_state->uapi.dst = plane_state->uapi.dst; if (icl_is_hdr_plane(dev_priv, plane->id)) { if (linked->id == PLANE_SPRITE5) plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; else if (linked->id == PLANE_SPRITE4) plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; else if (linked->id == PLANE_SPRITE3) plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; else if (linked->id == PLANE_SPRITE2) plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; else MISSING_CASE(linked->id); } } return 0; } static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) { const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, pipe_mode->crtc_clock); return min(linetime_wm, 0x1ff); } static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, const struct intel_cdclk_state *cdclk_state) { const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 
* 8, cdclk_state->logical.cdclk); return min(linetime_wm, 0x1ff); } static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, crtc_state->pixel_rate); /* Display WA #1135: BXT:ALL GLK:ALL */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && skl_watermark_ipc_enabled(dev_priv)) linetime_wm /= 2; return min(linetime_wm, 0x1ff); } static int hsw_compute_linetime_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_cdclk_state *cdclk_state; if (DISPLAY_VER(dev_priv) >= 9) crtc_state->linetime = skl_linetime_wm(crtc_state); else crtc_state->linetime = hsw_linetime_wm(crtc_state); if (!hsw_crtc_supports_ips(crtc)) return 0; cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, cdclk_state); return 0; } static int intel_crtc_atomic_check(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && intel_crtc_needs_modeset(crtc_state) && !crtc_state->hw.active) crtc_state->update_wm_post = true; if (intel_crtc_needs_modeset(crtc_state)) { ret = intel_dpll_crtc_get_shared_dpll(state, crtc); if (ret) return ret; } /* * May need to update pipe gamma enable bits * when C8 planes are getting enabled/disabled. */ if (c8_planes_changed(crtc_state)) crtc_state->uapi.color_mgmt_changed = true; if (intel_crtc_needs_color_update(crtc_state)) { ret = intel_color_check(crtc_state); if (ret) return ret; } ret = intel_compute_pipe_wm(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, "Target pipe watermarks are invalid\n"); return ret; } /* * Calculate 'intermediate' watermarks that satisfy both the * old state and the new state. We can program these * immediately. */ ret = intel_compute_intermediate_wm(state, crtc); if (ret) { drm_dbg_kms(&dev_priv->drm, "No valid intermediate pipe watermarks are possible\n"); return ret; } if (DISPLAY_VER(dev_priv) >= 9) { if (intel_crtc_needs_modeset(crtc_state) || intel_crtc_needs_fastset(crtc_state)) { ret = skl_update_scaler_crtc(crtc_state); if (ret) return ret; } ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); if (ret) return ret; } if (HAS_IPS(dev_priv)) { ret = hsw_ips_compute_config(state, crtc); if (ret) return ret; } if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { ret = hsw_compute_linetime_wm(state, crtc); if (ret) return ret; } ret = intel_psr2_sel_fetch_update(state, crtc); if (ret) return ret; return 0; } static int compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, struct intel_crtc_state *crtc_state) { struct drm_connector *connector = conn_state->connector; struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_info *info = &connector->display_info; int bpp; switch (conn_state->max_bpc) { case 6 ... 7: bpp = 6 * 3; break; case 8 ... 
9: bpp = 8 * 3; break; case 10 ... 11: bpp = 10 * 3; break; case 12 ... 16: bpp = 12 * 3; break; default: MISSING_CASE(conn_state->max_bpc); return -EINVAL; } if (bpp < crtc_state->pipe_bpp) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Limiting display bpp to %d " "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", connector->base.id, connector->name, bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, crtc_state->pipe_bpp); crtc_state->pipe_bpp = bpp; } return 0; } static int compute_baseline_pipe_bpp(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_connector *connector; struct drm_connector_state *connector_state; int bpp, i; if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) bpp = 10*3; else if (DISPLAY_VER(dev_priv) >= 5) bpp = 12*3; else bpp = 8*3; crtc_state->pipe_bpp = bpp; /* Clamp display bpp to connector max bpp */ for_each_new_connector_in_state(&state->base, connector, connector_state, i) { int ret; if (connector_state->crtc != &crtc->base) continue; ret = compute_sink_pipe_bpp(connector_state, crtc_state); if (ret) return ret; } return 0; } static bool check_digital_port_conflicts(struct intel_atomic_state *state) { struct drm_device *dev = state->base.dev; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; unsigned int used_ports = 0; unsigned int used_mst_ports = 0; bool ret = true; /* * We're going to peek into connector->state, * hence connection_mutex must be held. */ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); /* * Walk the connector list instead of the encoder * list to detect the problem on ddi platforms * where there's just one encoder per digital port. 
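 * A port may appear at most once as SST/HDMI and may not mix SST/HDMI with MST; the used_ports/used_mst_ports bitmasks below track exactly that.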
*/ drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct drm_connector_state *connector_state; struct intel_encoder *encoder; connector_state = drm_atomic_get_new_connector_state(&state->base, connector); if (!connector_state) connector_state = connector->state; if (!connector_state->best_encoder) continue; encoder = to_intel_encoder(connector_state->best_encoder); drm_WARN_ON(dev, !connector_state->crtc); switch (encoder->type) { case INTEL_OUTPUT_DDI: if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) break; fallthrough; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: /* the same port mustn't appear more than once */ if (used_ports & BIT(encoder->port)) ret = false; used_ports |= BIT(encoder->port); break; case INTEL_OUTPUT_DP_MST: used_mst_ports |= 1 << encoder->port; break; default: break; } } drm_connector_list_iter_end(&conn_iter); /* can't mix MST and SST/HDMI on the same port */ if (used_ports & used_mst_ports) return false; return ret; } static void intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); drm_property_replace_blob(&crtc_state->hw.degamma_lut, crtc_state->uapi.degamma_lut); drm_property_replace_blob(&crtc_state->hw.gamma_lut, crtc_state->uapi.gamma_lut); drm_property_replace_blob(&crtc_state->hw.ctm, crtc_state->uapi.ctm); } static void intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); crtc_state->hw.enable = crtc_state->uapi.enable; crtc_state->hw.active = crtc_state->uapi.active; drm_mode_copy(&crtc_state->hw.mode, &crtc_state->uapi.mode); drm_mode_copy(&crtc_state->hw.adjusted_mode, &crtc_state->uapi.adjusted_mode); crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); } static void copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, struct intel_crtc *slave_crtc) { struct intel_crtc_state *slave_crtc_state = intel_atomic_get_new_crtc_state(state, slave_crtc); struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); const struct intel_crtc_state *master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, master_crtc_state->hw.degamma_lut); drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, master_crtc_state->hw.gamma_lut); drm_property_replace_blob(&slave_crtc_state->hw.ctm, master_crtc_state->hw.ctm); slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; } static int copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, struct intel_crtc *slave_crtc) { struct intel_crtc_state *slave_crtc_state = intel_atomic_get_new_crtc_state(state, slave_crtc); struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); const struct intel_crtc_state *master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); struct intel_crtc_state *saved_state; WARN_ON(master_crtc_state->bigjoiner_pipes != slave_crtc_state->bigjoiner_pipes); saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); if (!saved_state) return -ENOMEM; /* preserve some things from the slave's original crtc state 
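 * namely the uapi state, scaler state, shared DPLL and crc_enabled flag, which are stashed in saved_state below before the master state overwrites the slave state.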
*/ saved_state->uapi = slave_crtc_state->uapi; saved_state->scaler_state = slave_crtc_state->scaler_state; saved_state->shared_dpll = slave_crtc_state->shared_dpll; saved_state->crc_enabled = slave_crtc_state->crc_enabled; intel_crtc_free_hw_state(slave_crtc_state); memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); kfree(saved_state); /* Re-init hw state */ memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); slave_crtc_state->hw.enable = master_crtc_state->hw.enable; slave_crtc_state->hw.active = master_crtc_state->hw.active; drm_mode_copy(&slave_crtc_state->hw.mode, &master_crtc_state->hw.mode); drm_mode_copy(&slave_crtc_state->hw.pipe_mode, &master_crtc_state->hw.pipe_mode); drm_mode_copy(&slave_crtc_state->hw.adjusted_mode, &master_crtc_state->hw.adjusted_mode); slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; WARN_ON(master_crtc_state->bigjoiner_pipes != slave_crtc_state->bigjoiner_pipes); return 0; } static int intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *saved_state; saved_state = intel_crtc_state_alloc(crtc); if (!saved_state) return -ENOMEM; /* free the old crtc_state->hw members */ intel_crtc_free_hw_state(crtc_state); /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. Code that depends on any field being zero should be * fixed, so that the crtc_state can be safely duplicated. For now, * only fields that are known to not cause problems are preserved. */ saved_state->uapi = crtc_state->uapi; saved_state->inherited = crtc_state->inherited; saved_state->scaler_state = crtc_state->scaler_state; saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, sizeof(saved_state->icl_port_dplls)); saved_state->crc_enabled = crtc_state->crc_enabled; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) saved_state->wm = crtc_state->wm; memcpy(crtc_state, saved_state, sizeof(*crtc_state)); kfree(saved_state); intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); return 0; } static int intel_modeset_pipe_config(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_connector *connector; struct drm_connector_state *connector_state; int pipe_src_w, pipe_src_h; int base_bpp, ret, i; bool retry = true; crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; crtc_state->framestart_delay = 1; /* * Sanitize sync polarity flags based on requested ones. If neither * positive nor negative polarity is requested, treat this as meaning * negative polarity.
*/ if (!(crtc_state->hw.adjusted_mode.flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; if (!(crtc_state->hw.adjusted_mode.flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; ret = compute_baseline_pipe_bpp(state, crtc); if (ret) return ret; base_bpp = crtc_state->pipe_bpp; /* * Determine the real pipe dimensions. Note that stereo modes can * increase the actual pipe size due to the frame doubling and * insertion of additional space for blanks between the frame. This * is stored in the crtc timings. We use the requested mode to do this * computation to clearly distinguish it from the adjusted mode, which * can be changed by the connectors in the below retry loop. */ drm_mode_get_hv_timing(&crtc_state->hw.mode, &pipe_src_w, &pipe_src_h); drm_rect_init(&crtc_state->pipe_src, 0, 0, pipe_src_w, pipe_src_h); for_each_new_connector_in_state(&state->base, connector, connector_state, i) { struct intel_encoder *encoder = to_intel_encoder(connector_state->best_encoder); if (connector_state->crtc != &crtc->base) continue; if (!check_single_encoder_cloning(state, crtc, encoder)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", encoder->base.base.id, encoder->base.name); return -EINVAL; } /* * Determine output_types before calling the .compute_config() * hooks so that the hooks can use this information safely. */ if (encoder->compute_output_type) crtc_state->output_types |= BIT(encoder->compute_output_type(encoder, crtc_state, connector_state)); else crtc_state->output_types |= BIT(encoder->type); } encoder_retry: /* Ensure the port clock defaults are reset when retrying. */ crtc_state->port_clock = 0; crtc_state->pixel_multiplier = 1; /* Fill in default crtc timings, allow encoders to overwrite them. */ drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, CRTC_STEREO_DOUBLE); /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ for_each_new_connector_in_state(&state->base, connector, connector_state, i) { struct intel_encoder *encoder = to_intel_encoder(connector_state->best_encoder); if (connector_state->crtc != &crtc->base) continue; ret = encoder->compute_config(encoder, crtc_state, connector_state); if (ret == -EDEADLK) return ret; if (ret < 0) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", encoder->base.base.id, encoder->base.name, ret); return ret; } } /* Set default port clock if not overwritten by the encoder. Needs to be * done afterwards in case the encoder adjusts the mode. 
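	 * The default derived below is simply the adjusted mode's crtc_clock
	 * multiplied by the pixel multiplier.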
*/ if (!crtc_state->port_clock) crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock * crtc_state->pixel_multiplier; ret = intel_crtc_compute_config(state, crtc); if (ret == -EDEADLK) return ret; if (ret == -EAGAIN) { if (drm_WARN(&i915->drm, !retry, "[CRTC:%d:%s] loop in pipe configuration computation\n", crtc->base.base.id, crtc->base.name)) return -EINVAL; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n", crtc->base.base.id, crtc->base.name); retry = false; goto encoder_retry; } if (ret < 0) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", crtc->base.base.id, crtc->base.name, ret); return ret; } /* Dithering seems to not pass-through bits correctly when it should, so * only enable it on 6bpc panels and when its not a compliance * test requesting 6bpc video pattern. */ crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && !crtc_state->dither_force_disable; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", crtc->base.base.id, crtc->base.name, base_bpp, crtc_state->pipe_bpp, crtc_state->dither); return 0; } static int intel_modeset_pipe_config_late(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_connector_state *conn_state; struct drm_connector *connector; int i; intel_bigjoiner_adjust_pipe_src(crtc_state); for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); int ret; if (conn_state->crtc != &crtc->base || !encoder->compute_config_late) continue; ret = encoder->compute_config_late(encoder, crtc_state, conn_state); if (ret) return ret; } return 0; } bool intel_fuzzy_clock_check(int clock1, int clock2) { int diff; if (clock1 == clock2) return true; if (!clock1 || !clock2) return false; diff = abs(clock1 - clock2); if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) return true; return false; } static bool intel_compare_link_m_n(const struct intel_link_m_n *m_n, const struct intel_link_m_n *m2_n2) { return m_n->tu == m2_n2->tu && m_n->data_m == m2_n2->data_m && m_n->data_n == m2_n2->data_n && m_n->link_m == m2_n2->link_m && m_n->link_n == m2_n2->link_n; } static bool intel_compare_infoframe(const union hdmi_infoframe *a, const union hdmi_infoframe *b) { return memcmp(a, b, sizeof(*a)) == 0; } static bool intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, const struct drm_dp_vsc_sdp *b) { return memcmp(a, b, sizeof(*a)) == 0; } static bool intel_compare_buffer(const u8 *a, const u8 *b, size_t len) { return memcmp(a, b, len) == 0; } static void pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, bool fastset, const char *name, const union hdmi_infoframe *a, const union hdmi_infoframe *b) { if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; drm_dbg_kms(&dev_priv->drm, "fastset requirement not met in %s infoframe\n", name); drm_dbg_kms(&dev_priv->drm, "expected:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); drm_dbg_kms(&dev_priv->drm, "found:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); } else { drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); drm_err(&dev_priv->drm, "expected:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); drm_err(&dev_priv->drm, "found:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); } } static void pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, bool fastset, const char *name, 
const struct drm_dp_vsc_sdp *a, const struct drm_dp_vsc_sdp *b) { if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; drm_dbg_kms(&dev_priv->drm, "fastset requirement not met in %s dp sdp\n", name); drm_dbg_kms(&dev_priv->drm, "expected:\n"); drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); drm_dbg_kms(&dev_priv->drm, "found:\n"); drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); } else { drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); drm_err(&dev_priv->drm, "expected:\n"); drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); drm_err(&dev_priv->drm, "found:\n"); drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); } } /* Returns the length up to and including the last differing byte */ static size_t memcmp_diff_len(const u8 *a, const u8 *b, size_t len) { int i; for (i = len - 1; i >= 0; i--) { if (a[i] != b[i]) return i + 1; } return 0; } static void pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, bool fastset, const char *name, const u8 *a, const u8 *b, size_t len) { if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; /* only dump up to the last difference */ len = memcmp_diff_len(a, b, len); drm_dbg_kms(&dev_priv->drm, "fastset requirement not met in %s buffer\n", name); print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 16, 0, b, len, false); } else { /* only dump up to the last difference */ len = memcmp_diff_len(a, b, len); drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE, 16, 0, b, len, false); } } static void __printf(4, 5) pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, const char *name, const char *format, ...) 
{ struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; if (fastset) drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", crtc->base.base.id, crtc->base.name, name, &vaf); else drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", crtc->base.base.id, crtc->base.name, name, &vaf); va_end(args); } static bool fastboot_enabled(struct drm_i915_private *dev_priv) { if (dev_priv->params.fastboot != -1) return dev_priv->params.fastboot; /* Enable fastboot by default on Skylake and newer */ if (DISPLAY_VER(dev_priv) >= 9) return true; /* Enable fastboot by default on VLV and CHV */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return true; /* Disabled by default on all others */ return false; } bool intel_pipe_config_compare(const struct intel_crtc_state *current_config, const struct intel_crtc_state *pipe_config, bool fastset) { struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); bool ret = true; bool fixup_inherited = fastset && current_config->inherited && !pipe_config->inherited; if (fixup_inherited && !fastboot_enabled(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "initial modeset and fastboot not set\n"); ret = false; } #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name, \ pipe_config->name); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name & (mask), \ pipe_config->name & (mask)); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_I(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected %i, found %i)", \ current_config->name, \ pipe_config->name); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_BOOL(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected %s, found %s)", \ str_yes_no(current_config->name), \ str_yes_no(pipe_config->name)); \ ret = false; \ } \ } while (0) /* * Checks state where we only read out the enabling, but not the entire * state itself (like full infoframes or ELD for audio). These states * require a full modeset on bootup to fix up. 
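 *
 * Concretely: when fixing up an inherited (BIOS) state and either side has
 * the feature enabled, we cannot verify the full state, so the macro below
 * reports a mismatch to force that full modeset.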
*/ #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ PIPE_CONF_CHECK_BOOL(name); \ } else { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ str_yes_no(current_config->name), \ str_yes_no(pipe_config->name)); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_P(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected %p, found %p)", \ current_config->name, \ pipe_config->name); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_M_N(name) do { \ if (!intel_compare_link_m_n(&current_config->name, \ &pipe_config->name)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ current_config->name.data_m, \ current_config->name.data_n, \ current_config->name.link_m, \ current_config->name.link_n, \ pipe_config->name.tu, \ pipe_config->name.data_m, \ pipe_config->name.data_n, \ pipe_config->name.link_m, \ pipe_config->name.link_n); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_TIMINGS(name) do { \ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ PIPE_CONF_CHECK_I(name.crtc_htotal); \ PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ PIPE_CONF_CHECK_I(name.crtc_vtotal); \ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ } while (0) #define PIPE_CONF_CHECK_RECT(name) do { \ PIPE_CONF_CHECK_I(name.x1); \ PIPE_CONF_CHECK_I(name.x2); \ PIPE_CONF_CHECK_I(name.y1); \ PIPE_CONF_CHECK_I(name.y2); \ } while (0) #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(%x) (expected %i, found %i)", \ (mask), \ current_config->name & (mask), \ pipe_config->name & (mask)); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ if (!intel_compare_infoframe(&current_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ &current_config->infoframes.name, \ &pipe_config->infoframes.name); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ if (!current_config->has_psr && !pipe_config->has_psr && \ !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ &current_config->infoframes.name, \ &pipe_config->infoframes.name); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \ current_config->name, \ pipe_config->name, \ (len)); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ if (current_config->gamma_mode == pipe_config->gamma_mode && \ !intel_color_lut_equal(current_config, \ 
current_config->lut, pipe_config->lut, \ is_pre_csc_lut)) { \ pipe_config_mismatch(fastset, crtc, __stringify(lut), \ "hw_state doesn't match sw_state"); \ ret = false; \ } \ } while (0) #define PIPE_CONF_CHECK_CSC(name) do { \ PIPE_CONF_CHECK_X(name.preoff[0]); \ PIPE_CONF_CHECK_X(name.preoff[1]); \ PIPE_CONF_CHECK_X(name.preoff[2]); \ PIPE_CONF_CHECK_X(name.coeff[0]); \ PIPE_CONF_CHECK_X(name.coeff[1]); \ PIPE_CONF_CHECK_X(name.coeff[2]); \ PIPE_CONF_CHECK_X(name.coeff[3]); \ PIPE_CONF_CHECK_X(name.coeff[4]); \ PIPE_CONF_CHECK_X(name.coeff[5]); \ PIPE_CONF_CHECK_X(name.coeff[6]); \ PIPE_CONF_CHECK_X(name.coeff[7]); \ PIPE_CONF_CHECK_X(name.coeff[8]); \ PIPE_CONF_CHECK_X(name.postoff[0]); \ PIPE_CONF_CHECK_X(name.postoff[1]); \ PIPE_CONF_CHECK_X(name.postoff[2]); \ } while (0) #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) PIPE_CONF_CHECK_I(hw.enable); PIPE_CONF_CHECK_I(hw.active); PIPE_CONF_CHECK_I(cpu_transcoder); PIPE_CONF_CHECK_I(mst_master_transcoder); PIPE_CONF_CHECK_BOOL(has_pch_encoder); PIPE_CONF_CHECK_I(fdi_lanes); PIPE_CONF_CHECK_M_N(fdi_m_n); PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { if (!fastset || !pipe_config->seamless_m_n) PIPE_CONF_CHECK_M_N(dp_m_n); } else { PIPE_CONF_CHECK_M_N(dp_m_n); PIPE_CONF_CHECK_M_N(dp_m2_n2); } PIPE_CONF_CHECK_X(output_types); PIPE_CONF_CHECK_I(framestart_delay); PIPE_CONF_CHECK_I(msa_timing_delay); PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_INTERLACE); if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_PHSYNC); PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_NHSYNC); PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_PVSYNC); PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_NVSYNC); } PIPE_CONF_CHECK_I(output_format); PIPE_CONF_CHECK_BOOL(has_hdmi_sink); if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) PIPE_CONF_CHECK_BOOL(limited_color_range); PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ if (DISPLAY_VER(dev_priv) < 4) PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); /* * Changing the EDP transcoder input mux * (A_ONOFF vs. A_ON) requires a full modeset. 
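	 * That is why pch_pfit.force_thru is checked even for fastsets,
	 * unlike the rest of the pfit state below which is only checked
	 * when !fastset.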
*/ PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); if (!fastset) { PIPE_CONF_CHECK_RECT(pipe_src); PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); PIPE_CONF_CHECK_RECT(pch_pfit.dst); PIPE_CONF_CHECK_I(scaler_state.scaler_id); PIPE_CONF_CHECK_I(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) PIPE_CONF_CHECK_X(cgm_mode); else PIPE_CONF_CHECK_X(csc_mode); PIPE_CONF_CHECK_BOOL(gamma_enable); PIPE_CONF_CHECK_BOOL(csc_enable); PIPE_CONF_CHECK_BOOL(wgc_enable); PIPE_CONF_CHECK_I(linetime); PIPE_CONF_CHECK_I(ips_linetime); PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); PIPE_CONF_CHECK_CSC(csc); PIPE_CONF_CHECK_CSC(output_csc); if (current_config->active_planes) { PIPE_CONF_CHECK_BOOL(has_psr); PIPE_CONF_CHECK_BOOL(has_psr2); PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); PIPE_CONF_CHECK_I(dc3co_exitline); } } PIPE_CONF_CHECK_BOOL(double_wide); if (dev_priv->display.dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); PIPE_CONF_CHECK_X(dpll_hw_state.spll); PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); PIPE_CONF_CHECK_X(dpll_hw_state.div0); PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); PIPE_CONF_CHECK_X(dpll_hw_state.pll0); PIPE_CONF_CHECK_X(dpll_hw_state.pll1); PIPE_CONF_CHECK_X(dpll_hw_state.pll2); PIPE_CONF_CHECK_X(dpll_hw_state.pll3); PIPE_CONF_CHECK_X(dpll_hw_state.pll6); PIPE_CONF_CHECK_X(dpll_hw_state.pll8); PIPE_CONF_CHECK_X(dpll_hw_state.pll9); PIPE_CONF_CHECK_X(dpll_hw_state.pll10); PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); } PIPE_CONF_CHECK_X(dsi_pll.ctrl); PIPE_CONF_CHECK_X(dsi_pll.div); if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) PIPE_CONF_CHECK_I(pipe_bpp); if (!fastset || !pipe_config->seamless_m_n) { PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); } PIPE_CONF_CHECK_I(port_clock); PIPE_CONF_CHECK_I(min_voltage_level); if (current_config->has_psr || pipe_config->has_psr) PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); else PIPE_CONF_CHECK_X(infoframes.enable); PIPE_CONF_CHECK_X(infoframes.gcp); PIPE_CONF_CHECK_INFOFRAME(avi); PIPE_CONF_CHECK_INFOFRAME(spd); PIPE_CONF_CHECK_INFOFRAME(hdmi); PIPE_CONF_CHECK_INFOFRAME(drm); PIPE_CONF_CHECK_DP_VSC_SDP(vsc); PIPE_CONF_CHECK_X(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); PIPE_CONF_CHECK_X(bigjoiner_pipes); PIPE_CONF_CHECK_I(dsc.compression_enable); PIPE_CONF_CHECK_I(dsc.dsc_split); PIPE_CONF_CHECK_I(dsc.compressed_bpp); PIPE_CONF_CHECK_BOOL(splitter.enable); PIPE_CONF_CHECK_I(splitter.link_count); PIPE_CONF_CHECK_I(splitter.pixel_overlap); if (!fastset) PIPE_CONF_CHECK_BOOL(vrr.enable); PIPE_CONF_CHECK_I(vrr.vmin); PIPE_CONF_CHECK_I(vrr.vmax); 
PIPE_CONF_CHECK_I(vrr.flipline); PIPE_CONF_CHECK_I(vrr.pipeline_full); PIPE_CONF_CHECK_I(vrr.guardband); #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_COLOR_LUT #undef PIPE_CONF_CHECK_TIMINGS #undef PIPE_CONF_CHECK_RECT #undef PIPE_CONF_QUIRK return ret; } static void intel_verify_planes(struct intel_atomic_state *state) { struct intel_plane *plane; const struct intel_plane_state *plane_state; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) assert_plane(plane, plane_state->planar_slave || plane_state->uapi.visible); } int intel_modeset_all_pipes(struct intel_atomic_state *state, const char *reason) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; /* * Add all pipes to the state, and force * a modeset on all the active ones. */ for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state; int ret; crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state)) continue; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n", crtc->base.base.id, crtc->base.name, reason); crtc_state->uapi.mode_changed = true; crtc_state->update_pipe = false; ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); if (ret) return ret; ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); if (ret) return ret; ret = intel_atomic_add_affected_planes(state, crtc); if (ret) return ret; crtc_state->update_planes |= crtc_state->active_planes; crtc_state->async_flip_planes = 0; crtc_state->do_async_flip = false; } return 0; } /* * This implements the workaround described in the "notes" section of the mode * set sequence documentation. When going from no pipes or single pipe to * multiple pipes, and planes are enabled after the pipe, we need to wait at * least 2 vblanks on the first pipe before enabling planes on the second pipe. */ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; struct intel_crtc_state *first_crtc_state = NULL; struct intel_crtc_state *other_crtc_state = NULL; enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; int i; /* look at all crtc's that are going to be enabled in during modeset */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.active || !intel_crtc_needs_modeset(crtc_state)) continue; if (first_crtc_state) { other_crtc_state = crtc_state; break; } else { first_crtc_state = crtc_state; first_pipe = crtc->pipe; } } /* No workaround needed? */ if (!first_crtc_state) return 0; /* w/a possibly needed, check how many crtc's are already enabled. 
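	 * If exactly one pipe is already active, the first newly enabled
	 * pipe gets hsw_workaround_pipe pointing at it; if two pipes are
	 * being enabled in this modeset, the second one points at the first.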
*/ for_each_intel_crtc(state->base.dev, crtc) { crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); crtc_state->hsw_workaround_pipe = INVALID_PIPE; if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state)) continue; /* 2 or more enabled crtcs means no need for w/a */ if (enabled_pipe != INVALID_PIPE) return 0; enabled_pipe = crtc->pipe; } if (enabled_pipe != INVALID_PIPE) first_crtc_state->hsw_workaround_pipe = enabled_pipe; else if (other_crtc_state) other_crtc_state->hsw_workaround_pipe = first_pipe; return 0; } u8 intel_calc_active_pipes(struct intel_atomic_state *state, u8 active_pipes) { const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (crtc_state->hw.active) active_pipes |= BIT(crtc->pipe); else active_pipes &= ~BIT(crtc->pipe); } return active_pipes; } static int intel_modeset_checks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); state->modeset = true; if (IS_HASWELL(dev_priv)) return hsw_mode_set_planes_workaround(state); return 0; } static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) { drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); return; } new_crtc_state->uapi.mode_changed = false; if (!intel_crtc_needs_modeset(new_crtc_state)) new_crtc_state->update_pipe = true; } static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, struct intel_crtc *crtc, u8 plane_ids_mask) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_plane *plane; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { struct intel_plane_state *plane_state; if ((plane_ids_mask & BIT(plane->id)) == 0) continue; plane_state = intel_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); } return 0; } int intel_atomic_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); return intel_crtc_add_planes_to_state(state, crtc, old_crtc_state->enabled_planes | new_crtc_state->enabled_planes); } static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) { /* See {hsw,vlv,ivb}_plane_ratio() */ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_IVYBRIDGE(dev_priv); } static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_crtc *other) { const struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; u8 plane_ids = 0; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->pipe == crtc->pipe) plane_ids |= BIT(plane->id); } return intel_crtc_add_planes_to_state(state, other, plane_ids); } static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *other; 
for_each_intel_crtc_in_pipe_mask(&i915->drm, other, crtc_state->bigjoiner_pipes) { int ret; if (crtc == other) continue; ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); if (ret) return ret; } } return 0; } static int intel_atomic_check_planes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; struct intel_crtc *crtc; int i, ret; ret = icl_add_linked_planes(state); if (ret) return ret; ret = intel_bigjoiner_add_affected_planes(state); if (ret) return ret; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { ret = intel_plane_atomic_check(state, plane); if (ret) { drm_dbg_atomic(&dev_priv->drm, "[PLANE:%d:%s] atomic driver check failed\n", plane->base.base.id, plane->base.name); return ret; } } for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { u8 old_active_planes, new_active_planes; ret = icl_check_nv12_planes(new_crtc_state); if (ret) return ret; /* * On some platforms the number of active planes affects * the planes' minimum cdclk calculation. Add such planes * to the state before we compute the minimum cdclk. */ if (!active_planes_affects_min_cdclk(dev_priv)) continue; old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); if (hweight8(old_active_planes) == hweight8(new_active_planes)) continue; ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); if (ret) return ret; } return 0; } static int intel_atomic_check_crtcs(struct intel_atomic_state *state) { struct intel_crtc_state __maybe_unused *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); int ret; ret = intel_crtc_atomic_check(state, crtc); if (ret) { drm_dbg_atomic(&i915->drm, "[CRTC:%d:%s] atomic driver check failed\n", crtc->base.base.id, crtc->base.name); return ret; } } return 0; } static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, u8 transcoders) { const struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->hw.enable && transcoders & BIT(new_crtc_state->cpu_transcoder) && intel_crtc_needs_modeset(new_crtc_state)) return true; } return false; } static bool intel_pipes_need_modeset(struct intel_atomic_state *state, u8 pipes) { const struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->hw.enable && pipes & BIT(crtc->pipe) && intel_crtc_needs_modeset(new_crtc_state)) return true; } return false; } static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, struct intel_crtc *master_crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); struct intel_crtc *slave_crtc; if (!master_crtc_state->bigjoiner_pipes) return 0; /* sanity check */ if (drm_WARN_ON(&i915->drm, master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state))) return -EINVAL; if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Cannot act as big joiner master " "(need 0x%x as pipes, only 0x%x 
			    possible)\n",
			    master_crtc->base.base.id, master_crtc->base.name,
			    master_crtc_state->bigjoiner_pipes,
			    bigjoiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state;
		int ret;

		slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
		if (IS_ERR(slave_crtc_state))
			return PTR_ERR(slave_crtc_state);

		/* master being enabled, slave was already configured? */
		if (slave_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
				    slave_crtc->base.base.id, slave_crtc->base.name,
				    master_crtc->base.base.id, master_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the master crtc gets processed
		 * before the slave crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires master pipe < slave pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&master_crtc->base) >
			    drm_crtc_index(&slave_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
			    slave_crtc->base.base.id, slave_crtc->base.name,
			    master_crtc->base.base.id, master_crtc->base.name);

		slave_crtc_state->bigjoiner_pipes =
			master_crtc_state->bigjoiner_pipes;

		ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
		if (ret)
			return ret;
	}

	return 0;
}

static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state =
			intel_atomic_get_new_crtc_state(state, slave_crtc);

		slave_crtc_state->bigjoiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
	}

	master_crtc_state->bigjoiner_pipes = 0;
}

/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected from the intel_async_flip_check_hw() function.
 * Once this check is cleared, flip done interrupt is enabled using
 * the intel_crtc_enable_flip_done() function.
 *
 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
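 *
 * The checks are split in two passes: intel_async_flip_check_uapi() validates
 * the request against the uapi state early on, while intel_async_flip_check_hw()
 * later rejects anything the hardware cannot flip asynchronously (modifier,
 * stride, format, rotation, etc. changes).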
*/ static int intel_async_flip_check_uapi(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *old_plane_state; struct intel_plane_state *new_plane_state; struct intel_plane *plane; int i; if (!new_crtc_state->uapi.async_flip) return 0; if (!new_crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] not active\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } if (intel_crtc_needs_modeset(new_crtc_state)) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] modeset required\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (plane->pipe != crtc->pipe) continue; /* * TODO: Async flip is only supported through the page flip IOCTL * as of now. So support currently added for primary plane only. * Support for other planes on platforms on which supports * this(vlv/chv and icl+) should be added when async flip is * enabled in the atomic IOCTL path. */ if (!plane->async_flip) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] async flip not supported\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] no old or new framebuffer\n", plane->base.base.id, plane->base.name); return -EINVAL; } } return 0; } static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state, *new_crtc_state; const struct intel_plane_state *new_plane_state, *old_plane_state; struct intel_plane *plane; int i; old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state->uapi.async_flip) return 0; if (!new_crtc_state->hw.active) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] not active\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } if (intel_crtc_needs_modeset(new_crtc_state)) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] modeset required\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } if (old_crtc_state->active_planes != new_crtc_state->active_planes) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Active planes cannot be in async flip\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (plane->pipe != crtc->pipe) continue; /* * Only async flip capable planes should be in the state * if we're really about to ask the hardware to perform * an async flip. We should never get this far otherwise. */ if (drm_WARN_ON(&i915->drm, new_crtc_state->do_async_flip && !plane->async_flip)) return -EINVAL; /* * Only check async flip capable planes other planes * may be involved in the initial commit due to * the wm0/ddb optimization. * * TODO maybe should track which planes actually * were requested to do the async flip... */ if (!plane->async_flip) continue; /* * FIXME: This check is kept generic for all platforms. * Need to verify this for all gen9 platforms to enable * this selectively if required. */ switch (new_plane_state->hw.fb->modifier) { case DRM_FORMAT_MOD_LINEAR: /* * FIXME: Async on Linear buffer is supported on ICL as * but with additional alignment and fbc restrictions * need to be taken care of. 
These aren't applicable for * gen12+. */ if (DISPLAY_VER(i915) < 12) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n", plane->base.base.id, plane->base.name, new_plane_state->hw.fb->modifier, DISPLAY_VER(i915)); return -EINVAL; } break; case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: case I915_FORMAT_MOD_4_TILED: break; default: drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n", plane->base.base.id, plane->base.name, new_plane_state->hw.fb->modifier); return -EINVAL; } if (new_plane_state->hw.fb->format->num_planes > 1) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Planar formats do not support async flips\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->view.color_plane[0].mapping_stride != new_plane_state->view.color_plane[0].mapping_stride) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Stride cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.fb->modifier != new_plane_state->hw.fb->modifier) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.fb->format != new_plane_state->hw.fb->format) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.rotation != new_plane_state->hw.rotation) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { drm_dbg_kms(&i915->drm, "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.pixel_blend_mode != new_plane_state->hw.pixel_blend_mode) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Color range cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } /* plane decryption is allow to change only in synchronous flips */ if (old_plane_state->decrypt != new_plane_state->decrypt) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } } return 0; } static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; u8 affected_pipes = 0; u8 modeset_pipes = 0; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { affected_pipes |= crtc_state->bigjoiner_pipes; 
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->bigjoiner_pipes;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_bigjoiner_master(crtc_state))
			kill_bigjoiner_slave(state, crtc);
	}

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @_state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	bool any_ms = false;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * crtc's state no longer considered to be inherited
		 * after the first userspace/client initiated commit.
		 */
		if (!state->internal)
			new_crtc_state->inherited = false;

		if (new_crtc_state->inherited != old_crtc_state->inherited)
			new_crtc_state->uapi.mode_changed = true;

		if (new_crtc_state->uapi.scaling_filter !=
		    old_crtc_state->uapi.scaling_filter)
			new_crtc_state->uapi.mode_changed = true;
	}

	intel_vrr_check_modeset(state);

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = intel_async_flip_check_uapi(state, crtc);
		if (ret)
			return ret;
	}

	ret = intel_bigjoiner_add_affected_crtcs(state);
	if (ret)
		goto fail;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
				copy_bigjoiner_crtc_state_nomodeset(state, crtc);
			else
				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
			continue;
		}

		if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
			continue;
		}

		ret = intel_crtc_prepare_cleared_state(state, crtc);
		if (ret)
			goto fail;

		if (!new_crtc_state->hw.enable)
			continue;

		ret = intel_modeset_pipe_config(state, crtc);
		if (ret)
			goto fail;

		ret = intel_atomic_check_bigjoiner(state, crtc);
		if (ret)
			goto fail;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (new_crtc_state->hw.enable) {
			ret = intel_modeset_pipe_config_late(state, crtc);
			if (ret)
				goto fail;
		}

		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
	}

	/*
	 * Check if fastset is allowed by external dependencies like other
	 * pipes and transcoders.
	 *
	 * Right now it only forces a fullmodeset when the MST master
	 * transcoder did not change but the pipe of the master transcoder
	 * needs a fullmodeset, so all slaves also need to do a fullmodeset.
	 * Likewise, in case of port synced crtcs, if one of the synced crtcs
	 * needs a full modeset, all the other synced crtcs should be
	 * forced to do a full modeset.
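	 *
	 * The same applies to bigjoiner configurations: if any pipe in the
	 * set needs a full modeset, every pipe in the set is forced to do one.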
*/ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) continue; if (intel_dp_mst_is_slave_trans(new_crtc_state)) { enum transcoder master = new_crtc_state->mst_master_transcoder; if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { new_crtc_state->uapi.mode_changed = true; new_crtc_state->update_pipe = false; } } if (is_trans_port_sync_mode(new_crtc_state)) { u8 trans = new_crtc_state->sync_mode_slaves_mask; if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) trans |= BIT(new_crtc_state->master_transcoder); if (intel_cpu_transcoders_need_modeset(state, trans)) { new_crtc_state->uapi.mode_changed = true; new_crtc_state->update_pipe = false; } } if (new_crtc_state->bigjoiner_pipes) { if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { new_crtc_state->uapi.mode_changed = true; new_crtc_state->update_pipe = false; } } } for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; any_ms = true; intel_release_shared_dplls(state, crtc); } if (any_ms && !check_digital_port_conflicts(state)) { drm_dbg_kms(&dev_priv->drm, "rejecting conflicting digital port configuration\n"); ret = -EINVAL; goto fail; } ret = drm_dp_mst_atomic_check(&state->base); if (ret) goto fail; ret = intel_atomic_check_planes(state); if (ret) goto fail; ret = intel_compute_global_watermarks(state); if (ret) goto fail; ret = intel_bw_atomic_check(state); if (ret) goto fail; ret = intel_cdclk_atomic_check(state, &any_ms); if (ret) goto fail; if (intel_any_crtc_needs_modeset(state)) any_ms = true; if (any_ms) { ret = intel_modeset_checks(state); if (ret) goto fail; ret = intel_modeset_calc_cdclk(state); if (ret) return ret; } ret = intel_pmdemand_atomic_check(state); if (ret) goto fail; ret = intel_atomic_check_crtcs(state); if (ret) goto fail; ret = intel_fbc_atomic_check(state); if (ret) goto fail; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_color_assert_luts(new_crtc_state); ret = intel_async_flip_check_hw(state, crtc); if (ret) goto fail; /* Either full modeset or fastset (or neither), never both */ drm_WARN_ON(&dev_priv->drm, intel_crtc_needs_modeset(new_crtc_state) && intel_crtc_needs_fastset(new_crtc_state)); if (!intel_crtc_needs_modeset(new_crtc_state) && !intel_crtc_needs_fastset(new_crtc_state)) continue; intel_crtc_state_dump(new_crtc_state, state, intel_crtc_needs_modeset(new_crtc_state) ? "modeset" : "fastset"); } return 0; fail: if (ret == -EDEADLK) return ret; /* * FIXME would probably be nice to know which crtc specifically * caused the failure, in cases where we can pinpoint it. 
*/ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) intel_crtc_state_dump(new_crtc_state, state, "failed"); return ret; } static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i, ret; ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); if (ret < 0) return ret; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (intel_crtc_needs_color_update(crtc_state)) intel_color_prepare_commit(crtc_state); } return 0; } void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) { enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); } } static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* * Update pipe size and adjust fitter if needed: the reason for this is * that in compute_mode_changes we check the native mode (not the pfit * mode) to see if we can flip rather than do a full mode set. In the * fastboot case, we'll flip, but if we don't update the pipesrc and * pfit state, we'll end up with a big fb scanned out into the wrong * sized surface. */ intel_set_pipe_src_size(new_crtc_state); /* on skylake this is done by detaching scalers */ if (DISPLAY_VER(dev_priv) >= 9) { if (new_crtc_state->pch_pfit.enabled) skl_pfit_enable(new_crtc_state); } else if (HAS_PCH_SPLIT(dev_priv)) { if (new_crtc_state->pch_pfit.enabled) ilk_pfit_enable(new_crtc_state); else if (old_crtc_state->pch_pfit.enabled) ilk_pfit_disable(old_crtc_state); } /* * The register is supposedly single buffered so perhaps * not 100% correct to do this here. But SKL+ calculate * this based on the adjust pixel rate so pfit changes do * affect it and so it must be updated for fastsets. * HSW/BDW only really need this here for fastboot, after * that the value should not change without a full modeset. */ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) hsw_set_linetime_wm(new_crtc_state); if (new_crtc_state->seamless_m_n) intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, &new_crtc_state->dp_m_n); } static void commit_pipe_pre_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); bool modeset = intel_crtc_needs_modeset(new_crtc_state); /* * During modesets pipe configuration was programmed as the * CRTC was enabled. 
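	 * Hence only the fastset-style updates below (color commit, pipe
	 * misc, pfit/pipe src) are needed when no modeset is in progress.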
*/ if (!modeset) { if (intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_arm(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) bdw_set_pipe_misc(new_crtc_state); if (intel_crtc_needs_fastset(new_crtc_state)) intel_pipe_fastset(old_crtc_state, new_crtc_state); } intel_psr2_program_trans_man_trk_ctl(new_crtc_state); intel_atomic_update_watermarks(state, crtc); } static void commit_pipe_post_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); /* * Disable the scaler(s) after the plane(s) so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. */ if (DISPLAY_VER(dev_priv) >= 9 && !intel_crtc_needs_modeset(new_crtc_state)) skl_detach_scalers(new_crtc_state); } static void intel_enable_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!intel_crtc_needs_modeset(new_crtc_state)) return; /* VRR will be enable later, if required */ intel_crtc_update_active_timings(new_crtc_state, false); dev_priv->display.funcs.display->crtc_enable(state, crtc); if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) return; /* vblanks work again, re-enable pipe CRC. */ intel_crtc_enable_pipe_crc(crtc); } static void intel_update_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); bool modeset = intel_crtc_needs_modeset(new_crtc_state); if (old_crtc_state->inherited || intel_crtc_needs_modeset(new_crtc_state)) { if (HAS_DPT(i915)) intel_dpt_configure(crtc); } if (vrr_enabling(old_crtc_state, new_crtc_state)) { intel_vrr_enable(new_crtc_state); intel_crtc_update_active_timings(new_crtc_state, new_crtc_state->vrr.enable); } if (!modeset) { if (new_crtc_state->preload_luts && intel_crtc_needs_color_update(new_crtc_state)) intel_color_load_luts(new_crtc_state); intel_pre_plane_update(state, crtc); if (intel_crtc_needs_fastset(new_crtc_state)) intel_encoders_update_pipe(state, crtc); if (DISPLAY_VER(i915) >= 11 && intel_crtc_needs_fastset(new_crtc_state)) icl_set_pipe_chicken(new_crtc_state); } intel_fbc_update(state, crtc); drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); if (!modeset && intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_noarm(new_crtc_state); intel_crtc_planes_update_noarm(state, crtc); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); commit_pipe_pre_planes(state, crtc); intel_crtc_planes_update_arm(state, crtc); commit_pipe_post_planes(state, crtc); intel_pipe_update_end(new_crtc_state); /* * We usually enable FIFO underrun interrupts as part of the * CRTC enable sequence during modesets. But when we inherit a * valid pipe configuration from the BIOS we need to take care * of enabling them on the CRTC's first fastset. 
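	 * The full CRTC enable sequence that would normally arm them never
	 * ran for such an inherited configuration.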
*/ if (intel_crtc_needs_fastset(new_crtc_state) && !modeset && old_crtc_state->inherited) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } static void intel_old_crtc_state_disables(struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. */ intel_crtc_disable_pipe_crc(crtc); dev_priv->display.funcs.display->crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); if (!new_crtc_state->hw.active) intel_initial_watermarks(state, crtc); } static void intel_commit_modeset_disables(struct intel_atomic_state *state) { struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; u32 handled = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; if (!old_crtc_state->hw.active) continue; intel_pre_plane_update(state, crtc); intel_crtc_disable_planes(state, crtc); } /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; if (!old_crtc_state->hw.active) continue; /* In case of Transcoder port Sync master slave CRTCs can be * assigned in any order and we need to make sure that * slave CRTCs are disabled first and then master CRTC since * Slave vblanks are masked till Master Vblanks. */ if (!is_trans_port_sync_slave(old_crtc_state) && !intel_dp_mst_is_slave_trans(old_crtc_state) && !intel_crtc_is_bigjoiner_slave(old_crtc_state)) continue; intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); handled |= BIT(crtc->pipe); } /* Disable everything else left on */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state) || (handled & BIT(crtc->pipe))) continue; if (!old_crtc_state->hw.active) continue; intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); } } static void intel_commit_modeset_enables(struct intel_atomic_state *state) { struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!new_crtc_state->hw.active) continue; intel_enable_crtc(state, crtc); intel_update_crtc(state, crtc); } } static void skl_commit_modeset_enables(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; u8 update_pipes = 0, modeset_pipes = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { enum pipe pipe = crtc->pipe; if (!new_crtc_state->hw.active) continue; /* ignore allocations for crtc's that have been turned off. */ if (!intel_crtc_needs_modeset(new_crtc_state)) { entries[pipe] = old_crtc_state->wm.skl.ddb; update_pipes |= BIT(pipe); } else { modeset_pipes |= BIT(pipe); } } /* * Whenever the number of active pipes changes, we need to make sure we * update the pipes in the right order so that their ddb allocations * never overlap with each other between CRTC updates. Otherwise we'll * cause pipe underruns and other bad stuff. 
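	 * The loop below keeps retrying until every pipe whose new allocation
	 * no longer overlaps the entries still in use has been updated.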
	 *
	 * So first let's enable all pipes that do not need a full modeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm,
			    skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}

static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);

	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);

		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}

static void intel_atomic_cleanup_work(struct
work_struct *work) { struct intel_atomic_state *state = container_of(work, struct intel_atomic_state, base.commit_work); struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state; struct intel_crtc *crtc; int i; for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) intel_color_cleanup_commit(old_crtc_state); drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); drm_atomic_helper_commit_cleanup_done(&state->base); drm_atomic_state_put(&state->base); intel_atomic_helper_free_state(i915); } static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_plane *plane; struct intel_plane_state *plane_state; int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct drm_framebuffer *fb = plane_state->hw.fb; int cc_plane; int ret; if (!fb) continue; cc_plane = intel_fb_rc_ccs_cc_plane(fb); if (cc_plane < 0) continue; /* * The layout of the fast clear color value expected by HW * (the DRM ABI requiring this value to be located in fb at * offset 0 of cc plane, plane #2 previous generations or * plane #1 for flat ccs): * - 4 x 4 bytes per-channel value * (in surface type specific float/int format provided by the fb user) * - 8 bytes native color value used by the display * (converted/written by GPU during a fast clear operation using the * above per-channel values) * * The commit's FB prepare hook already ensured that FB obj is pinned and the * caller made sure that the object is synced wrt. the related color clear value * GPU write on it. */ ret = i915_gem_object_read_from_page(intel_fb_obj(fb), fb->offsets[cc_plane] + 16, &plane_state->ccval, sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ drm_WARN_ON(&i915->drm, ret); } } static void intel_atomic_commit_tail(struct intel_atomic_state *state) { struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; intel_wakeref_t wakeref = 0; int i; intel_atomic_commit_fence_wait(state); drm_atomic_helper_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base); /* * During full modesets we write a lot of registers, wait * for PLLs, etc. Doing that while DC states are enabled * is not a good idea. * * During fastsets and other updates we also need to * disable DC states due to the following scenario: * 1. DC5 exit and PSR exit happen * 2. Some or all _noarm() registers are written * 3. Due to some long delay PSR is re-entered * 4. DC5 entry -> DMC saves the already written new * _noarm() registers and the old not yet written * _arm() registers * 5. DC5 exit -> DMC restores a mixture of old and * new register values and arms the update * 6. PSR exit -> hardware latches a mixture of old and * new register values -> corrupted frame, or worse * 7. New _arm() registers are finally written * 8. Hardware finally latches a complete set of new * register values, and subsequent frames will be OK again * * Also note that due to the pipe CSC hardware issues on * SKL/GLK DC states must remain off until the pipe CSC * state readout has happened. Otherwise we risk corrupting * the CSC latched register values with the readout (see * skl_read_csc() and skl_color_commit_noarm()). 
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); intel_atomic_prepare_plane_clear_colors(state); for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state) || intel_crtc_needs_fastset(new_crtc_state)) intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); } intel_commit_modeset_disables(state); /* FIXME: Eventually get rid of our crtc->config pointer */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) crtc->config = new_crtc_state; /* * In XE_LPD+ Pmdemand combines many parameters such as voltage index, * plls, cdclk frequency, QGV point selection parameter etc. Voltage * index, cdclk/ddiclk frequencies are supposed to be configured before * the cdclk config is set. */ intel_pmdemand_pre_plane_update(state); if (state->modeset) { drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); intel_set_cdclk_pre_plane_update(state); intel_modeset_verify_disabled(dev_priv, state); } intel_sagv_pre_plane_update(state); /* Complete the events for pipes that have now been disabled */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { bool modeset = intel_crtc_needs_modeset(new_crtc_state); /* Complete events for now disable pipes here. */ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { spin_lock_irq(&dev->event_lock); drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->uapi.event); spin_unlock_irq(&dev->event_lock); new_crtc_state->uapi.event = NULL; } } intel_encoders_update_prepare(state); intel_dbuf_pre_plane_update(state); intel_mbus_dbox_update(state); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->do_async_flip) intel_crtc_enable_flip_done(state, crtc); } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ dev_priv->display.funcs.display->commit_modeset_enables(state); if (state->modeset) intel_set_cdclk_post_plane_update(state); intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To * fix this: * - wrap the optimization/post_plane_update stuff into a per-crtc work. * - schedule that vblank worker _before_ calling hw_done * - at the start of commit_tail, cancel it _synchrously * - switch over to the vblank wait helper in the core after that since * we don't need out special handling any more. */ drm_atomic_helper_wait_for_flip_done(dev, &state->base); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->do_async_flip) intel_crtc_disable_flip_done(state, crtc); } /* * Now that the vblank has passed, we can go ahead and program the * optimal watermarks on platforms that need two-step watermark * programming. * * TODO: Move this (and other cleanup) to an async worker eventually. */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { /* * Gen2 reports pipe underruns whenever all planes are disabled. * So re-enable underrun reporting after some planes get enabled. * * We do this before .optimize_watermarks() so that we have a * chance of catching underruns with the intermediate watermarks * vs. the new plane configuration. 
*/ if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); intel_optimize_watermarks(state, crtc); } intel_dbuf_post_plane_update(state); intel_psr_post_plane_update(state); for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ hsw_ips_post_update(state, crtc); /* * Activate DRRS after state readout to avoid * dp_m_n vs. dp_m2_n2 confusion on BDW+. */ intel_drrs_activate(new_crtc_state); /* * DSB cleanup is done in cleanup_work aligning with framebuffer * cleanup. So copy and reset the dsb structure to sync with * commit_done and later do dsb cleanup in cleanup_work. * * FIXME get rid of this funny new->old swapping */ old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); } /* Underruns don't always raise interrupts, so check manually */ intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); if (state->modeset) intel_verify_planes(state); intel_sagv_post_plane_update(state); intel_pmdemand_post_plane_update(state); drm_atomic_helper_commit_hw_done(&state->base); if (state->modeset) { /* As one of the primary mmio accessors, KMS has a high * likelihood of triggering bugs in unclaimed access. After we * finish modesetting, see if an error has been flagged, and if * so enable debugging for the next modeset - and hope we catch * the culprit. */ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); } /* * Delay re-enabling DC states by 17 ms to avoid the off->on->off * toggling overhead at and above 60 FPS. */ intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not * impede the current task (userspace for blocking modesets) that * are executed inline. For out-of-line asynchronous modesets/flips, * deferring to a new worker seems overkill, but we would place a * schedule point (cond_resched()) here anyway to keep latencies * down. 
*/ INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); queue_work(system_highpri_wq, &state->base.commit_work); } static void intel_atomic_commit_work(struct work_struct *work) { struct intel_atomic_state *state = container_of(work, struct intel_atomic_state, base.commit_work); intel_atomic_commit_tail(state); } static int intel_atomic_commit_ready(struct i915_sw_fence *fence, enum i915_sw_fence_notify notify) { struct intel_atomic_state *state = container_of(fence, struct intel_atomic_state, commit_ready); switch (notify) { case FENCE_COMPLETE: /* we do blocking waits in the worker, nothing to do here */ break; case FENCE_FREE: { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_atomic_helper *helper = &i915->display.atomic_helper; if (llist_add(&state->freed, &helper->free_list)) queue_work(i915->unordered_wq, &helper->free_work); break; } } return NOTIFY_DONE; } static void intel_atomic_track_fbs(struct intel_atomic_state *state) { struct intel_plane_state *old_plane_state, *new_plane_state; struct intel_plane *plane; int i; for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), to_intel_frontbuffer(new_plane_state->hw.fb), plane->frontbuffer_bit); } int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) { struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); drm_atomic_state_get(&state->base); i915_sw_fence_init(&state->commit_ready, intel_atomic_commit_ready); /* * The intel_legacy_cursor_update() fast path takes care * of avoiding the vblank waits for simple cursor * movement and flips. For cursor on/off and size changes, * we want to perform the vblank waits so that watermark * updates happen during the correct frames. Gen9+ have * double buffered watermarks and so shouldn't need this. * * Unset state->legacy_cursor_update before the call to * drm_atomic_helper_setup_commit() because otherwise * drm_atomic_helper_wait_for_flip_done() is a noop and * we get FIFO underruns because we didn't wait * for vblank. * * FIXME doing watermarks and fb cleanup from a vblank worker * (assuming we had any) would solve these problems. 
*/ if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) if (new_crtc_state->wm.need_postvbl_update || new_crtc_state->update_wm_post) state->base.legacy_cursor_update = false; } ret = intel_atomic_prepare_commit(state); if (ret) { drm_dbg_atomic(&dev_priv->drm, "Preparing state failed with %i\n", ret); i915_sw_fence_commit(&state->commit_ready); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } ret = drm_atomic_helper_setup_commit(&state->base, nonblock); if (!ret) ret = drm_atomic_helper_swap_state(&state->base, true); if (!ret) intel_atomic_swap_global_state(state); if (ret) { struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; i915_sw_fence_commit(&state->commit_ready); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) intel_color_cleanup_commit(new_crtc_state); drm_atomic_helper_cleanup_planes(dev, &state->base); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); drm_atomic_state_get(&state->base); INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); i915_sw_fence_commit(&state->commit_ready); if (nonblock && state->modeset) { queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); } else if (nonblock) { queue_work(dev_priv->display.wq.flip, &state->base.commit_work); } else { if (state->modeset) flush_workqueue(dev_priv->display.wq.modeset); intel_atomic_commit_tail(state); } return 0; } /** * intel_plane_destroy - destroy a plane * @plane: plane to destroy * * Common destruction function for all types of planes (primary, cursor, * sprite). 
*/ void intel_plane_destroy(struct drm_plane *plane) { drm_plane_cleanup(plane); kfree(to_intel_plane(plane)); } int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); if (!drmmode_crtc) return -ENOENT; crtc = to_intel_crtc(drmmode_crtc); pipe_from_crtc_id->pipe = crtc->pipe; return 0; } static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_encoder *source_encoder; u32 possible_clones = 0; for_each_intel_encoder(dev, source_encoder) { if (encoders_cloneable(encoder, source_encoder)) possible_clones |= drm_encoder_mask(&source_encoder->base); } return possible_clones; } static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_crtc *crtc; u32 possible_crtcs = 0; for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) possible_crtcs |= drm_crtc_mask(&crtc->base); return possible_crtcs; } static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) { if (!IS_MOBILE(dev_priv)) return false; if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) return false; if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) return false; return true; } static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 9) return false; if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) return false; if (HAS_PCH_LPT_H(dev_priv) && intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) return false; /* DDI E can't be used if DDI A requires 4 lanes */ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; if (!dev_priv->display.vbt.int_crt_support) return false; return true; } bool assert_port_valid(struct drm_i915_private *i915, enum port port) { return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), "Platform does not support port %c\n", port_name(port)); } void intel_setup_outputs(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; bool dpd_is_edp = false; intel_pps_unlock_regs_wa(dev_priv); if (!HAS_DISPLAY(dev_priv)) return; if (HAS_DDI(dev_priv)) { if (intel_ddi_crt_present(dev_priv)) intel_crt_init(dev_priv); intel_bios_for_each_encoder(dev_priv, intel_ddi_init); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) vlv_dsi_init(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; /* * intel_edp_init_connector() depends on this completing first, * to prevent the registration of both eDP and LVDS and the * incorrect sharing of the PPS. 
*/ intel_lvds_init(dev_priv); intel_crt_init(dev_priv); dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); if (ilk_has_edp_a(dev_priv)) g4x_dp_init(dev_priv, DP_A, PORT_A); if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); if (!found) g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); } if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) intel_crt_init(dev_priv); /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC * (no way to plug in a DP->HDMI dongle) the DDC pins for * eDP ports may have been muxed to an alternate function. * Thus we can't rely on the DP_DETECTED bit alone to detect * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. * * Sadly the straps seem to be missing sometimes even for HDMI * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap * and VBT for the presence of the port. Additionally we can't * trust the port type the VBT declares as we've seen at least * HDMI ports that the VBT claim are DP or eDP. */ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); if (IS_CHERRYVIEW(dev_priv)) { /* * eDP not supported on port D, * so no need to worry about it */ has_port = intel_bios_is_port_present(dev_priv, PORT_D); if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); } vlv_dsi_init(dev_priv); } else if (IS_PINEVIEW(dev_priv)) { intel_lvds_init(dev_priv); intel_crt_init(dev_priv); } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { bool found = false; if (IS_MOBILE(dev_priv)) intel_lvds_init(dev_priv); intel_crt_init(dev_priv); if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); if (!found && IS_G4X(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "probing HDMI on SDVOB\n"); g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); } if (!found && IS_G4X(dev_priv)) g4x_dp_init(dev_priv, DP_B, PORT_B); } /* Before G4X SDVOC doesn't have its own detect register */ if (intel_de_read(dev_priv, GEN3_SDVOB) & 
SDVO_DETECTED) { drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); } if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { if (IS_G4X(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "probing HDMI on SDVOC\n"); g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); } if (IS_G4X(dev_priv)) g4x_dp_init(dev_priv, DP_C, PORT_C); } if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) g4x_dp_init(dev_priv, DP_D, PORT_D); if (SUPPORTS_TV(dev_priv)) intel_tv_init(dev_priv); } else if (DISPLAY_VER(dev_priv) == 2) { if (IS_I85X(dev_priv)) intel_lvds_init(dev_priv); intel_crt_init(dev_priv); intel_dvo_init(dev_priv); } for_each_intel_encoder(&dev_priv->drm, encoder) { encoder->base.possible_crtcs = intel_encoder_possible_crtcs(encoder); encoder->base.possible_clones = intel_encoder_possible_clones(encoder); } intel_init_pch_refclk(dev_priv); drm_helper_move_panel_connectors_to_head(&dev_priv->drm); } static int max_dotclock(struct drm_i915_private *i915) { int max_dotclock = i915->max_dotclk_freq; /* icl+ might use bigjoiner */ if (DISPLAY_VER(i915) >= 11) max_dotclock *= 2; return max_dotclock; } enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); int hdisplay_max, htotal_max; int vdisplay_max, vtotal_max; /* * Can't reject DBLSCAN here because Xorg ddxen can add piles * of DBLSCAN modes to the output's mode list when they detect * the scaling mode property on the connector. And they don't * ask the kernel to validate those modes in any way until * modeset time at which point the client gets a protocol error. * So in order to not upset those clients we silently ignore the * DBLSCAN flag on such connectors. For other connectors we will * reject modes with the DBLSCAN flag in encoder->compute_config(). * And we always reject DBLSCAN modes in connector->mode_valid() * as we never want such modes on the connector's mode list. */ if (mode->vscan > 1) return MODE_NO_VSCAN; if (mode->flags & DRM_MODE_FLAG_HSKEW) return MODE_H_ILLEGAL; if (mode->flags & (DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC | DRM_MODE_FLAG_PCSYNC)) return MODE_HSYNC; if (mode->flags & (DRM_MODE_FLAG_BCAST | DRM_MODE_FLAG_PIXMUX | DRM_MODE_FLAG_CLKDIV2)) return MODE_BAD; /* * Reject clearly excessive dotclocks early to * avoid having to worry about huge integers later. 
*/ if (mode->clock > max_dotclock(dev_priv)) return MODE_CLOCK_HIGH; /* Transcoder timing limits */ if (DISPLAY_VER(dev_priv) >= 11) { hdisplay_max = 16384; vdisplay_max = 8192; htotal_max = 16384; vtotal_max = 8192; } else if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ vdisplay_max = 4096; htotal_max = 8192; vtotal_max = 8192; } else if (DISPLAY_VER(dev_priv) >= 3) { hdisplay_max = 4096; vdisplay_max = 4096; htotal_max = 8192; vtotal_max = 8192; } else { hdisplay_max = 2048; vdisplay_max = 2048; htotal_max = 4096; vtotal_max = 4096; } if (mode->hdisplay > hdisplay_max || mode->hsync_start > htotal_max || mode->hsync_end > htotal_max || mode->htotal > htotal_max) return MODE_H_ILLEGAL; if (mode->vdisplay > vdisplay_max || mode->vsync_start > vtotal_max || mode->vsync_end > vtotal_max || mode->vtotal > vtotal_max) return MODE_V_ILLEGAL; if (DISPLAY_VER(dev_priv) >= 5) { if (mode->hdisplay < 64 || mode->htotal - mode->hdisplay < 32) return MODE_H_ILLEGAL; if (mode->vtotal - mode->vdisplay < 5) return MODE_V_ILLEGAL; } else { if (mode->htotal - mode->hdisplay < 32) return MODE_H_ILLEGAL; if (mode->vtotal - mode->vdisplay < 3) return MODE_V_ILLEGAL; } /* * Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. */ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && mode->hsync_start == mode->hdisplay) return MODE_H_ILLEGAL; return MODE_OK; } enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, const struct drm_display_mode *mode, bool bigjoiner) { int plane_width_max, plane_height_max; /* * intel_mode_valid() should be * sufficient on older platforms. */ if (DISPLAY_VER(dev_priv) < 9) return MODE_OK; /* * Most people will probably want a fullscreen * plane so let's not advertize modes that are * too big for that. 
*/ if (DISPLAY_VER(dev_priv) >= 11) { plane_width_max = 5120 << bigjoiner; plane_height_max = 4320; } else { plane_width_max = 5120; plane_height_max = 4096; } if (mode->hdisplay > plane_width_max) return MODE_H_ILLEGAL; if (mode->vdisplay > plane_height_max) return MODE_V_ILLEGAL; return MODE_OK; } static const struct intel_display_funcs skl_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, .commit_modeset_enables = skl_commit_modeset_enables, .get_initial_plane_config = skl_get_initial_plane_config, }; static const struct intel_display_funcs ddi_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, }; static const struct intel_display_funcs pch_split_display_funcs = { .get_pipe_config = ilk_get_pipe_config, .crtc_enable = ilk_crtc_enable, .crtc_disable = ilk_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, }; static const struct intel_display_funcs vlv_display_funcs = { .get_pipe_config = i9xx_get_pipe_config, .crtc_enable = valleyview_crtc_enable, .crtc_disable = i9xx_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, }; static const struct intel_display_funcs i9xx_display_funcs = { .get_pipe_config = i9xx_get_pipe_config, .crtc_enable = i9xx_crtc_enable, .crtc_disable = i9xx_crtc_disable, .commit_modeset_enables = intel_commit_modeset_enables, .get_initial_plane_config = i9xx_get_initial_plane_config, }; /** * intel_init_display_hooks - initialize the display modesetting hooks * @dev_priv: device private */ void intel_init_display_hooks(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 9) { dev_priv->display.funcs.display = &skl_display_funcs; } else if (HAS_DDI(dev_priv)) { dev_priv->display.funcs.display = &ddi_display_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { dev_priv->display.funcs.display = &pch_split_display_funcs; } else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) { dev_priv->display.funcs.display = &vlv_display_funcs; } else { dev_priv->display.funcs.display = &i9xx_display_funcs; } } int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; struct drm_modeset_acquire_ctx ctx; struct intel_crtc *crtc; int ret = 0; state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; drm_modeset_acquire_init(&ctx, 0); state->acquire_ctx = &ctx; to_intel_atomic_state(state)->internal = true; retry: for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } if (crtc_state->hw.active) { struct intel_encoder *encoder; ret = drm_atomic_add_affected_planes(state, &crtc->base); if (ret) goto out; /* * FIXME hack to force a LUT update to avoid the * plane update forcing the pipe gamma on without * having a proper LUT loaded. Remove once we * have readout for pipe gamma enable. 
*/ crtc_state->uapi.color_mgmt_changed = true; for_each_intel_encoder_mask(dev, encoder, crtc_state->uapi.encoder_mask) { if (encoder->initial_fastset_check && !encoder->initial_fastset_check(encoder, crtc_state)) { ret = drm_atomic_add_affected_connectors(state, &crtc->base); if (ret) goto out; } } } } ret = drm_atomic_commit(state); out: if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); return ret; } void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); enum transcoder cpu_transcoder = (enum transcoder)pipe; /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, .m2 = 7, .p1 = 13, .p2 = 4, .n = 2, }; u32 dpll, fp; int i; drm_WARN_ON(&dev_priv->drm, i9xx_calc_dpll_params(48000, &clock) != 25154); drm_dbg_kms(&dev_priv->drm, "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", pipe_name(pipe), clock.vco, clock.dot); fp = i9xx_dpll_compute_fp(&clock); dpll = DPLL_DVO_2X_MODE | DPLL_VGA_MODE_DIS | ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | PLL_P2_DIVIDE_BY_4 | PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), HACTIVE(640 - 1) | HTOTAL(800 - 1)); intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), VACTIVE(480 - 1) | VTOTAL(525 - 1)); intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); intel_de_write(dev_priv, PIPESRC(pipe), PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); intel_de_write(dev_priv, FP0(pipe), fp); intel_de_write(dev_priv, FP1(pipe), fp); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); intel_de_write(dev_priv, DPLL(pipe), dpll); /* Wait for the clocks to stabilize. */ intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. 
*/ intel_de_write(dev_priv, DPLL(pipe), dpll); /* We do this three times for luck */ for (i = 0; i < 3 ; i++) { intel_de_write(dev_priv, DPLL(pipe), dpll); intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* wait for warmup */ } intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE); intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); } void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); intel_de_write(dev_priv, TRANSCONF(pipe), 0); intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_stopped(crtc); intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); intel_de_posting_read(dev_priv, DPLL(pipe)); } void intel_hpd_poll_fini(struct drm_i915_private *i915) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; /* Kill all the work that may have been queued by hpd. */ drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); if (connector->hdcp.shim) { cancel_delayed_work_sync(&connector->hdcp.check_work); cancel_work_sync(&connector->hdcp.prop_work); } } drm_connector_list_iter_end(&conn_iter); } bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915); }
linux-master
drivers/gpu/drm/i915/display/intel_display.c
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * David Airlie */ #include <linux/async.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/vga_switcheroo.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_mman.h" #include "i915_drv.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_fbdev.h" #include "intel_frontbuffer.h" struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; struct i915_vma *vma; unsigned long vma_flags; async_cookie_t cookie; int preferred_bpp; /* Whether or not fbdev hpd processing is temporarily suspended */ bool hpd_suspended: 1; /* Set when a hotplug was received while HPD processing was suspended */ bool hpd_waiting: 1; /* Protects hpd_suspended */ struct mutex hpd_lock; }; static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper) { return container_of(fb_helper, struct intel_fbdev, helper); } static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev) { return ifbdev->fb->frontbuffer; } static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev) { intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU); } FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(intel_fbdev, drm_fb_helper_damage_range, drm_fb_helper_damage_area) static int intel_fbdev_set_par(struct fb_info *info) { struct intel_fbdev *ifbdev = to_intel_fbdev(info->par); int ret; ret = drm_fb_helper_set_par(info); if (ret == 0) intel_fbdev_invalidate(ifbdev); return ret; } static int intel_fbdev_blank(int blank, struct fb_info *info) { struct intel_fbdev *ifbdev = to_intel_fbdev(info->par); int ret; ret = drm_fb_helper_blank(blank, info); if (ret == 0) intel_fbdev_invalidate(ifbdev); return ret; } static int intel_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct intel_fbdev *ifbdev = to_intel_fbdev(info->par); int ret; ret = drm_fb_helper_pan_display(var, info); if (ret == 0) intel_fbdev_invalidate(ifbdev); return ret; } static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct intel_fbdev *fbdev = 
to_intel_fbdev(info->par); struct drm_gem_object *bo = drm_gem_fb_get_obj(&fbdev->fb->base, 0); struct drm_i915_gem_object *obj = to_intel_bo(bo); return i915_gem_fb_mmap(obj, vma); } static const struct fb_ops intelfb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev), DRM_FB_HELPER_DEFAULT_OPS, .fb_set_par = intel_fbdev_set_par, .fb_blank = intel_fbdev_blank, .fb_pan_display = intel_fbdev_pan_display, __FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev), .fb_mmap = intel_fbdev_mmap, }; static int intelfb_alloc(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = to_intel_fbdev(helper); struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_i915_gem_object *obj; int size; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) sizes->surface_bpp = 32; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(sizes->surface_bpp, 8), 64); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; size = PAGE_ALIGN(size); obj = ERR_PTR(-ENODEV); if (HAS_LMEM(dev_priv)) { obj = i915_gem_object_create_lmem(dev_priv, size, I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_USER); } else { /* * If the FB is too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. * * Also skip stolen on MTL as Wa_22018444074 mitigation. */ if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); } if (IS_ERR(obj)) { drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj); return PTR_ERR(obj); } fb = intel_framebuffer_create(obj, &mode_cmd); i915_gem_object_put(obj); if (IS_ERR(fb)) return PTR_ERR(fb); ifbdev->fb = to_intel_framebuffer(fb); return 0; } static int intelfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct intel_fbdev *ifbdev = to_intel_fbdev(helper); struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt; const struct i915_gtt_view view = { .type = I915_GTT_VIEW_NORMAL, }; intel_wakeref_t wakeref; struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; void __iomem *vaddr; struct drm_i915_gem_object *obj; struct i915_gem_ww_ctx ww; int ret; mutex_lock(&ifbdev->hpd_lock); ret = ifbdev->hpd_suspended ? 
-EAGAIN : 0; mutex_unlock(&ifbdev->hpd_lock); if (ret) return ret; if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { drm_dbg_kms(&dev_priv->drm, "BIOS fb too small (%dx%d), we require (%dx%d)," " releasing it\n", intel_fb->base.width, intel_fb->base.height, sizes->fb_width, sizes->fb_height); drm_framebuffer_put(&intel_fb->base); intel_fb = ifbdev->fb = NULL; } if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) { drm_dbg_kms(&dev_priv->drm, "no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) return ret; intel_fb = ifbdev->fb; } else { drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n"); prealloc = true; sizes->fb_width = intel_fb->base.width; sizes->fb_height = intel_fb->base.height; } wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the * BIOS is suitable for own access. */ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false, &view, false, &flags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out_unlock; } info = drm_fb_helper_alloc_info(helper); if (IS_ERR(info)) { drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info); ret = PTR_ERR(info); goto out_unpin; } ifbdev->helper.fb = &ifbdev->fb->base; info->fbops = &intelfb_ops; obj = intel_fb_obj(&intel_fb->base); if (i915_gem_object_is_lmem(obj)) { struct intel_memory_region *mem = obj->mm.region; /* Use fbdev's framebuffer from lmem for discrete */ info->fix.smem_start = (unsigned long)(mem->io_start + i915_gem_object_get_dma_address(obj, 0)); info->fix.smem_len = obj->base.size; } else { /* Our framebuffer is the entirety of fbdev's system memory */ info->fix.smem_start = (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma)); info->fix.smem_len = vma->size; } for_i915_gem_ww(&ww, ret, false) { ret = i915_gem_object_lock(vma->obj, &ww); if (ret) continue; vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { drm_err(&dev_priv->drm, "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); ret = PTR_ERR(vaddr); continue; } } if (ret) goto out_unpin; info->screen_base = vaddr; info->screen_size = vma->size; drm_fb_helper_fill_info(info, &ifbdev->helper, sizes); /* If the object is shmemfs backed, it will have given us zeroed pages. * If the object is stolen however, it will be full of whatever * garbage was left in there. 
*/ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n", ifbdev->fb->base.width, ifbdev->fb->base.height, i915_ggtt_offset(vma)); ifbdev->vma = vma; ifbdev->vma_flags = flags; intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); vga_switcheroo_client_fb_set(pdev, info); return 0; out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) { if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) return 0; if (helper->fb->funcs->dirty) return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); return 0; } static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intelfb_create, .fb_dirty = intelfb_dirty, }; static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) { /* We rely on the object-free to release the VMA pinning for * the info->screen_base mmaping. Leaking the VMA is simpler than * trying to rectify all the possible error paths leading here. */ drm_fb_helper_fini(&ifbdev->helper); if (ifbdev->vma) intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags); if (ifbdev->fb) drm_framebuffer_remove(&ifbdev->fb->base); drm_fb_helper_unprepare(&ifbdev->helper); kfree(ifbdev); } /* * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible. * The core display code will have read out the current plane configuration, * so we use that to figure out if there's an object for us to use as the * fb, and if so, we re-use it for the fbdev configuration. * * Note we only support a single fb shared across pipes for boot (mostly for * fbcon), so we just find the biggest and use that. 
*/ static bool intel_fbdev_init_bios(struct drm_device *dev, struct intel_fbdev *ifbdev) { struct drm_i915_private *i915 = to_i915(dev); struct intel_framebuffer *fb = NULL; struct intel_crtc *crtc; unsigned int max_size = 0; /* Find the largest fb */ for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); struct drm_i915_gem_object *obj = intel_fb_obj(plane_state->uapi.fb); if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] not active, skipping\n", crtc->base.base.id, crtc->base.name); continue; } if (!obj) { drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] no fb, skipping\n", plane->base.base.id, plane->base.name); continue; } if (obj->base.size > max_size) { drm_dbg_kms(&i915->drm, "found possible fb from [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); fb = to_intel_framebuffer(plane_state->uapi.fb); max_size = obj->base.size; } } if (!fb) { drm_dbg_kms(&i915->drm, "no active fbs found, not using BIOS config\n"); goto out; } /* Now make sure all the pipes will fit into it */ for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); unsigned int cur_size; if (!crtc_state->uapi.active) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] not active, skipping\n", crtc->base.base.id, crtc->base.name); continue; } drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n", plane->base.base.id, plane->base.name); /* * See if the plane fb we found above will fit on this * pipe. Note we need to use the selected fb's pitch and bpp * rather than the current pipe's, since they differ. 
*/ cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay; cur_size = cur_size * fb->base.format->cpp[0]; if (fb->base.pitches[0] < cur_size) { drm_dbg_kms(&i915->drm, "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n", plane->base.base.id, plane->base.name, cur_size, fb->base.pitches[0]); fb = NULL; break; } cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(&fb->base, 0, cur_size); cur_size *= fb->base.pitches[0]; drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n", crtc->base.base.id, crtc->base.name, crtc_state->uapi.adjusted_mode.crtc_hdisplay, crtc_state->uapi.adjusted_mode.crtc_vdisplay, fb->base.format->cpp[0] * 8, cur_size); if (cur_size > max_size) { drm_dbg_kms(&i915->drm, "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n", plane->base.base.id, plane->base.name, cur_size, max_size); fb = NULL; break; } drm_dbg_kms(&i915->drm, "fb big enough [PLANE:%d:%s] (%d >= %d)\n", plane->base.base.id, plane->base.name, max_size, cur_size); } if (!fb) { drm_dbg_kms(&i915->drm, "BIOS fb not suitable for all pipes, not using\n"); goto out; } ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8; ifbdev->fb = fb; drm_framebuffer_get(&ifbdev->fb->base); /* Final pass to check if any active pipes don't have fbs */ for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); if (!crtc_state->uapi.active) continue; drm_WARN(dev, !plane_state->uapi.fb, "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n", plane->base.base.id, plane->base.name); } drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n"); return true; out: return false; } static void intel_fbdev_suspend_worker(struct work_struct *work) { intel_fbdev_set_suspend(&container_of(work, struct drm_i915_private, display.fbdev.suspend_work)->drm, FBINFO_STATE_RUNNING, true); } int intel_fbdev_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev; int ret; if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); if (ifbdev == NULL) return -ENOMEM; mutex_init(&ifbdev->hpd_lock); drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs); if (intel_fbdev_init_bios(dev, ifbdev)) ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp; else ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp; ret = drm_fb_helper_init(dev, &ifbdev->helper); if (ret) { kfree(ifbdev); return ret; } dev_priv->display.fbdev.fbdev = ifbdev; INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker); return 0; } static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) { struct intel_fbdev *ifbdev = data; /* Due to peculiar init order wrt to hpd handling this is separate. 
*/ if (drm_fb_helper_initial_config(&ifbdev->helper)) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); } void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) { struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); } static void intel_fbdev_sync(struct intel_fbdev *ifbdev) { if (!ifbdev->cookie) return; /* Only serialises with all preceding async calls, hence +1 */ async_synchronize_cookie(ifbdev->cookie + 1); ifbdev->cookie = 0; } void intel_fbdev_unregister(struct drm_i915_private *dev_priv) { struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true); if (!current_is_async()) intel_fbdev_sync(ifbdev); drm_fb_helper_unregister_info(&ifbdev->helper); } void intel_fbdev_fini(struct drm_i915_private *dev_priv) { struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev); if (!ifbdev) return; intel_fbdev_destroy(ifbdev); } /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD * processing, fbdev will perform a full connector reprobe if a hotplug event * was received while HPD was suspended. */ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state) { struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev; bool send_hpd = false; mutex_lock(&ifbdev->hpd_lock); ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; ifbdev->hpd_waiting = false; mutex_unlock(&ifbdev->hpd_lock); if (send_hpd) { drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n"); drm_fb_helper_hotplug_event(&ifbdev->helper); } } void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; struct fb_info *info; if (!ifbdev) return; if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv))) return; if (!ifbdev->vma) goto set_suspend; info = ifbdev->helper.info; if (synchronous) { /* Flush any pending work to turn the console on, and then * wait to turn it off. It must be synchronous as we are * about to suspend or unload the driver. * * Note that from within the work-handler, we cannot flush * ourselves, so only flush outstanding work upon suspend! */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->display.fbdev.suspend_work); console_lock(); } else { /* * The console lock can be pretty contented on resume due * to all the printk activity. Try to keep it out of the hot * path of resume if possible. */ drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING); if (!console_trylock()) { /* Don't block our own workqueue as this can * be run in parallel with other i915.ko tasks. */ queue_work(dev_priv->unordered_wq, &dev_priv->display.fbdev.suspend_work); return; } } /* On resume from hibernation: If the object is shmemfs backed, it has * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. 
	 */
	if (state == FBINFO_STATE_RUNNING &&
	    !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
		memset_io(info->screen_base, 0, info->screen_size);

	drm_fb_helper_set_suspend(&ifbdev->helper, state);
	console_unlock();

set_suspend:
	intel_fbdev_hpd_set_suspend(dev_priv, state);
}

void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
	bool send_hpd;

	if (!ifbdev)
		return;

	intel_fbdev_sync(ifbdev);

	mutex_lock(&ifbdev->hpd_lock);
	send_hpd = !ifbdev->hpd_suspended;
	ifbdev->hpd_waiting = true;
	mutex_unlock(&ifbdev->hpd_lock);

	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
		drm_fb_helper_hotplug_event(&ifbdev->helper);
}

void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
	struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;

	if (!ifbdev)
		return;

	intel_fbdev_sync(ifbdev);

	if (!ifbdev->vma)
		return;

	if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
		intel_fbdev_invalidate(ifbdev);
}

struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
	if (!fbdev || !fbdev->helper.fb)
		return NULL;

	return to_intel_framebuffer(fbdev->helper.fb);
}
linux-master
drivers/gpu/drm/i915/display/intel_fbdev.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"

static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
{
	return DISPLAY_VER(dev_priv) >= 9;
}

static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
				 const struct drm_intel_sprite_colorkey *set)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_intel_sprite_colorkey *key = &plane_state->ckey;

	*key = *set;

	/*
	 * We want src key enabled on the
	 * sprite and not on the primary.
	 */
	if (plane->id == PLANE_PRIMARY && set->flags & I915_SET_COLORKEY_SOURCE)
		key->flags = 0;

	/*
	 * On SKL+ we want dst key enabled on
	 * the primary and not on the sprite.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
	    set->flags & I915_SET_COLORKEY_DESTINATION)
		key->flags = 0;
}

int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_intel_sprite_colorkey *set = data;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* ignore the pointless "none" flag */
	set->flags &= ~I915_SET_COLORKEY_NONE;

	if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
		return -EINVAL;

	/* Make sure we don't try to enable both src & dest simultaneously */
	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
		return -EINVAL;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    set->flags & I915_SET_COLORKEY_DESTINATION)
		return -EINVAL;

	plane = drm_plane_find(dev, file_priv, set->plane_id);
	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
		return -ENOENT;

	/*
	 * SKL+ only plane 2 can do destination keying against plane 1.
	 * Also multiple planes can't do destination keying on the same
	 * pipe simultaneously.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 &&
	    to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
	    set->flags & I915_SET_COLORKEY_DESTINATION)
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(plane->dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}
	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

	while (1) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret)
			intel_plane_set_ckey(to_intel_plane_state(plane_state), set);

		/*
		 * On some platforms we have to configure
		 * the dst colorkey on the primary plane.
		 */
		if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
			struct intel_crtc *crtc =
				intel_crtc_for_pipe(dev_priv,
						    to_intel_plane(plane)->pipe);

			plane_state = drm_atomic_get_plane_state(state,
								 crtc->base.primary);
			ret = PTR_ERR_OR_ZERO(plane_state);
			if (!ret)
				intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
		}

		if (!ret)
			ret = drm_atomic_commit(state);

		if (ret != -EDEADLK)
			break;

		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

	drm_atomic_state_put(state);

out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
linux-master
drivers/gpu/drm/i915/display/intel_sprite_uapi.c
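The colorkey ioctl above is built around DRM's modeset acquire context: it allocates an atomic state, pulls in the plane states it needs, and commits, and whenever a lock-ordering conflict surfaces as -EDEADLK it clears the state, calls drm_modeset_backoff(), and retries the whole loop. Below is a self-contained toy model of that retry/backoff control flow, with a fake try_commit() that injects two simulated deadlocks; none of these names belong to the DRM API, this is only a sketch of the loop's shape.

/* Toy model of the -EDEADLK retry loop: attempt the transaction, and on a
 * simulated deadlock drop the partial work, "back off", and try again.
 * try_commit() and its failure injection are purely illustrative.
 */
#include <errno.h>
#include <stdio.h>

static int try_commit(int *attempts)
{
	/* Pretend the first two attempts hit a lock-ordering conflict. */
	if ((*attempts)++ < 2)
		return -EDEADLK;
	return 0;
}

int main(void)
{
	int attempts = 0;
	int ret;

	for (;;) {
		ret = try_commit(&attempts);	/* build state + commit */
		if (ret != -EDEADLK)
			break;			/* success or a real error */

		/* Stands in for drm_atomic_state_clear() plus
		 * drm_modeset_backoff(): throw away the partial state and
		 * wait for the contended lock before retrying.
		 */
		printf("deadlock detected, backing off and retrying\n");
	}

	printf("commit finished after %d attempt(s), ret=%d\n", attempts, ret);
	return ret ? 1 : 0;
}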
/* SPDX-License-Identifier: MIT */ /* * Copyright © 2019 Intel Corporation */ #include <linux/string_helpers.h> #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" #include "intel_combo_phy.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_power_map.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pmdemand.h" #include "intel_pps_regs.h" #include "intel_snps_phy.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" #include "vlv_sideband.h" #define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ for_each_power_well(__dev_priv, __power_well) \ for_each_if(test_bit((__domain), (__power_well)->domains.bits)) #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \ for_each_power_well_reverse(__dev_priv, __power_well) \ for_each_if(test_bit((__domain), (__power_well)->domains.bits)) const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return "DISPLAY_CORE"; case POWER_DOMAIN_PIPE_A: return "PIPE_A"; case POWER_DOMAIN_PIPE_B: return "PIPE_B"; case POWER_DOMAIN_PIPE_C: return "PIPE_C"; case POWER_DOMAIN_PIPE_D: return "PIPE_D"; case POWER_DOMAIN_PIPE_PANEL_FITTER_A: return "PIPE_PANEL_FITTER_A"; case POWER_DOMAIN_PIPE_PANEL_FITTER_B: return "PIPE_PANEL_FITTER_B"; case POWER_DOMAIN_PIPE_PANEL_FITTER_C: return "PIPE_PANEL_FITTER_C"; case POWER_DOMAIN_PIPE_PANEL_FITTER_D: return "PIPE_PANEL_FITTER_D"; case POWER_DOMAIN_TRANSCODER_A: return "TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: return "TRANSCODER_B"; case POWER_DOMAIN_TRANSCODER_C: return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_D: return "TRANSCODER_D"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; case POWER_DOMAIN_TRANSCODER_DSI_A: return "TRANSCODER_DSI_A"; case POWER_DOMAIN_TRANSCODER_DSI_C: return "TRANSCODER_DSI_C"; case POWER_DOMAIN_TRANSCODER_VDSC_PW2: return "TRANSCODER_VDSC_PW2"; case POWER_DOMAIN_PORT_DDI_LANES_A: return "PORT_DDI_LANES_A"; case POWER_DOMAIN_PORT_DDI_LANES_B: return "PORT_DDI_LANES_B"; case POWER_DOMAIN_PORT_DDI_LANES_C: return "PORT_DDI_LANES_C"; case POWER_DOMAIN_PORT_DDI_LANES_D: return "PORT_DDI_LANES_D"; case POWER_DOMAIN_PORT_DDI_LANES_E: return "PORT_DDI_LANES_E"; case POWER_DOMAIN_PORT_DDI_LANES_F: return "PORT_DDI_LANES_F"; case POWER_DOMAIN_PORT_DDI_LANES_TC1: return "PORT_DDI_LANES_TC1"; case POWER_DOMAIN_PORT_DDI_LANES_TC2: return "PORT_DDI_LANES_TC2"; case POWER_DOMAIN_PORT_DDI_LANES_TC3: return "PORT_DDI_LANES_TC3"; case POWER_DOMAIN_PORT_DDI_LANES_TC4: return "PORT_DDI_LANES_TC4"; case POWER_DOMAIN_PORT_DDI_LANES_TC5: return "PORT_DDI_LANES_TC5"; case POWER_DOMAIN_PORT_DDI_LANES_TC6: return "PORT_DDI_LANES_TC6"; case POWER_DOMAIN_PORT_DDI_IO_A: return "PORT_DDI_IO_A"; case POWER_DOMAIN_PORT_DDI_IO_B: return "PORT_DDI_IO_B"; case POWER_DOMAIN_PORT_DDI_IO_C: return "PORT_DDI_IO_C"; case POWER_DOMAIN_PORT_DDI_IO_D: return "PORT_DDI_IO_D"; case POWER_DOMAIN_PORT_DDI_IO_E: return "PORT_DDI_IO_E"; case POWER_DOMAIN_PORT_DDI_IO_F: return "PORT_DDI_IO_F"; case POWER_DOMAIN_PORT_DDI_IO_TC1: return "PORT_DDI_IO_TC1"; case POWER_DOMAIN_PORT_DDI_IO_TC2: return "PORT_DDI_IO_TC2"; case POWER_DOMAIN_PORT_DDI_IO_TC3: return "PORT_DDI_IO_TC3"; case POWER_DOMAIN_PORT_DDI_IO_TC4: return 
"PORT_DDI_IO_TC4"; case POWER_DOMAIN_PORT_DDI_IO_TC5: return "PORT_DDI_IO_TC5"; case POWER_DOMAIN_PORT_DDI_IO_TC6: return "PORT_DDI_IO_TC6"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: return "PORT_CRT"; case POWER_DOMAIN_PORT_OTHER: return "PORT_OTHER"; case POWER_DOMAIN_VGA: return "VGA"; case POWER_DOMAIN_AUDIO_MMIO: return "AUDIO_MMIO"; case POWER_DOMAIN_AUDIO_PLAYBACK: return "AUDIO_PLAYBACK"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; case POWER_DOMAIN_AUX_IO_B: return "AUX_IO_B"; case POWER_DOMAIN_AUX_IO_C: return "AUX_IO_C"; case POWER_DOMAIN_AUX_IO_D: return "AUX_IO_D"; case POWER_DOMAIN_AUX_IO_E: return "AUX_IO_E"; case POWER_DOMAIN_AUX_IO_F: return "AUX_IO_F"; case POWER_DOMAIN_AUX_A: return "AUX_A"; case POWER_DOMAIN_AUX_B: return "AUX_B"; case POWER_DOMAIN_AUX_C: return "AUX_C"; case POWER_DOMAIN_AUX_D: return "AUX_D"; case POWER_DOMAIN_AUX_E: return "AUX_E"; case POWER_DOMAIN_AUX_F: return "AUX_F"; case POWER_DOMAIN_AUX_USBC1: return "AUX_USBC1"; case POWER_DOMAIN_AUX_USBC2: return "AUX_USBC2"; case POWER_DOMAIN_AUX_USBC3: return "AUX_USBC3"; case POWER_DOMAIN_AUX_USBC4: return "AUX_USBC4"; case POWER_DOMAIN_AUX_USBC5: return "AUX_USBC5"; case POWER_DOMAIN_AUX_USBC6: return "AUX_USBC6"; case POWER_DOMAIN_AUX_TBT1: return "AUX_TBT1"; case POWER_DOMAIN_AUX_TBT2: return "AUX_TBT2"; case POWER_DOMAIN_AUX_TBT3: return "AUX_TBT3"; case POWER_DOMAIN_AUX_TBT4: return "AUX_TBT4"; case POWER_DOMAIN_AUX_TBT5: return "AUX_TBT5"; case POWER_DOMAIN_AUX_TBT6: return "AUX_TBT6"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: return "INIT"; case POWER_DOMAIN_MODESET: return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; case POWER_DOMAIN_DC_OFF: return "DC_OFF"; case POWER_DOMAIN_TC_COLD_OFF: return "TC_COLD_OFF"; default: MISSING_CASE(domain); return "?"; } } /** * __intel_display_power_is_enabled - unlocked check for a power domain * @dev_priv: i915 device instance * @domain: power domain to check * * This is the unlocked version of intel_display_power_is_enabled() and should * only be used from error capture and recovery code where deadlocks are * possible. * * Returns: * True when the power domain is enabled, false otherwise. */ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_well *power_well; bool is_enabled; if (dev_priv->runtime_pm.suspended) return false; is_enabled = true; for_each_power_domain_well_reverse(dev_priv, power_well, domain) { if (intel_power_well_is_always_on(power_well)) continue; if (!intel_power_well_is_enabled_cached(power_well)) { is_enabled = false; break; } } return is_enabled; } /** * intel_display_power_is_enabled - check for a power domain * @dev_priv: i915 device instance * @domain: power domain to check * * This function can be used to check the hw power domain state. It is mostly * used in hardware state readout functions. Everywhere else code should rely * upon explicit power domain reference counting to ensure that the hardware * block is powered up before accessing it. * * Callers must hold the relevant modesetting locks to ensure that concurrent * threads can't disable the power well while the caller tries to read a few * registers. * * Returns: * True when the power domain is enabled, false otherwise. 
*/ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains; bool ret; power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); ret = __intel_display_power_is_enabled(dev_priv, domain); mutex_unlock(&power_domains->lock); return ret; } static u32 sanitize_target_dc_state(struct drm_i915_private *i915, u32 target_dc_state) { struct i915_power_domains *power_domains = &i915->display.power.domains; static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, DC_STATE_EN_DC3CO, DC_STATE_DISABLE, }; int i; for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { if (target_dc_state != states[i]) continue; if (power_domains->allowed_dc_mask & target_dc_state) break; target_dc_state = states[i + 1]; } return target_dc_state; } /** * intel_display_power_set_target_dc_state - Set target dc state. * @dev_priv: i915 device * @state: state which needs to be set as target_dc_state. * * This function sets the "DC off" power well target_dc_state, * based upon which the "DC off" power well will * enable the desired DC state. */ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, u32 state) { struct i915_power_well *power_well; bool dc_off_enabled; struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); if (drm_WARN_ON(&dev_priv->drm, !power_well)) goto unlock; state = sanitize_target_dc_state(dev_priv, state); if (state == power_domains->target_dc_state) goto unlock; dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); /* * If DC off power well is disabled, need to enable and disable the * DC off power well to effect target DC state. 
*/ if (!dc_off_enabled) intel_power_well_enable(dev_priv, power_well); power_domains->target_dc_state = state; if (!dc_off_enabled) intel_power_well_disable(dev_priv, power_well); unlock: mutex_unlock(&power_domains->lock); } #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) static void __async_put_domains_mask(struct i915_power_domains *power_domains, struct intel_power_domain_mask *mask) { bitmap_or(mask->bits, power_domains->async_put_domains[0].bits, power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) static bool assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); return !drm_WARN_ON(&i915->drm, bitmap_intersects(power_domains->async_put_domains[0].bits, power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)); } static bool __async_put_domains_state_ok(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); struct intel_power_domain_mask async_put_mask; enum intel_display_power_domain domain; bool err = false; err |= !assert_async_put_domain_masks_disjoint(power_domains); __async_put_domains_mask(power_domains, &async_put_mask); err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, &async_put_mask) err |= drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); return !err; } static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, struct intel_power_domain_mask *mask) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); enum intel_display_power_domain domain; drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) drm_dbg(&i915->drm, "%s use_count %d\n", intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } static void print_async_put_domains_state(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); drm_dbg(&i915->drm, "async_put_wakeref %u\n", power_domains->async_put_wakeref); print_power_domains(power_domains, "async_put_domains[0]", &power_domains->async_put_domains[0]); print_power_domains(power_domains, "async_put_domains[1]", &power_domains->async_put_domains[1]); } static void verify_async_put_domains_state(struct i915_power_domains *power_domains) { if (!__async_put_domains_state_ok(power_domains)) print_async_put_domains_state(power_domains); } #else static void assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) { } static void verify_async_put_domains_state(struct i915_power_domains *power_domains) { } #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ static void async_put_domains_mask(struct i915_power_domains *power_domains, struct intel_power_domain_mask *mask) { assert_async_put_domain_masks_disjoint(power_domains); __async_put_domains_mask(power_domains, mask); } static void async_put_domains_clear_domain(struct i915_power_domains *power_domains, enum intel_display_power_domain domain) { assert_async_put_domain_masks_disjoint(power_domains); clear_bit(domain, power_domains->async_put_domains[0].bits); clear_bit(domain, 
power_domains->async_put_domains[1].bits); } static void cancel_async_put_work(struct i915_power_domains *power_domains, bool sync) { if (sync) cancel_delayed_work_sync(&power_domains->async_put_work); else cancel_delayed_work(&power_domains->async_put_work); power_domains->async_put_next_delay = 0; } static bool intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_power_domain_mask async_put_mask; bool ret = false; async_put_domains_mask(power_domains, &async_put_mask); if (!test_bit(domain, async_put_mask.bits)) goto out_verify; async_put_domains_clear_domain(power_domains, domain); ret = true; async_put_domains_mask(power_domains, &async_put_mask); if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) goto out_verify; cancel_async_put_work(power_domains, false); intel_runtime_pm_put_raw(&dev_priv->runtime_pm, fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); return ret; } static void __intel_display_power_get_domain(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; if (intel_display_power_grab_async_put_ref(dev_priv, domain)) return; for_each_power_domain_well(dev_priv, power_well, domain) intel_power_well_get(dev_priv, power_well); power_domains->domain_use_count[domain]++; } /** * intel_display_power_get - grab a power domain reference * @dev_priv: i915 device instance * @domain: power domain to reference * * This function grabs a power domain reference for @domain and ensures that the * power domain and all its parents are powered up. Therefore users should only * grab a reference to the innermost power domain they need. * * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. */ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); mutex_unlock(&power_domains->lock); return wakeref; } /** * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain * @dev_priv: i915 device instance * @domain: power domain to reference * * This function grabs a power domain reference for @domain and ensures that the * power domain and all its parents are powered up. Therefore users should only * grab a reference to the innermost power domain they need. * * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again. 
*/ intel_wakeref_t intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref; bool is_enabled; wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return false; mutex_lock(&power_domains->lock); if (__intel_display_power_is_enabled(dev_priv, domain)) { __intel_display_power_get_domain(dev_priv, domain); is_enabled = true; } else { is_enabled = false; } mutex_unlock(&power_domains->lock); if (!is_enabled) { intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); wakeref = 0; } return wakeref; } static void __intel_display_power_put_domain(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains; struct i915_power_well *power_well; const char *name = intel_display_power_domain_str(domain); struct intel_power_domain_mask async_put_mask; power_domains = &dev_priv->display.power.domains; drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], "Use count on domain %s is already zero\n", name); async_put_domains_mask(power_domains, &async_put_mask); drm_WARN(&dev_priv->drm, test_bit(domain, async_put_mask.bits), "Async disabling of domain %s is pending\n", name); power_domains->domain_use_count[domain]--; for_each_power_domain_well_reverse(dev_priv, power_well, domain) intel_power_well_put(dev_priv, power_well); } static void __intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); __intel_display_power_put_domain(dev_priv, domain); mutex_unlock(&power_domains->lock); } static void queue_async_put_domains_work(struct i915_power_domains *power_domains, intel_wakeref_t wakeref, int delay_ms) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, display.power.domains); drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); power_domains->async_put_wakeref = wakeref; drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, &power_domains->async_put_work, msecs_to_jiffies(delay_ms))); } static void release_async_put_domains(struct i915_power_domains *power_domains, struct intel_power_domain_mask *mask) { struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, display.power.domains); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; /* * The caller must hold already raw wakeref, upgrade that to a proper * wakeref to make the state checker happy about the HW access during * power well disabling. */ assert_rpm_raw_wakeref_held(rpm); wakeref = intel_runtime_pm_get(rpm); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. 
*/ async_put_domains_clear_domain(power_domains, domain); __intel_display_power_put_domain(dev_priv, domain); } intel_runtime_pm_put(rpm, wakeref); } static void intel_display_power_put_async_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, display.power.domains.async_put_work.work); struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; mutex_lock(&power_domains->lock); /* * Bail out if all the domain refs pending to be released were grabbed * by subsequent gets or a flush_work. */ old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); if (!old_work_wakeref) goto out_verify; release_async_put_domains(power_domains, &power_domains->async_put_domains[0]); /* Requeue the work if more domains were async put meanwhile. */ if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { bitmap_copy(power_domains->async_put_domains[0].bits, power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM); bitmap_zero(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM); queue_async_put_domains_work(power_domains, fetch_and_zero(&new_work_wakeref), power_domains->async_put_next_delay); power_domains->async_put_next_delay = 0; } else { /* * Cancel the work that got queued after this one got dequeued, * since here we released the corresponding async-put reference. */ cancel_async_put_work(power_domains, false); } out_verify: verify_async_put_domains_state(power_domains); mutex_unlock(&power_domains->lock); if (old_work_wakeref) intel_runtime_pm_put_raw(rpm, old_work_wakeref); if (new_work_wakeref) intel_runtime_pm_put_raw(rpm, new_work_wakeref); } /** * __intel_display_power_put_async - release a power domain reference asynchronously * @i915: i915 device instance * @domain: power domain to reference * @wakeref: wakeref acquired for the reference that is being released * @delay_ms: delay of powering down the power domain * * This function drops the power domain reference obtained by * intel_display_power_get*() and schedules a work to power down the * corresponding hardware block if this is the last reference. * The power down is delayed by @delay_ms if this is >= 0, or by a default * 100 ms otherwise. */ void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref, int delay_ms) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); delay_ms = delay_ms >= 0 ? delay_ms : 100; mutex_lock(&power_domains->lock); if (power_domains->domain_use_count[domain] > 1) { __intel_display_power_put_domain(i915, domain); goto out_verify; } drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); /* Let a pending work requeue itself or queue a new one. 
*/ if (power_domains->async_put_wakeref) { set_bit(domain, power_domains->async_put_domains[1].bits); power_domains->async_put_next_delay = max(power_domains->async_put_next_delay, delay_ms); } else { set_bit(domain, power_domains->async_put_domains[0].bits); queue_async_put_domains_work(power_domains, fetch_and_zero(&work_wakeref), delay_ms); } out_verify: verify_async_put_domains_state(power_domains); mutex_unlock(&power_domains->lock); if (work_wakeref) intel_runtime_pm_put_raw(rpm, work_wakeref); intel_runtime_pm_put(rpm, wakeref); } /** * intel_display_power_flush_work - flushes the async display power disabling work * @i915: i915 device instance * * Flushes any pending work that was scheduled by a preceding * intel_display_power_put_async() call, completing the disabling of the * corresponding power domains. * * Note that the work handler function may still be running after this * function returns; to ensure that the work handler isn't running use * intel_display_power_flush_work_sync() instead. */ void intel_display_power_flush_work(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; mutex_lock(&power_domains->lock); work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); if (!work_wakeref) goto out_verify; async_put_domains_mask(power_domains, &async_put_mask); release_async_put_domains(power_domains, &async_put_mask); cancel_async_put_work(power_domains, false); out_verify: verify_async_put_domains_state(power_domains); mutex_unlock(&power_domains->lock); if (work_wakeref) intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); } /** * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work * @i915: i915 device instance * * Like intel_display_power_flush_work(), but also ensure that the work * handler function is not running any more when this function returns. */ static void intel_display_power_flush_work_sync(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; intel_display_power_flush_work(i915); cancel_async_put_work(power_domains, true); verify_async_put_domains_state(power_domains); drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) /** * intel_display_power_put - release a power domain reference * @dev_priv: i915 device instance * @domain: power domain to reference * @wakeref: wakeref acquired for the reference that is being released * * This function drops the power domain reference obtained by * intel_display_power_get() and might power down the corresponding hardware * block right away if this is the last reference. */ void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { __intel_display_power_put(dev_priv, domain); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } #else /** * intel_display_power_put_unchecked - release an unchecked power domain reference * @dev_priv: i915 device instance * @domain: power domain to reference * * This function drops the power domain reference obtained by * intel_display_power_get() and might power down the corresponding hardware * block right away if this is the last reference. * * This function is only for the power domain code's internal use to suppress wakeref * tracking when the correspondig debug kconfig option is disabled, should not * be used otherwise. 
*/ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { __intel_display_power_put(dev_priv, domain); intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } #endif void intel_display_power_get_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, enum intel_display_power_domain domain) { intel_wakeref_t __maybe_unused wf; drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get(i915, domain); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif set_bit(domain, power_domain_set->mask.bits); } bool intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, enum intel_display_power_domain domain) { intel_wakeref_t wf; drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get_if_enabled(i915, domain); if (!wf) return false; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif set_bit(domain, power_domain_set->mask.bits); return true; } void intel_display_power_put_mask_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, struct intel_power_domain_mask *mask) { enum intel_display_power_domain domain; drm_WARN_ON(&i915->drm, !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) { intel_wakeref_t __maybe_unused wf = -1; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); #endif intel_display_power_put(i915, domain, wf); clear_bit(domain, power_domain_set->mask.bits); } } static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) { if (disable_power_well >= 0) return !!disable_power_well; return 1; } static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int enable_dc) { u32 mask; int requested_dc; int max_dc; if (!HAS_DISPLAY(dev_priv)) return 0; if (IS_DG2(dev_priv)) max_dc = 1; else if (IS_DG1(dev_priv)) max_dc = 3; else if (DISPLAY_VER(dev_priv) >= 12) max_dc = 4; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) max_dc = 1; else if (DISPLAY_VER(dev_priv) >= 9) max_dc = 2; else max_dc = 0; /* * DC9 has a separate HW flow from the rest of the DC states, * not depending on the DMC firmware. It's needed by system * suspend/resume, so allow it unconditionally. */ mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || DISPLAY_VER(dev_priv) >= 11 ? 
DC_STATE_EN_DC9 : 0; if (!dev_priv->params.disable_power_well) max_dc = 0; if (enable_dc >= 0 && enable_dc <= max_dc) { requested_dc = enable_dc; } else if (enable_dc == -1) { requested_dc = max_dc; } else if (enable_dc > max_dc && enable_dc <= 4) { drm_dbg_kms(&dev_priv->drm, "Adjusting requested max DC state (%d->%d)\n", enable_dc, max_dc); requested_dc = max_dc; } else { drm_err(&dev_priv->drm, "Unexpected value for enable_dc (%d)\n", enable_dc); requested_dc = max_dc; } switch (requested_dc) { case 4: mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; break; case 3: mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; break; case 2: mask |= DC_STATE_EN_UPTO_DC6; break; case 1: mask |= DC_STATE_EN_UPTO_DC5; break; } drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); return mask; } /** * intel_power_domains_init - initializes the power domain structures * @dev_priv: i915 device instance * * Initializes the power domain structures for @dev_priv depending upon the * supported platform. */ int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, dev_priv->params.disable_power_well); power_domains->allowed_dc_mask = get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); power_domains->target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); mutex_init(&power_domains->lock); INIT_DELAYED_WORK(&power_domains->async_put_work, intel_display_power_put_async_work); return intel_display_power_map_init(power_domains); } /** * intel_power_domains_cleanup - clean up power domains resources * @dev_priv: i915 device instance * * Release any resources acquired by intel_power_domains_init() */ void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) { intel_display_power_map_cleanup(&dev_priv->display.power.domains); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); for_each_power_well(dev_priv, power_well) intel_power_well_sync_hw(dev_priv, power_well); mutex_unlock(&power_domains->lock); } static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, enum dbuf_slice slice, bool enable) { i915_reg_t reg = DBUF_CTL_S(slice); bool state; intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST, enable ? DBUF_POWER_REQUEST : 0); intel_de_posting_read(dev_priv, reg); udelay(10); state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; drm_WARN(&dev_priv->drm, enable != state, "DBuf slice %d power %s timeout!\n", slice, str_enable_disable(enable)); } void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask; enum dbuf_slice slice; drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n", req_slices, slice_mask); drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", req_slices); /* * Might be running this in parallel to gen9_dc_off_power_well_enable * being called from intel_dp_detect for instance, * which causes assertion triggered by race condition, * as gen9_assert_dbuf_enabled might preempt this when registers * were already updated, while dev_priv was not. 
*/ mutex_lock(&power_domains->lock); for_each_dbuf_slice(dev_priv, slice) gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); dev_priv->display.dbuf.enabled_slices = req_slices; mutex_unlock(&power_domains->lock); } static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) { u8 slices_mask; dev_priv->display.dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices; if (DISPLAY_VER(dev_priv) >= 14) intel_pmdemand_program_dbuf(dev_priv, slices_mask); /* * Just power up at least 1 slice, we will * figure out later which slices we have and what we need. */ gen9_dbuf_slices_update(dev_priv, slices_mask); } static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) { gen9_dbuf_slices_update(dev_priv, 0); if (DISPLAY_VER(dev_priv) >= 14) intel_pmdemand_program_dbuf(dev_priv, 0); } static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) { enum dbuf_slice slice; if (IS_ALDERLAKE_P(dev_priv)) return; for_each_dbuf_slice(dev_priv, slice) intel_de_rmw(dev_priv, DBUF_CTL_S(slice), DBUF_TRACKER_STATE_SERVICE_MASK, DBUF_TRACKER_STATE_SERVICE(8)); } static void icl_mbus_init(struct drm_i915_private *dev_priv) { unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask; u32 mask, val, i; if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) return; mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | MBUS_ABOX_BT_CREDIT_POOL2_MASK | MBUS_ABOX_B_CREDIT_MASK | MBUS_ABOX_BW_CREDIT_MASK; val = MBUS_ABOX_BT_CREDIT_POOL1(16) | MBUS_ABOX_BT_CREDIT_POOL2(16) | MBUS_ABOX_B_CREDIT(1) | MBUS_ABOX_BW_CREDIT(1); /* * gen12 platforms that use abox1 and abox2 for pixel data reads still * expect us to program the abox_ctl0 register as well, even though * we don't have to program other instance-0 registers like BW_BUDDY. */ if (DISPLAY_VER(dev_priv) == 12) abox_regs |= BIT(0); for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); } static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) { u32 val = intel_de_read(dev_priv, LCPLL_CTL); /* * The LCPLL register should be turned on by the BIOS. For now * let's just check its state and print errors in case * something is wrong. Don't even try to turn it on. 
*/ if (val & LCPLL_CD_SOURCE_FCLK) drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); if (val & LCPLL_PLL_DISABLE) drm_err(&dev_priv->drm, "LCPLL is disabled\n"); if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); } static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) I915_STATE_WARN(dev_priv, crtc->active, "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), "Display power well on\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, "Panel power on\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); if (IS_HASWELL(dev_priv)) I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, "CPU PWM2 enabled\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, "PCH PWM1 enabled\n"); I915_STATE_WARN(dev_priv, (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), "Utility pin enabled in PWM mode\n"); I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); /* * In theory we can still leave IRQs enabled, as long as only the HPD * interrupts remain enabled. We used to check for that, but since it's * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. */ I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv), "IRQs enabled\n"); } static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) { if (IS_HASWELL(dev_priv)) return intel_de_read(dev_priv, D_COMP_HSW); else return intel_de_read(dev_priv, D_COMP_BDW); } static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) drm_dbg_kms(&dev_priv->drm, "Failed to write to D_COMP\n"); } else { intel_de_write(dev_priv, D_COMP_BDW, val); intel_de_posting_read(dev_priv, D_COMP_BDW); } } /* * This function implements pieces of two sequences from BSpec: * - Sequence for display software to disable LCPLL * - Sequence for display software to allow package C8+ * The steps implemented here are just the steps that actually touch the LCPLL * register. Callers should take care of disabling all the display engine * functions, doing the mode unset, fixing interrupts, etc. 
*/ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, bool switch_to_fclk, bool allow_power_down) { u32 val; assert_can_disable_lcpll(dev_priv); val = intel_de_read(dev_priv, LCPLL_CTL); if (switch_to_fclk) { val |= LCPLL_CD_SOURCE_FCLK; intel_de_write(dev_priv, LCPLL_CTL, val); if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 1)) drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); val = intel_de_read(dev_priv, LCPLL_CTL); } val |= LCPLL_PLL_DISABLE; intel_de_write(dev_priv, LCPLL_CTL, val); intel_de_posting_read(dev_priv, LCPLL_CTL); if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); ndelay(100); if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); if (allow_power_down) { intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); intel_de_posting_read(dev_priv, LCPLL_CTL); } } /* * Fully restores LCPLL, disallowing power down and switching back to LCPLL * source. */ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { u32 val; val = intel_de_read(dev_priv, LCPLL_CTL); if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) return; /* * Make sure we're not on PC8 state before disabling PC8, otherwise * we'll hang the machine. To prevent PC8 state, just enable force_wake. */ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; intel_de_write(dev_priv, LCPLL_CTL, val); intel_de_posting_read(dev_priv, LCPLL_CTL); } val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_FORCE; val &= ~D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); val = intel_de_read(dev_priv, LCPLL_CTL); val &= ~LCPLL_PLL_DISABLE; intel_de_write(dev_priv, LCPLL_CTL, val); if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); } /* * Package states C8 and deeper are really deep PC states that can only be * reached when all the devices on the system allow it, so even if the graphics * device allows PC8+, it doesn't mean the system will actually get to these * states. Our driver only allows PC8+ when going into runtime PM. * * The requirements for PC8+ are that all the outputs are disabled, the power * well is disabled and most interrupts are disabled, and these are also * requirements for runtime PM. When these conditions are met, we manually do * the other conditions: disable the interrupts, clocks and switch LCPLL refclk * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard * hang the machine. * * When we really reach PC8 or deeper states (not just when we allow it) we lose * the state of some registers, so when we come back from PC8+ we need to * restore this state. We don't get into PC8+ if we're not in RC6, so we don't * need to take care of the registers kept by RC6. 
Notice that this happens even * if we don't put the device in PCI D3 state (which is what currently happens * because of the runtime PM support). * * For more, read "Display Sequences for Package C8" on the hardware * documentation. */ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); if (HAS_PCH_LPT_LP(dev_priv)) intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE, 0); lpt_disable_clkout_dp(dev_priv); hsw_disable_lcpll(dev_priv, true, true); } static void hsw_disable_pc8(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); hsw_restore_lcpll(dev_priv); intel_init_pch_refclk(dev_priv); /* Many display registers don't survive PC8+ */ intel_clock_gating_init(dev_priv); } static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, bool enable) { i915_reg_t reg; u32 reset_bits; if (IS_IVYBRIDGE(dev_priv)) { reg = GEN7_MSG_CTL; reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; } else { reg = HSW_NDE_RSTWRN_OPT; reset_bits = RESET_PCH_HANDSHAKE_ENABLE; } if (DISPLAY_VER(dev_priv) >= 14) reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0); } static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* enable PCH reset handshake */ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); if (!HAS_DISPLAY(dev_priv)) return; /* enable PG1 and Misc I/O */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); intel_cdclk_init_hw(dev_priv); gen9_dbuf_enable(dev_priv); if (resume) intel_dmc_load_program(dev_priv); } static void skl_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); intel_cdclk_uninit_hw(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ /* disable PG1 and Misc I/O */ mutex_lock(&power_domains->lock); /* * BSpec says to keep the MISC IO power well enabled here, only * remove our request for power well 1. * Note that even though the driver's request is removed power well 1 * may stay enabled after this due to DMC's own request on it. */ well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); usleep_range(10, 30); /* 10 us delay per Bspec */ } static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT * or else the reset will hang because there is no PCH to respond. * Move the handshake programming to initialization sequence. * Previously was left up to BIOS. 
*/ intel_pch_reset_handshake(dev_priv, false); if (!HAS_DISPLAY(dev_priv)) return; /* Enable PG1 */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); intel_cdclk_init_hw(dev_priv); gen9_dbuf_enable(dev_priv); if (resume) intel_dmc_load_program(dev_priv); } static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); intel_cdclk_uninit_hw(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ /* * Disable PW1 (PG1). * Note that even though the driver's request is removed power well 1 * may stay enabled after this due to DMC's own request on it. */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); usleep_range(10, 30); /* 10 us delay per Bspec */ } struct buddy_page_mask { u32 page_mask; u8 type; u8 num_channels; }; static const struct buddy_page_mask tgl_buddy_page_masks[] = { { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, {} }; static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, {} }; static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) { enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask; int config, i; /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv)) return; if (IS_ALDERLAKE_S(dev_priv) || (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))) /* Wa_1409767108 */ table = wa_1409767108_buddy_page_masks; else table = tgl_buddy_page_masks; for (config = 0; table[config].page_mask != 0; config++) if (table[config].num_channels == num_channels && table[config].type == type) break; if (table[config].page_mask == 0) { drm_dbg(&dev_priv->drm, "Unknown memory configuration; disabling address buddy logic.\n"); for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) intel_de_write(dev_priv, BW_BUDDY_CTL(i), BW_BUDDY_DISABLE); } else { for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), 
table[config].page_mask); /* Wa_22010178259:tgl,dg1,rkl,adl-s */ if (DISPLAY_VER(dev_priv) == 12) intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), BW_BUDDY_TLB_REQ_TIMER_MASK, BW_BUDDY_TLB_REQ_TIMER(0x8)); } } } static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1) intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, PCH_DPMGUNIT_CLOCK_GATE_DISABLE); /* 1. Enable PCH reset handshake. */ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); if (!HAS_DISPLAY(dev_priv)) return; /* 2. Initialize all combo phys */ intel_combo_phy_init(dev_priv); /* * 3. Enable Power Well 1 (PG1). * The AUX IO power wells will be enabled on demand. */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); if (DISPLAY_VER(dev_priv) == 14) intel_de_rmw(dev_priv, DC_STATE_EN, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); /* 4. Enable CDCLK. */ intel_cdclk_init_hw(dev_priv); if (DISPLAY_VER(dev_priv) >= 12) gen12_dbuf_slices_config(dev_priv); /* 5. Enable DBUF. */ gen9_dbuf_enable(dev_priv); /* 6. Setup MBUS. */ icl_mbus_init(dev_priv); /* 7. Program arbiter BW_BUDDY registers */ if (DISPLAY_VER(dev_priv) >= 12) tgl_bw_buddy_init(dev_priv); /* 8. Ensure PHYs have completed calibration and adaptation */ if (IS_DG2(dev_priv)) intel_snps_phy_wait_for_calibration(dev_priv); if (resume) intel_dmc_load_program(dev_priv); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ if (DISPLAY_VER(dev_priv) >= 12) intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); /* Wa_14011503030:xelpd */ if (DISPLAY_VER(dev_priv) >= 13) intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); } static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); intel_dmc_disable_program(dev_priv); /* 1. Disable all display engine functions -> aready done */ /* 2. Disable DBUF */ gen9_dbuf_disable(dev_priv); /* 3. Disable CD clock */ intel_cdclk_uninit_hw(dev_priv); if (DISPLAY_VER(dev_priv) == 14) intel_de_rmw(dev_priv, DC_STATE_EN, 0, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH); /* * 4. Disable Power Well 1 (PG1). * The AUX IO power wells are toggled on demand, so they are already * disabled at this point. */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); /* 5. */ intel_combo_phy_uninit(dev_priv); } static void chv_phy_control_init(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); /* * DISPLAY_PHY_CONTROL can get corrupted if read. As a * workaround never ever read DISPLAY_PHY_CONTROL, and * instead maintain a shadow copy ourselves. Use the actual * power well state and lane status to reconstruct the * expected initial value. 
*/ dev_priv->display.power.chv_phy_control = PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); /* * If all lanes are disabled we leave the override disabled * with all power down bits cleared to match the state we * would use after disabling the port. Otherwise enable the * override and set the lane powerdown bits accding to the * current lane status. */ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); unsigned int mask; mask = status & DPLL_PORTB_READY_MASK; if (mask == 0xf) mask = 0x0; else dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); mask = (status & DPLL_PORTC_READY_MASK) >> 4; if (mask == 0xf) mask = 0x0; else dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; } else { dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; } if (intel_power_well_is_enabled(dev_priv, cmn_d)) { u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); unsigned int mask; mask = status & DPLL_PORTD_READY_MASK; if (mask == 0xf) mask = 0x0; else dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; } else { dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; } drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", dev_priv->display.power.chv_phy_control); /* Defer application of initial phy_control to enabling the powerwell */ } static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *disp2d = lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); /* If the display might be already active skip this */ if (intel_power_well_is_enabled(dev_priv, cmn) && intel_power_well_is_enabled(dev_priv, disp2d) && intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) return; drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); /* cmnlane needs DPLL registers */ intel_power_well_enable(dev_priv, disp2d); /* * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: * Need to assert and de-assert PHY SB reset by gating the * common lane power, then un-gating it. * Simply ungating isn't enough to reset the PHY enough to get * ports and lanes running. 
*/ intel_power_well_disable(dev_priv, cmn); } static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) { bool ret; vlv_punit_get(dev_priv); ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; vlv_punit_put(dev_priv); return ret; } static void assert_ved_power_gated(struct drm_i915_private *dev_priv) { drm_WARN(&dev_priv->drm, !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), "VED not power gated\n"); } static void assert_isp_power_gated(struct drm_i915_private *dev_priv) { static const struct pci_device_id isp_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, {} }; drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), "ISP not power gated\n"); } static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); /** * intel_power_domains_init_hw - initialize hardware power domain state * @i915: i915 device instance * @resume: Called from resume code paths or not * * This function initializes the hardware power domain state and enables all * power wells belonging to the INIT power domain. Power wells in other * domains (and not in the INIT domain) are referenced or disabled by * intel_modeset_readout_hw_state(). After that the reference count of each * power well must match its HW enabled state, see * intel_power_domains_verify_state(). * * It will return with power domains disabled (to be enabled later by * intel_power_domains_enable()) and must be paired with * intel_power_domains_driver_remove(). */ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { struct i915_power_domains *power_domains = &i915->display.power.domains; power_domains->initializing = true; if (DISPLAY_VER(i915) >= 11) { icl_display_core_init(i915, resume); } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_display_core_init(i915, resume); } else if (DISPLAY_VER(i915) == 9) { skl_display_core_init(i915, resume); } else if (IS_CHERRYVIEW(i915)) { mutex_lock(&power_domains->lock); chv_phy_control_init(i915); mutex_unlock(&power_domains->lock); assert_isp_power_gated(i915); } else if (IS_VALLEYVIEW(i915)) { mutex_lock(&power_domains->lock); vlv_cmnlane_wa(i915); mutex_unlock(&power_domains->lock); assert_ved_power_gated(i915); assert_isp_power_gated(i915); } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { hsw_assert_cdclk(i915); intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); } else if (IS_IVYBRIDGE(i915)) { intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); } /* * Keep all power wells enabled for any dependent HW access during * initialization and to make sure we keep BIOS enabled display HW * resources powered until display HW readout is complete. We drop * this reference in intel_power_domains_enable(). */ drm_WARN_ON(&i915->drm, power_domains->init_wakeref); power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); /* Disable power support if the user asked so. */ if (!i915->params.disable_power_well) { drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } intel_power_domains_sync_hw(i915); power_domains->initializing = false; } /** * intel_power_domains_driver_remove - deinitialize hw power domain state * @i915: i915 device instance * * De-initializes the display power domain HW state. It also ensures that the * device stays powered up so that the driver can be reloaded. 
* * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw(). */ void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&i915->display.power.domains.init_wakeref); /* Remove the refcount we took to keep power well support disabled. */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work_sync(i915); intel_power_domains_verify_state(i915); /* Keep the power well enabled, but cancel its rpm wakeref. */ intel_runtime_pm_put(&i915->runtime_pm, wakeref); } /** * intel_power_domains_sanitize_state - sanitize power domains state * @i915: i915 device instance * * Sanitize the power domains state during driver loading and system resume. * The function will disable all display power wells that BIOS has enabled * without a user for it (any user for a power well has taken a reference * on it by the time this function is called, after the state of all the * pipe, encoder, etc. HW resources have been sanitized). */ void intel_power_domains_sanitize_state(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); for_each_power_well_reverse(i915, power_well) { if (power_well->desc->always_on || power_well->count || !intel_power_well_is_enabled(i915, power_well)) continue; drm_dbg_kms(&i915->drm, "BIOS left unused %s power well enabled, disabling it\n", intel_power_well_name(power_well)); intel_power_well_disable(i915, power_well); } mutex_unlock(&power_domains->lock); } /** * intel_power_domains_enable - enable toggling of display power wells * @i915: i915 device instance * * Enable the ondemand enabling/disabling of the display power wells. Note that * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled * only at specific points of the display modeset sequence, thus they are not * affected by the intel_power_domains_enable()/disable() calls. The purpose * of these function is to keep the rest of power wells enabled until the end * of display HW readout (which will acquire the power references reflecting * the current HW state). */ void intel_power_domains_enable(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&i915->display.power.domains.init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); intel_power_domains_verify_state(i915); } /** * intel_power_domains_disable - disable toggling of display power wells * @i915: i915 device instance * * Disable the ondemand enabling/disabling of the display power wells. See * intel_power_domains_enable() for which power wells this call controls. */ void intel_power_domains_disable(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; drm_WARN_ON(&i915->drm, power_domains->init_wakeref); power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); intel_power_domains_verify_state(i915); } /** * intel_power_domains_suspend - suspend power domain state * @i915: i915 device instance * @s2idle: specifies whether we go to idle, or deeper sleep * * This function prepares the hardware power domain state before entering * system suspend. 
* * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) { struct i915_power_domains *power_domains = &i915->display.power.domains; intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&power_domains->init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); /* * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 * support don't manually deinit the power domains. This also means the * DMC firmware will stay active, it will power down any HW * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. */ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); return; } /* * Even if power well support was disabled we still want to disable * power wells if power domains must be deinitialized for suspend. */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); if (DISPLAY_VER(i915) >= 11) icl_display_core_uninit(i915); else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) bxt_display_core_uninit(i915); else if (DISPLAY_VER(i915) == 9) skl_display_core_uninit(i915); power_domains->display_core_suspended = true; } /** * intel_power_domains_resume - resume power domain state * @i915: i915 device instance * * This function resume the hardware power domain state during system resume. * * It will return with power domain support disabled (to be enabled later by * intel_power_domains_enable()) and must be paired with * intel_power_domains_suspend(). */ void intel_power_domains_resume(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; if (power_domains->display_core_suspended) { intel_power_domains_init_hw(i915, true); power_domains->display_core_suspended = false; } else { drm_WARN_ON(&i915->drm, power_domains->init_wakeref); power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } intel_power_domains_verify_state(i915); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) static void intel_power_domains_dump_info(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; drm_dbg(&i915->drm, "%-25s %d\n", intel_power_well_name(power_well), intel_power_well_refcount(power_well)); for_each_power_domain(domain, intel_power_well_domains(power_well)) drm_dbg(&i915->drm, " %-23s %d\n", intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } } /** * intel_power_domains_verify_state - verify the HW/SW state for all power wells * @i915: i915 device instance * * Verify if the reference count of each power well matches its HW enabled * state and the total refcount of the domains it belongs to. This must be * called after modeset HW state sanitization, which is responsible for * acquiring reference counts for any power wells in use and disabling the * ones left on by BIOS but not required by any active output. 
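 * * Note that the verification is only built in with
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM enabled; without it this function is an
 * empty stub.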
*/ static void intel_power_domains_verify_state(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; bool dump_domain_info; mutex_lock(&power_domains->lock); verify_async_put_domains_state(power_domains); dump_domain_info = false; for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; int domains_count; bool enabled; enabled = intel_power_well_is_enabled(i915, power_well); if ((intel_power_well_refcount(power_well) || intel_power_well_is_always_on(power_well)) != enabled) drm_err(&i915->drm, "power well %s state mismatch (refcount %d/enabled %d)", intel_power_well_name(power_well), intel_power_well_refcount(power_well), enabled); domains_count = 0; for_each_power_domain(domain, intel_power_well_domains(power_well)) domains_count += power_domains->domain_use_count[domain]; if (intel_power_well_refcount(power_well) != domains_count) { drm_err(&i915->drm, "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", intel_power_well_name(power_well), intel_power_well_refcount(power_well), domains_count); dump_domain_info = true; } } if (dump_domain_info) { static bool dumped; if (!dumped) { intel_power_domains_dump_info(i915); dumped = true; } } mutex_unlock(&power_domains->lock); } #else static void intel_power_domains_verify_state(struct drm_i915_private *i915) { } #endif void intel_display_power_suspend_late(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_enable_dc9(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } void intel_display_power_resume_early(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { gen9_sanitize_dc_state(i915); bxt_disable_dc9(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); } void intel_display_power_suspend(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 11) { icl_display_core_uninit(i915); bxt_enable_dc9(i915); } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_display_core_uninit(i915); bxt_enable_dc9(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } } void intel_display_power_resume(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; if (DISPLAY_VER(i915) >= 11) { bxt_disable_dc9(i915); icl_display_core_init(i915, true); if (intel_dmc_has_payload(i915)) { if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(i915); else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(i915); } } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_disable_dc9(i915); bxt_display_core_init(i915, true); if (intel_dmc_has_payload(i915) && (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) gen9_enable_dc5(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } } void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) { struct i915_power_domains *power_domains = 
&i915->display.power.domains; int i; mutex_lock(&power_domains->lock); seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); for (i = 0; i < power_domains->power_well_count; i++) { struct i915_power_well *power_well; enum intel_display_power_domain power_domain; power_well = &power_domains->power_wells[i]; seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), intel_power_well_refcount(power_well)); for_each_power_domain(power_domain, intel_power_well_domains(power_well)) seq_printf(m, " %-23s %d\n", intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); } mutex_unlock(&power_domains->lock); } struct intel_ddi_port_domains { enum port port_start; enum port port_end; enum aux_ch aux_ch_start; enum aux_ch aux_ch_end; enum intel_display_power_domain ddi_lanes; enum intel_display_power_domain ddi_io; enum intel_display_power_domain aux_io; enum intel_display_power_domain aux_legacy_usbc; enum intel_display_power_domain aux_tbt; }; static const struct intel_ddi_port_domains i9xx_port_domains[] = { { .port_start = PORT_A, .port_end = PORT_F, .aux_ch_start = AUX_CH_A, .aux_ch_end = AUX_CH_F, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, .aux_io = POWER_DOMAIN_AUX_IO_A, .aux_legacy_usbc = POWER_DOMAIN_AUX_A, .aux_tbt = POWER_DOMAIN_INVALID, }, }; static const struct intel_ddi_port_domains d11_port_domains[] = { { .port_start = PORT_A, .port_end = PORT_B, .aux_ch_start = AUX_CH_A, .aux_ch_end = AUX_CH_B, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, .aux_io = POWER_DOMAIN_AUX_IO_A, .aux_legacy_usbc = POWER_DOMAIN_AUX_A, .aux_tbt = POWER_DOMAIN_INVALID, }, { .port_start = PORT_C, .port_end = PORT_F, .aux_ch_start = AUX_CH_C, .aux_ch_end = AUX_CH_F, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, .aux_io = POWER_DOMAIN_AUX_IO_C, .aux_legacy_usbc = POWER_DOMAIN_AUX_C, .aux_tbt = POWER_DOMAIN_AUX_TBT1, }, }; static const struct intel_ddi_port_domains d12_port_domains[] = { { .port_start = PORT_A, .port_end = PORT_C, .aux_ch_start = AUX_CH_A, .aux_ch_end = AUX_CH_C, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, .aux_io = POWER_DOMAIN_AUX_IO_A, .aux_legacy_usbc = POWER_DOMAIN_AUX_A, .aux_tbt = POWER_DOMAIN_INVALID, }, { .port_start = PORT_TC1, .port_end = PORT_TC6, .aux_ch_start = AUX_CH_USBC1, .aux_ch_end = AUX_CH_USBC6, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, .aux_io = POWER_DOMAIN_INVALID, .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, .aux_tbt = POWER_DOMAIN_AUX_TBT1, }, }; static const struct intel_ddi_port_domains d13_port_domains[] = { { .port_start = PORT_A, .port_end = PORT_C, .aux_ch_start = AUX_CH_A, .aux_ch_end = AUX_CH_C, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, .aux_io = POWER_DOMAIN_AUX_IO_A, .aux_legacy_usbc = POWER_DOMAIN_AUX_A, .aux_tbt = POWER_DOMAIN_INVALID, }, { .port_start = PORT_TC1, .port_end = PORT_TC4, .aux_ch_start = AUX_CH_USBC1, .aux_ch_end = AUX_CH_USBC4, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, .aux_io = POWER_DOMAIN_INVALID, .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, .aux_tbt = POWER_DOMAIN_AUX_TBT1, }, { .port_start = PORT_D_XELPD, .port_end = PORT_E_XELPD, .aux_ch_start = AUX_CH_D_XELPD, .aux_ch_end = AUX_CH_E_XELPD, .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, .aux_io = POWER_DOMAIN_AUX_IO_D, .aux_legacy_usbc = 
POWER_DOMAIN_AUX_D, .aux_tbt = POWER_DOMAIN_INVALID, }, }; static void intel_port_domains_for_platform(struct drm_i915_private *i915, const struct intel_ddi_port_domains **domains, int *domains_size) { if (DISPLAY_VER(i915) >= 13) { *domains = d13_port_domains; *domains_size = ARRAY_SIZE(d13_port_domains); } else if (DISPLAY_VER(i915) >= 12) { *domains = d12_port_domains; *domains_size = ARRAY_SIZE(d12_port_domains); } else if (DISPLAY_VER(i915) >= 11) { *domains = d11_port_domains; *domains_size = ARRAY_SIZE(d11_port_domains); } else { *domains = i9xx_port_domains; *domains_size = ARRAY_SIZE(i9xx_port_domains); } } static const struct intel_ddi_port_domains * intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) { const struct intel_ddi_port_domains *domains; int domains_size; int i; intel_port_domains_for_platform(i915, &domains, &domains_size); for (i = 0; i < domains_size; i++) if (port >= domains[i].port_start && port <= domains[i].port_end) return &domains[i]; return NULL; } enum intel_display_power_domain intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_IO_A; return domains->ddi_io + (int)(port - domains->port_start); } enum intel_display_power_domain intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_PORT_DDI_LANES_A; return domains->ddi_lanes + (int)(port - domains->port_start); } static const struct intel_ddi_port_domains * intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) { const struct intel_ddi_port_domains *domains; int domains_size; int i; intel_port_domains_for_platform(i915, &domains, &domains_size); for (i = 0; i < domains_size; i++) if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) return &domains[i]; return NULL; } enum intel_display_power_domain intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_IO_A; return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); } enum intel_display_power_domain intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_A; return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); } enum intel_display_power_domain intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) { const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) return POWER_DOMAIN_AUX_TBT1; return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); }
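/*
 * The intel_display_power_*_domain() helpers above look a domain up by
 * offsetting from the first entry of the matching port/AUX range, so they
 * rely on the corresponding POWER_DOMAIN_* enum values being laid out
 * contiguously. E.g. with the d12 table, PORT_TC3 resolves to
 * POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1), i.e. the third
 * TC DDI IO domain.
 */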
linux-master
drivers/gpu/drm/i915/display/intel_display_power.c
/* * Copyright © 2018 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Madhav Chauhan <[email protected]> * Jani Nikula <[email protected]> */ #include <drm/display/drm_dsc_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> #include "i915_reg.h" #include "icl_dsi.h" #include "icl_dsi_regs.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" static int header_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) { return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } static int payload_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) { return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; } static bool wait_for_header_credits(struct drm_i915_private *dev_priv, enum transcoder dsi_trans, int hdr_credit) { if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= hdr_credit, 100)) { drm_err(&dev_priv->drm, "DSI header credits not released\n"); return false; } return true; } static bool wait_for_payload_credits(struct drm_i915_private *dev_priv, enum transcoder dsi_trans, int payld_credit) { if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= payld_credit, 100)) { drm_err(&dev_priv->drm, "DSI payload credits not released\n"); return false; } return true; } static enum transcoder dsi_port_to_transcoder(enum port port) { if (port == PORT_A) return TRANSCODER_DSI_0; else return TRANSCODER_DSI_1; } static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; int ret; /* wait for header/payload credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT); } /* send 
nop DCS command */ for_each_dsi_port(port, intel_dsi->ports) { dsi = intel_dsi->dsi_hosts[port]->device; dsi->mode_flags |= MIPI_DSI_MODE_LPM; dsi->channel = 0; ret = mipi_dsi_dcs_nop(dsi); if (ret < 0) drm_err(&dev_priv->drm, "error sending DCS NOP command\n"); } /* wait for header credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); } /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & LPTX_IN_PROGRESS), 20)) drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); } } static int dsi_send_pkt_payld(struct intel_dsi_host *host, const struct mipi_dsi_packet *packet) { struct intel_dsi *intel_dsi = host->intel_dsi; struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); const u8 *data = packet->payload; u32 len = packet->payload_length; int i, j; /* payload queue can accept *256 bytes*, check limit */ if (len > MAX_PLOAD_CREDIT * 4) { drm_err(&i915->drm, "payload size exceeds max queue limit\n"); return -EINVAL; } for (i = 0; i < len; i += 4) { u32 tmp = 0; if (!wait_for_payload_credits(i915, dsi_trans, 1)) return -EBUSY; for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp); } return 0; } static int dsi_send_pkt_hdr(struct intel_dsi_host *host, const struct mipi_dsi_packet *packet, bool enable_lpdt) { struct intel_dsi *intel_dsi = host->intel_dsi; struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); u32 tmp; if (!wait_for_header_credits(dev_priv, dsi_trans, 1)) return -EBUSY; tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); if (packet->payload) tmp |= PAYLOAD_PRESENT; else tmp &= ~PAYLOAD_PRESENT; tmp &= ~VBLANK_FENCE; if (enable_lpdt) tmp |= LP_DATA_TRANSFER; else tmp &= ~LP_DATA_TRANSFER; tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT); tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT); tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT); tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT); intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 mode_flags; enum port port; mode_flags = crtc_state->mode_flags; /* * case 1 also covers dual link * In case of dual link, frame update should be set on * DSI_0 */ if (mode_flags & I915_MODE_FLAG_DSI_USE_TE0) port = PORT_A; else if (mode_flags & I915_MODE_FLAG_DSI_USE_TE1) port = PORT_B; else return; intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp, mask, val; int lane; for_each_dsi_phy(phy, intel_dsi->phys) { /* * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | RTERM_SELECT(0x6); tmp = 
intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~mask; tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK; val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | RCOMP_SCALAR(0x98); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~mask; tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK; val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | CURSOR_COEFF(0x3f); intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); /* Bspec: must not use GRP register for write */ for (lane = 0; lane <= 3; lane++) intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), mask, val); } } static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; /* FIXME: Move all DSS handling to intel_vdsc.c */ if (DISPLAY_VER(dev_priv) >= 12) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe); } else { dss_ctl1_reg = DSS_CTL1; dss_ctl2_reg = DSS_CTL2; } dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u16 hactive = adjusted_mode->crtc_hdisplay; u16 dl_buffer_depth; dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE; dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) drm_err(&dev_priv->drm, "DL buffer depth exceed max value\n"); dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ static int afe_clk(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp; if (crtc_state->dsc.compression_enable) bpp = crtc_state->dsc.compressed_bpp; else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count); } static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; int afe_clk_khz; int theo_word_clk, act_word_clk; u32 esc_clk_div_m, esc_clk_div_m_phy; afe_clk_khz = afe_clk(encoder, crtc_state); if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) { theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK); act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2); esc_clk_div_m = act_word_clk * 8; esc_clk_div_m_phy = (act_word_clk - 1) / 2; } else { esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); } for_each_dsi_port(port, 
intel_dsi->ports) { intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); } for_each_dsi_port(port, intel_dsi->ports) { intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); } if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) { intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8), esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY); intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8)); } } } static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, struct intel_dsi *intel_dsi) { enum port port; for_each_dsi_port(port, intel_dsi->ports) { drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? POWER_DOMAIN_PORT_DDI_IO_A : POWER_DOMAIN_PORT_DDI_IO_B); } } static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), 0, COMBO_PHY_MODE_DSI); get_dsi_io_power_domains(dev_priv, intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; for_each_dsi_phy(phy, intel_dsi->phys) intel_combo_phy_power_up_lanes(dev_priv, phy, true, intel_dsi->lane_count, false); } static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp; int lane; /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); for (lane = 0; lane <= 3; lane++) intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), LOADGEN_SELECT, lane != 2 ? 
LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) { intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); } } } static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* * Set SUS Clock Config bitfield to 11b * Note: loadgen select program is done * as part of lane phy sequence configuration */ for_each_dsi_phy(phy, intel_dsi->phys) intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ dsi_program_swing_and_deemphasis(encoder); /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 500)) drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", port_name(port)); } } static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum phy phy; /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* Program DPHY data lanes timings */ for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* * If DSI link operating at or below an 800 MHz, * TA_SURE should be override and programmed to * a value '0' inside TA_PARAM_REGISTERS otherwise * leave all fields at HW default values. 
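 * Note: afe_clk() returns kHz (the same units as the 1500 * 1000 check used
 * for the 1.5 Gbps limit in gen11_dsi_configure_transcoder()), so the 800000
 * threshold below corresponds to this 800 MHz limit.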
*/ if (DISPLAY_VER(dev_priv) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } } if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } static void gen11_dsi_setup_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* shadow register inside display core */ if (DISPLAY_VER(dev_priv) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } } } } static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->display.dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->display.dpll.lock); } static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); bool clock_enabled = false; enum phy phy; u32 tmp; tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy))) clock_enabled = true; } return clock_enabled; } static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; mutex_lock(&dev_priv->display.dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } intel_de_write(dev_priv, 
ICL_DPCLKA_CFGCR0, val); intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); mutex_unlock(&dev_priv->display.dpll.lock); } static void gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = crtc->pipe; u32 tmp; enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); if (intel_dsi->eotp_pkt) tmp &= ~EOTP_DISABLED; else tmp |= EOTP_DISABLED; /* enable link calibration if freq > 1.5Gbps */ if (afe_clk(encoder, pipe_config) >= 1500 * 1000) { tmp &= ~LINK_CALIBRATION_MASK; tmp |= CALIBRATION_ENABLED_INITIAL_ONLY; } /* configure continuous clock */ tmp &= ~CONTINUOUS_CLK_MASK; if (intel_dsi->clock_stop) tmp |= CLK_ENTER_LP_AFTER_DATA; else tmp |= CLK_HS_CONTINUOUS; /* configure buffer threshold limit to minimum */ tmp &= ~PIX_BUF_THRESHOLD_MASK; tmp |= PIX_BUF_THRESHOLD_1_4; /* set virtual channel to '0' */ tmp &= ~PIX_VIRT_CHAN_MASK; tmp |= PIX_VIRT_CHAN(0); /* program BGR transmission */ if (intel_dsi->bgr_enabled) tmp |= BGR_TRANSMISSION; /* select pixel format */ tmp &= ~PIX_FMT_MASK; if (pipe_config->dsc.compression_enable) { tmp |= PIX_FMT_COMPRESSED; } else { switch (intel_dsi->pixel_format) { default: MISSING_CASE(intel_dsi->pixel_format); fallthrough; case MIPI_DSI_FMT_RGB565: tmp |= PIX_FMT_RGB565; break; case MIPI_DSI_FMT_RGB666_PACKED: tmp |= PIX_FMT_RGB666_PACKED; break; case MIPI_DSI_FMT_RGB666: tmp |= PIX_FMT_RGB666_LOOSE; break; case MIPI_DSI_FMT_RGB888: tmp |= PIX_FMT_RGB888; break; } } if (DISPLAY_VER(dev_priv) >= 12) { if (is_vid_mode(intel_dsi)) tmp |= BLANKING_PACKET_ENABLE; } /* program DSI operation mode */ if (is_vid_mode(intel_dsi)) { tmp &= ~OP_MODE_MASK; switch (intel_dsi->video_mode) { default: MISSING_CASE(intel_dsi->video_mode); fallthrough; case NON_BURST_SYNC_EVENTS: tmp |= VIDEO_MODE_SYNC_EVENT; break; case NON_BURST_SYNC_PULSE: tmp |= VIDEO_MODE_SYNC_PULSE; break; } } else { /* * FIXME: Retrieve this info from VBT. * As per the spec when dsi transcoder is operating * in TE GATE mode, TE comes from GPIO * which is UTIL PIN for DSI 0. * Also this GPIO would not be used for other * purposes is an assumption. 
*/ tmp &= ~OP_MODE_MASK; tmp |= CMD_MODE_TE_GATE; tmp |= TE_SOURCE_GPIO; } intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); } /* enable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), 0, PORT_SYNC_MODE_ENABLE); } /* configure stream splitting */ configure_dual_link_mode(encoder, pipe_config); } for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* select data lane width */ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); tmp &= ~DDI_PORT_WIDTH_MASK; tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); /* select input pipe */ tmp &= ~TRANS_DDI_EDP_INPUT_MASK; switch (pipe) { default: MISSING_CASE(pipe); fallthrough; case PIPE_A: tmp |= TRANS_DDI_EDP_INPUT_A_ON; break; case PIPE_B: tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF; break; case PIPE_C: tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF; break; case PIPE_D: tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF; break; } /* enable DDI buffer */ tmp |= TRANS_DDI_FUNC_ENABLE; intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); } /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & LINK_READY), 2500)) drm_err(&dev_priv->drm, "DSI link not ready\n"); } } static void gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; enum port port; enum transcoder dsi_trans; /* horizontal timings */ u16 htotal, hactive, hsync_start, hsync_end, hsync_size; u16 hback_porch; /* vertical timings */ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; int mul = 1, div = 1; /* * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account * for slower link speed if DSC is enabled. * * The compression frequency ratio is the ratio between compressed and * non-compressed link speeds, and simplifies down to the ratio between * compressed and non-compressed bpp. 
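 * For example, an RGB888 stream (24 bpp uncompressed) compressed to 12 bpp
 * gives mul/div = 12/24, so htotal, hsync_start and hsync_end below come out
 * at half their uncompressed values.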
*/ if (crtc_state->dsc.compression_enable) { mul = crtc_state->dsc.compressed_bpp; div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); } hactive = adjusted_mode->crtc_hdisplay; if (is_vid_mode(intel_dsi)) htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); else htotal = DIV_ROUND_UP((hactive + 160) * mul, div); hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div); hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div); hsync_size = hsync_end - hsync_start; hback_porch = (adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end); vactive = adjusted_mode->crtc_vdisplay; if (is_vid_mode(intel_dsi)) { vtotal = adjusted_mode->crtc_vtotal; } else { int bpp, line_time_us, byte_clk_period_ns; if (crtc_state->dsc.compression_enable) bpp = crtc_state->dsc.compressed_bpp; else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state); line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count); vtotal = vactive + DIV_ROUND_UP(400, line_time_us); } vsync_start = adjusted_mode->crtc_vsync_start; vsync_end = adjusted_mode->crtc_vsync_end; vsync_shift = hsync_start - htotal / 2; if (intel_dsi->dual_link) { hactive /= 2; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) hactive += intel_dsi->pixel_overlap; htotal /= 2; } /* minimum hactive as per bspec: 256 pixels */ if (adjusted_mode->crtc_hdisplay < 256) drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n"); /* if RGB666 format, then hactive must be multiple of 4 pixels */ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) drm_err(&dev_priv->drm, "hactive pixels are not multiple of 4\n"); /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans), HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } /* TRANS_HSYNC register to be programmed only for video mode */ if (is_vid_mode(intel_dsi)) { if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ if (hsync_size < 16) drm_err(&dev_priv->drm, "hsync size < 16 pixels\n"); } if (hback_porch < 16) drm_err(&dev_priv->drm, "hback porch < 16 pixels\n"); if (intel_dsi->dual_link) { hsync_start /= 2; hsync_end /= 2; } for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans), HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } /* program TRANS_VTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* * FIXME: Programing this by assuming progressive mode, since * non-interlaced info from VBT is not saved inside * struct drm_display_mode. * For interlace mode: program required pixel minus 2 */ intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans), VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) drm_err(&dev_priv->drm, "Invalid vsync_end value\n"); if (vsync_start < vactive) drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); /* program TRANS_VSYNC register for video mode only */ if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans), VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } /* * FIXME: It has to be programmed only for video modes and interlaced * modes. 
Put the check condition here once interlaced * info available as described above. * program TRANS_VSYNCSHIFT register */ if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans), vsync_shift); } } /* * program TRANS_VBLANK register, should be same as vtotal programmed * * FIXME get rid of these local hacks and do it right, * this will not handle eg. delayed vblank correctly. */ if (DISPLAY_VER(dev_priv) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans), VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } } static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_STATE_ENABLE, 10)) drm_err(&dev_priv->drm, "DSI transcoder not enabled\n"); } } static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; /* * escape clock count calculation: * BYTE_CLK_COUNT = TIME_NS/(8 * UI) * UI (nsec) = (10^6)/Bitrate * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS */ divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000; mul = 8 * 1000000; hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul, divisor); lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor); ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor); for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), HSTX_TIMEOUT_VALUE_MASK, HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), LPRX_TIMEOUT_VALUE_MASK, LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), TA_TIMEOUT_VALUE_MASK, TA_TIMEOUT_VALUE(ta_timeout)); } } static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, bool enable) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; /* * used as TE i/p for DSI0, * for dual link/DSI1 TE is from slave DSI1 * through GPIO. 
*/ if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B))) return; tmp = intel_de_read(dev_priv, UTIL_PIN_CTL); if (enable) { tmp |= UTIL_PIN_DIRECTION_INPUT; tmp |= UTIL_PIN_ENABLE; } else { tmp &= ~UTIL_PIN_ENABLE; } intel_de_write(dev_priv, UTIL_PIN_CTL, tmp); } static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); /* step 4b: configure lane sequencing of the Combo-PHY transmitters */ gen11_dsi_config_phy_lanes_sequence(encoder); /* step 4c: configure voltage swing and skew */ gen11_dsi_voltage_swing_program_seq(encoder); /* setup D-PHY timings */ gen11_dsi_setup_dphy_timings(encoder, crtc_state); /* enable DDI buffer */ gen11_dsi_enable_ddi_buffer(encoder); gen11_dsi_gate_clocks(encoder); gen11_dsi_setup_timings(encoder, crtc_state); /* Since transcoder is configured to take events from GPIO */ gen11_dsi_config_util_pin(encoder, true); /* step 4h: setup DSI protocol timeouts */ gen11_dsi_setup_timeouts(encoder, crtc_state); /* Step (4h, 4i, 4j, 4k): Configure transcoder */ gen11_dsi_configure_transcoder(encoder, crtc_state); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; enum transcoder dsi_trans; u32 tmp; int ret; /* set maximum return packet size */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* * FIXME: This uses the number of DW's currently in the payload * receive queue. This is probably not what we want here. */ tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)); tmp &= NUMBER_RX_PLOAD_DW_MASK; /* multiply "Number Rx Payload DW" by 4 to get max value */ tmp = tmp * 4; dsi = intel_dsi->dsi_hosts[port]->device; ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); if (ret < 0) drm_err(&dev_priv->drm, "error setting max return pkt size%d\n", tmp); } intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); /* ensure all panel commands dispatched before enabling transcoder */ wait_for_cmds_dispatched_to_panel(encoder); } static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_wait_panel_power_cycle(intel_dsi); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); msleep(intel_dsi->panel_on_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); /* step2: enable IO power */ gen11_dsi_enable_io_power(encoder); /* step3: enable DSI PLL */ gen11_dsi_program_esc_clk_div(encoder, crtc_state); } static void gen11_dsi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { /* step3b */ gen11_dsi_map_pll(encoder, pipe_config); /* step4: enable DSI port and DPHY */ gen11_dsi_enable_port_and_phy(encoder, pipe_config); /* step5: program and powerup panel */ gen11_dsi_powerup_panel(encoder); intel_dsc_dsi_pps_write(encoder, pipe_config); /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); } /* * Wa_1409054076:icl,jsl,ehl * When pipe A is disabled and MIPI DSI is enabled on pipe B, * the AMT KVMR feature will incorrectly 
see pipe A as enabled. * Set 0x42080 bit 23=1 before enabling DSI on pipe B and leave * it set while DSI is enabled on pipe B */ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_KVMR_PIPE_A, enable ? IGNORE_KVMR_PIPE_A : 0); } /* * Wa_16012360555:adl-p * SW will have to program the "LP to HS Wakeup Guardband" * to account for the repeaters on the HS Request/Ready * PPI signaling between the Display engine and the DPHY. */ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; if (DISPLAY_VER(i915) == 13) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), TGL_DSI_CHKN_LSHS_GB_MASK, TGL_DSI_CHKN_LSHS_GB(4)); } } static void gen11_dsi_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* Wa_1409054076:icl,jsl,ehl */ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true); /* Wa_16012360555:adl-p */ adlp_set_lp_hs_wakeup_gb(encoder); /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); /* step7: enable backlight */ intel_backlight_enable(crtc_state, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); intel_crtc_vblank_on(crtc_state); } static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "DSI trancoder not disabled\n"); } } static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); /* ensure cmds dispatched to panel */ wait_for_cmds_dispatched_to_panel(encoder); } static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; u32 tmp; /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)); tmp |= LINK_ENTER_ULPS; tmp &= ~LINK_ULPS_TYPE_LP11; intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp); if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & LINK_IN_ULPS), 10)) drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); } /* disable ddi function */ 
for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), TRANS_DDI_FUNC_ENABLE, 0); } /* disable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), PORT_SYNC_MODE_ENABLE, 0); } } } static void gen11_dsi_disable_port(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) drm_err(&dev_priv->drm, "DDI port:%c buffer not idle\n", port_name(port)); } gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { intel_wakeref_t wakeref; wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]); intel_display_power_put(dev_priv, port == PORT_A ? POWER_DOMAIN_PORT_DDI_IO_A : POWER_DOMAIN_PORT_DDI_IO_B, wakeref); } /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), COMBO_PHY_MODE_DSI, 0); } static void gen11_dsi_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_backlight_disable(old_conn_state); } static void gen11_dsi_post_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); intel_crtc_vblank_off(old_crtc_state); /* step2d,e: disable transcoder and wait */ gen11_dsi_disable_transcoder(encoder); /* Wa_1409054076:icl,jsl,ehl */ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, false); /* step2f,g: powerdown panel */ gen11_dsi_powerdown_panel(encoder); /* step2h,i,j: deconfig trancoder */ gen11_dsi_deconfigure_trancoder(encoder); intel_dsc_disable(old_crtc_state); skl_scaler_disable(old_crtc_state); /* step3: disable port */ gen11_dsi_disable_port(encoder); gen11_dsi_config_util_pin(encoder, false); /* step4: disable IO power */ gen11_dsi_disable_io_power(encoder); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); msleep(intel_dsi->panel_off_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); intel_dsi->panel_power_off_time = ktime_get_boottime(); } static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { /* FIXME: DSC? 
*/ return intel_dsi_mode_valid(connector, mode); } static void gen11_dsi_get_timings(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; if (pipe_config->dsc.compressed_bpp) { int div = pipe_config->dsc.compressed_bpp; int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); adjusted_mode->crtc_htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); adjusted_mode->crtc_hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div); adjusted_mode->crtc_hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div); } if (intel_dsi->dual_link) { adjusted_mode->crtc_hdisplay *= 2; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) adjusted_mode->crtc_hdisplay -= intel_dsi->pixel_overlap; adjusted_mode->crtc_htotal *= 2; } adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) { if (intel_dsi->dual_link) { adjusted_mode->crtc_hsync_start *= 2; adjusted_mode->crtc_hsync_end *= 2; } } adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; } static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder dsi_trans; u32 val; if (intel_dsi->ports == BIT(PORT_B)) dsi_trans = TRANSCODER_DSI_1; else dsi_trans = TRANSCODER_DSI_0; val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); } static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi, struct intel_crtc_state *pipe_config) { if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A))) pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0; else if (intel_dsi->ports == BIT(PORT_B)) pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1; else pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE0; } static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder)); pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) pipe_config->hw.adjusted_mode.crtc_clock *= 2; gen11_dsi_get_timings(encoder, pipe_config); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); /* Get the details on which TE should be enabled */ if (is_cmd_mode(intel_dsi)) gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config); if (gen11_dsi_is_periodic_cmd_mode(intel_dsi)) pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; } static void gen11_dsi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc; enum pipe pipe; if (!crtc_state) return; intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); pipe = intel_crtc->pipe; /* wa verify 1409054076:icl,jsl,ehl */ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared 
with pipe B enabled\n", encoder->base.base.id, encoder->base.name); } static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10; bool use_dsc; int ret; use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc); if (!use_dsc) return 0; if (crtc_state->pipe_bpp < 8 * 3) return -EINVAL; /* FIXME: split only when necessary */ if (crtc_state->dsc.slice_count > 1) crtc_state->dsc.dsc_split = true; /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; /* DSI specific sanity checks on the common code */ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable); drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422); drm_WARN_ON(&dev_priv->drm, vdsc_cfg->pic_width % vdsc_cfg->slice_width); drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8); drm_WARN_ON(&dev_priv->drm, vdsc_cfg->pic_height % vdsc_cfg->slice_height); ret = drm_dsc_compute_rc_parameters(vdsc_cfg); if (ret) return ret; crtc_state->dsc.compression_enable = true; return 0; } static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; ret = intel_panel_compute_config(intel_connector, adjusted_mode); if (ret) return ret; ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; adjusted_mode->flags = 0; /* Dual link goes to trancoder DSI'0' */ if (intel_dsi->ports == BIT(PORT_B)) pipe_config->cpu_transcoder = TRANSCODER_DSI_1; else pipe_config->cpu_transcoder = TRANSCODER_DSI_0; if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) pipe_config->pipe_bpp = 24; else pipe_config->pipe_bpp = 18; pipe_config->clock_set = true; if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n"); pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; /* * In case of TE GATE cmd mode, we * receive TE from the slave if * dual link is enabled */ if (is_cmd_mode(intel_dsi)) gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config); return 0; } static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); get_dsi_io_power_domains(i915, enc_to_intel_dsi(encoder)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; intel_wakeref_t wakeref; enum port port; bool ret = false; u32 tmp; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); switch 
(tmp & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: *pipe = PIPE_A; break; case TRANS_DDI_EDP_INPUT_B_ONOFF: *pipe = PIPE_B; break; case TRANS_DDI_EDP_INPUT_C_ONOFF: *pipe = PIPE_C; break; case TRANS_DDI_EDP_INPUT_D_ONOFF: *pipe = PIPE_D; break; default: drm_err(&dev_priv->drm, "Invalid PIPE input\n"); goto out; } tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans)); ret = tmp & TRANSCONF_ENABLE; } out: intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } static bool gen11_dsi_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { if (crtc_state->dsc.compression_enable) { drm_dbg_kms(encoder->base.dev, "Forcing full modeset due to DSC being enabled\n"); crtc_state->uapi.mode_changed = true; return false; } return true; } static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) { intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { .destroy = gen11_dsi_encoder_destroy, }; static const struct drm_connector_funcs gen11_dsi_connector_funcs = { .detect = intel_panel_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, .mode_valid = gen11_dsi_mode_valid, .atomic_check = intel_digital_connector_atomic_check, }; static int gen11_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dsi) { return 0; } static int gen11_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dsi) { return 0; } static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); struct mipi_dsi_packet dsi_pkt; ssize_t ret; bool enable_lpdt = false; ret = mipi_dsi_create_packet(&dsi_pkt, msg); if (ret < 0) return ret; if (msg->flags & MIPI_DSI_MSG_USE_LPM) enable_lpdt = true; /* only long packet contains payload */ if (mipi_dsi_packet_format_is_long(msg->type)) { ret = dsi_send_pkt_payld(intel_dsi_host, &dsi_pkt); if (ret < 0) return ret; } /* send packet header */ ret = dsi_send_pkt_hdr(intel_dsi_host, &dsi_pkt, enable_lpdt); if (ret < 0) return ret; //TODO: add payload receive code if needed ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; return ret; } static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { .attach = gen11_dsi_host_attach, .detach = gen11_dsi_host_detach, .transfer = gen11_dsi_host_transfer, }; #define ICL_PREPARE_CNT_MAX 0x7 #define ICL_CLK_ZERO_CNT_MAX 0xf #define ICL_TRAIL_CNT_MAX 0x7 #define ICL_TCLK_PRE_CNT_MAX 0x3 #define ICL_TCLK_POST_CNT_MAX 0x7 #define ICL_HS_ZERO_CNT_MAX 0xf #define ICL_EXIT_ZERO_CNT_MAX 0x7 static void icl_dphy_param_init(struct intel_dsi *intel_dsi) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns; u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; u32 ths_prepare_ns, tclk_trail_ns; u32 
hs_zero_cnt; u32 tclk_pre_cnt, tclk_post_cnt; tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); /* * prepare cnt in escape clocks * this field represents a hexadecimal value with a precision * of 1.2 – i.e. the most significant bit is the integer * and the least significant 2 bits are fraction bits. * so, the field can represent a range of 0.25 to 1.75 */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); if (prepare_cnt > ICL_PREPARE_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n", prepare_cnt); prepare_cnt = ICL_PREPARE_CNT_MAX; } /* clk zero count in escape clocks */ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - ths_prepare_ns, tlpx_ns); if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "clk_zero_cnt out of range (%d)\n", clk_zero_cnt); clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } /* trail cnt in escape clocks*/ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); if (trail_cnt > ICL_TRAIL_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n", trail_cnt); trail_cnt = ICL_TRAIL_CNT_MAX; } /* tclk pre count in escape clocks */ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; } /* tclk post count in escape clocks */ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "tclk_post_cnt out of range (%d)\n", tclk_post_cnt); tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; } /* hs zero cnt in escape clocks */ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - ths_prepare_ns, tlpx_ns); if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n", hs_zero_cnt); hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; } /* hs exit zero cnt in escape clocks */ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { drm_dbg_kms(&dev_priv->drm, "exit_zero_cnt out of range (%d)\n", exit_zero_cnt); exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; } /* clock lane dphy timings */ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | CLK_PREPARE(prepare_cnt) | CLK_ZERO_OVERRIDE | CLK_ZERO(clk_zero_cnt) | CLK_PRE_OVERRIDE | CLK_PRE(tclk_pre_cnt) | CLK_POST_OVERRIDE | CLK_POST(tclk_post_cnt) | CLK_TRAIL_OVERRIDE | CLK_TRAIL(trail_cnt)); /* data lanes dphy timings */ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | HS_PREPARE(prepare_cnt) | HS_ZERO_OVERRIDE | HS_ZERO(hs_zero_cnt) | HS_TRAIL_OVERRIDE | HS_TRAIL(trail_cnt) | HS_EXIT_OVERRIDE | HS_EXIT(exit_zero_cnt)); intel_dsi_log_params(intel_dsi); } static void icl_dsi_add_properties(struct intel_connector *connector) { const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); intel_attach_scaling_mode_property(&connector->base); drm_connector_set_panel_orientation_with_quirk(&connector->base, intel_dsi_get_panel_orientation(connector), fixed_mode->hdisplay, fixed_mode->vdisplay); } void icl_dsi_init(struct drm_i915_private *dev_priv, const struct intel_bios_encoder_data *devdata) { struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; enum port port; port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; intel_dsi 
= kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) return; intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(intel_dsi); return; } encoder = &intel_dsi->base; intel_dsi->attached_connector = intel_connector; connector = &intel_connector->base; encoder->devdata = devdata; /* register DSI encoder with DRM subsystem */ drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; encoder->pre_enable = gen11_dsi_pre_enable; encoder->enable = gen11_dsi_enable; encoder->disable = gen11_dsi_disable; encoder->post_disable = gen11_dsi_post_disable; encoder->port = port; encoder->get_config = gen11_dsi_get_config; encoder->sync_state = gen11_dsi_sync_state; encoder->update_pipe = intel_backlight_update; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->initial_fastset_check = gen11_dsi_initial_fastset_check; encoder->type = INTEL_OUTPUT_DSI; encoder->cloneable = 0; encoder->pipe_mask = ~0; encoder->power_domain = POWER_DOMAIN_PORT_DSI; encoder->get_power_domains = gen11_dsi_get_power_domains; encoder->disable_clock = gen11_dsi_gate_clocks; encoder->is_clock_enabled = gen11_dsi_is_clock_enabled; encoder->shutdown = intel_dsi_shutdown; /* register DSI connector with DRM subsystem */ drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; intel_connector->get_hw_state = intel_connector_get_hw_state; /* attach connector to encoder */ intel_connector_attach_encoder(intel_connector, encoder); intel_dsi->panel_power_off_time = ktime_get_boottime(); intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL); mutex_lock(&dev_priv->drm.mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); mutex_unlock(&dev_priv->drm.mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { drm_err(&dev_priv->drm, "DSI fixed mode info missing\n"); goto err; } intel_panel_init(intel_connector, NULL); intel_backlight_setup(intel_connector, INVALID_PIPE); if (intel_connector->panel.vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); else intel_dsi->ports = BIT(port); if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { struct intel_dsi_host *host; host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port); if (!host) goto err; intel_dsi->dsi_hosts[port] = host; } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { drm_dbg_kms(&dev_priv->drm, "no device found\n"); goto err; } icl_dphy_param_init(intel_dsi); icl_dsi_add_properties(intel_connector); return; err: drm_connector_cleanup(connector); drm_encoder_cleanup(&encoder->base); kfree(intel_dsi); kfree(intel_connector); }
linux-master
drivers/gpu/drm/i915/display/icl_dsi.c
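The D-PHY programming in icl_dphy_param_init() above boils down to one recurring pattern: convert a VBT timing given in nanoseconds to escape-clock counts with DIV_ROUND_UP(), then clamp the result to the register field's maximum. prepare_cnt is the odd one out because its field holds 1 integer bit plus 2 fraction bits, so it is computed in quarter-clock units (hence the "* 4" in the file). Below is a minimal standalone model of that conversion (plain userspace C, made-up sample timings, not the kernel API), kept here purely as a reference for the arithmetic:

/*
 * Standalone model of the ns -> escape-clock conversion used by
 * icl_dphy_param_init(): round up, then clamp to the field maximum.
 * The tlpx/ths values below are invented sample numbers.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int ns_to_escape_clocks(unsigned int time_ns,
					unsigned int tlpx_ns,
					unsigned int max)
{
	unsigned int cnt = DIV_ROUND_UP(time_ns, tlpx_ns);

	if (cnt > max) {
		printf("count %u out of range, clamping to %u\n", cnt, max);
		cnt = max;
	}
	return cnt;
}

int main(void)
{
	unsigned int tlpx_ns = 50;		/* made-up escape clock period */
	unsigned int ths_prepare_ns = 65;	/* made-up VBT value */

	/* prepare_cnt counts quarter escape clocks, field max 0x7 (1.75) */
	unsigned int prepare_cnt =
		ns_to_escape_clocks(ths_prepare_ns * 4, tlpx_ns, 0x7);

	/* trail count in whole escape clocks, field max 0x7 */
	unsigned int trail_cnt = ns_to_escape_clocks(60, tlpx_ns, 0x7);

	printf("prepare_cnt=%u trail_cnt=%u\n", prepare_cnt, trail_cnt);
	return 0;
}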
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/drm_modeset_lock.h> #include "intel_display_types.h" #include "intel_modeset_lock.h" void _intel_modeset_lock_begin(struct drm_modeset_acquire_ctx *ctx, struct intel_atomic_state *state, unsigned int flags, int *ret) { drm_modeset_acquire_init(ctx, flags); if (state) state->base.acquire_ctx = ctx; *ret = -EDEADLK; } bool _intel_modeset_lock_loop(int *ret) { if (*ret == -EDEADLK) { *ret = 0; return true; } return false; } void _intel_modeset_lock_end(struct drm_modeset_acquire_ctx *ctx, struct intel_atomic_state *state, int *ret) { if (*ret == -EDEADLK) { if (state) drm_atomic_state_clear(&state->base); *ret = drm_modeset_backoff(ctx); if (*ret == 0) { *ret = -EDEADLK; return; } } drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); }
linux-master
drivers/gpu/drm/i915/display/intel_modeset_lock.c
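The three helpers in intel_modeset_lock.c above are the init, condition and increment legs of a deadlock-backoff retry loop: _begin() primes ret with -EDEADLK so the body runs at least once, _loop() turns that sentinel into "go around again", and _end() backs off on contention or drops the locks on completion. In the driver they are presumably wrapped by a macro in the corresponding header (not shown in this dump). The sketch below models the control flow in standalone userspace C with the DRM calls stubbed out; the simulated single contention is invented for illustration:

/* Userspace model of the _begin/_loop/_end retry pattern. */
#include <errno.h>
#include <stdio.h>

struct ctx { int backoffs; };

static void lock_begin(struct ctx *c, int *ret)
{
	(void)c;
	*ret = -EDEADLK;	/* ensure the loop body runs at least once */
}

static int lock_loop(int *ret)
{
	if (*ret == -EDEADLK) {
		*ret = 0;
		return 1;	/* retry the body */
	}
	return 0;		/* done: success or a hard error */
}

static void lock_end(struct ctx *c, int *ret)
{
	if (*ret == -EDEADLK) {
		c->backoffs++;		/* drm_modeset_backoff() in the kernel */
		*ret = -EDEADLK;	/* re-arm so lock_loop() retries */
		return;
	}
	/* success or hard error: kernel would drop locks and fini here */
}

int main(void)
{
	struct ctx c = { 0 };
	int attempt = 0, ret;

	for (lock_begin(&c, &ret); lock_loop(&ret); lock_end(&c, &ret)) {
		/* body: first attempt "hits contention", second succeeds */
		ret = (attempt++ == 0) ? -EDEADLK : 0;
	}
	printf("attempts=%d backoffs=%d ret=%d\n", attempt, c.backoffs, ret);
	return 0;
}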
/* * Copyright © 2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <linux/kernel.h> #include "i915_drv.h" #include "i915_irq.h" #include "intel_display_types.h" #include "intel_hotplug.h" #include "intel_hotplug_irq.h" /** * DOC: Hotplug * * Simply put, hotplug occurs when a display is connected to or disconnected * from the system. However, there may be adapters and docking stations and * Display Port short pulses and MST devices involved, complicating matters. * * Hotplug in i915 is handled in many different levels of abstraction. * * The platform dependent interrupt handling code in i915_irq.c enables, * disables, and does preliminary handling of the interrupts. The interrupt * handlers gather the hotplug detect (HPD) information from relevant registers * into a platform independent mask of hotplug pins that have fired. * * The platform independent interrupt handler intel_hpd_irq_handler() in * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes * further processing to appropriate bottom halves (Display Port specific and * regular hotplug). * * The Display Port work function i915_digport_work_func() calls into * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long * pulses, with failures and non-MST long pulses triggering regular hotplug * processing on the connector. * * The regular hotplug work function i915_hotplug_work_func() calls connector * detect hooks, and, if connector status changes, triggers sending of hotplug * uevent to userspace via drm_kms_helper_hotplug_event(). * * Finally, the userspace is responsible for triggering a modeset upon receiving * the hotplug uevent, disabling or enabling the crtc as needed. * * The hotplug interrupt storm detection and mitigation code keeps track of the * number of interrupts per hotplug pin per a period of time, and if the number * of interrupts exceeds a certain threshold, the interrupt is disabled for a * while before being re-enabled. The intention is to mitigate issues raising * from broken hardware triggering massive amounts of interrupts and grinding * the system to a halt. 
* * Current implementation expects that hotplug interrupt storm will not be * seen when display port sink is connected, hence on platforms whose DP * callback is handled by i915_digport_work_func reenabling of hpd is not * performed (it was never expected to be disabled in the first place ;) ) * this is specific to DP sinks handled by this routine and any other display * such as HDMI or DVI enabled on the same port will have proper logic since * it will use i915_hotplug_work_func where this logic is handled. */ /** * intel_hpd_pin_default - return default pin associated with certain port. * @dev_priv: private driver data pointer * @port: the hpd port to get associated pin * * It is only valid and used by digital port encoder. * * Return pin that is associatade with @port. */ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port) { return HPD_PORT_A + port - PORT_A; } /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) #define HPD_RETRY_DELAY 1000 static enum hpd_pin intel_connector_hpd_pin(struct intel_connector *connector) { struct intel_encoder *encoder = intel_attached_encoder(connector); /* * MST connectors get their encoder attached dynamically * so need to make sure we have an encoder here. But since * MST encoders have their hpd_pin set to HPD_NONE we don't * have to special case them beyond that. */ return encoder ? encoder->hpd_pin : HPD_NONE; } /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin * @dev_priv: private driver data pointer * @pin: the pin to gather stats on * @long_hpd: whether the HPD IRQ was long or short * * Gather stats about HPD IRQs from the specified @pin, and detect IRQ * storms. Only the pin specific stats and state are changed, the caller is * responsible for further action. * * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and * short IRQs count as +1. If this threshold is exceeded, it's considered an * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. * * By default, most systems will only count long IRQs towards * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also * suffer from short IRQ storms and must also track these. Because short IRQ * storms are naturally caused by sideband interactions with DP MST devices, * short IRQ detection is only enabled for systems without DP MST support. * Systems which are new enough to support DP MST are far less likely to * suffer from IRQ storms at all, so this is fine. * * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, * and should only be adjusted for automated hotplug testing. * * Return true if an IRQ storm was detected on @pin. */ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, enum hpd_pin pin, bool long_hpd) { struct intel_hotplug *hpd = &dev_priv->display.hotplug; unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); const int increment = long_hpd ? 
10 : 1; const int threshold = hpd->hpd_storm_threshold; bool storm = false; if (!threshold || (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) return false; if (!time_in_range(jiffies, start, end)) { hpd->stats[pin].last_jiffies = jiffies; hpd->stats[pin].count = 0; } hpd->stats[pin].count += increment; if (hpd->stats[pin].count > threshold) { hpd->stats[pin].state = HPD_MARK_DISABLED; drm_dbg_kms(&dev_priv->drm, "HPD interrupt storm detected on PIN %d\n", pin); storm = true; } else { drm_dbg_kms(&dev_priv->drm, "Received HPD interrupt on PIN %d - cnt: %d\n", pin, hpd->stats[pin].count); } return storm; } static void intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) { struct drm_connector_list_iter conn_iter; struct intel_connector *connector; bool hpd_disabled = false; lockdep_assert_held(&dev_priv->irq_lock); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; if (connector->base.polled != DRM_CONNECTOR_POLL_HPD) continue; pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; drm_info(&dev_priv->drm, "HPD interrupt storm detected on connector %s: " "switching from hotplug detection to polling\n", connector->base.name); dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; } drm_connector_list_iter_end(&conn_iter); /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { drm_kms_helper_poll_reschedule(&dev_priv->drm); mod_delayed_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), display.hotplug.reenable_work.work); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; intel_wakeref_t wakeref; enum hpd_pin pin; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) continue; if (connector->base.polled != connector->polled) drm_dbg(&dev_priv->drm, "Reenabling HPD on connector %s\n", connector->base.name); connector->base.polled = connector->polled; } drm_connector_list_iter_end(&conn_iter); for_each_hpd_pin(pin) { if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; } intel_hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; enum drm_connector_status old_status; u64 old_epoch_counter; bool ret = false; drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); old_status = connector->base.status; old_epoch_counter = connector->base.epoch_counter; connector->base.status = drm_helper_probe_detect(&connector->base, NULL, false); if (old_epoch_counter != connector->base.epoch_counter) ret = true; if (ret) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to 
%s (epoch counter %llu->%llu)\n", connector->base.base.id, connector->base.name, drm_get_connector_status_name(old_status), drm_get_connector_status_name(connector->base.status), old_epoch_counter, connector->base.epoch_counter); return INTEL_HOTPLUG_CHANGED; } return INTEL_HOTPLUG_UNCHANGED; } static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) { return intel_encoder_is_dig_port(encoder) && enc_to_dig_port(encoder)->hpd_pulse != NULL; } static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); u32 long_port_mask, short_port_mask; struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); long_port_mask = dev_priv->display.hotplug.long_port_mask; dev_priv->display.hotplug.long_port_mask = 0; short_port_mask = dev_priv->display.hotplug.short_port_mask; dev_priv->display.hotplug.short_port_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); for_each_intel_encoder(&dev_priv->drm, encoder) { struct intel_digital_port *dig_port; enum port port = encoder->port; bool long_hpd, short_hpd; enum irqreturn ret; if (!intel_encoder_has_hpd_pulse(encoder)) continue; long_hpd = long_port_mask & BIT(port); short_hpd = short_port_mask & BIT(port); if (!long_hpd && !short_hpd) continue; dig_port = enc_to_dig_port(encoder); ret = dig_port->hpd_pulse(dig_port, long_hpd); if (ret == IRQ_NONE) { /* fall back to old school hpd */ old_bits |= BIT(encoder->hpd_pin); } } if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.event_bits |= old_bits; spin_unlock_irq(&dev_priv->irq_lock); queue_delayed_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.hotplug_work, 0); } } /** * intel_hpd_trigger_irq - trigger an hpd irq event for a port * @dig_port: digital port * * Trigger an HPD interrupt event for the given port, emulating a short pulse * generated by the sink, and schedule the dig port work to handle it. */ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); spin_lock_irq(&i915->irq_lock); i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); spin_unlock_irq(&i915->irq_lock); queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); } /* * Handle hotplug events outside the interrupt handler proper. 
*/ static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, display.hotplug.hotplug_work.work); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; u32 changed = 0, retry = 0; u32 hpd_event_bits; u32 hpd_retry_bits; struct drm_connector *first_changed_connector = NULL; int changed_connectors = 0; mutex_lock(&dev_priv->drm.mode_config.mutex); drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); spin_lock_irq(&dev_priv->irq_lock); hpd_event_bits = dev_priv->display.hotplug.event_bits; dev_priv->display.hotplug.event_bits = 0; hpd_retry_bits = dev_priv->display.hotplug.retry_bits; dev_priv->display.hotplug.retry_bits = 0; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); /* Skip calling encode hotplug handlers if ignore long HPD set*/ if (dev_priv->display.hotplug.ignore_long_hpd) { drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); mutex_unlock(&dev_priv->drm.mode_config.mutex); return; } drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; u32 hpd_bit; pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE) continue; hpd_bit = BIT(pin); if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { struct intel_encoder *encoder = intel_attached_encoder(connector); if (hpd_event_bits & hpd_bit) connector->hotplug_retries = 0; else connector->hotplug_retries++; drm_dbg_kms(&dev_priv->drm, "Connector %s (pin %i) received hotplug event. (retry %d)\n", connector->base.name, pin, connector->hotplug_retries); switch (encoder->hotplug(encoder, connector)) { case INTEL_HOTPLUG_UNCHANGED: break; case INTEL_HOTPLUG_CHANGED: changed |= hpd_bit; changed_connectors++; if (!first_changed_connector) { drm_connector_get(&connector->base); first_changed_connector = &connector->base; } break; case INTEL_HOTPLUG_RETRY: retry |= hpd_bit; break; } } } drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev_priv->drm.mode_config.mutex); if (changed_connectors == 1) drm_kms_helper_connector_hotplug_event(first_changed_connector); else if (changed_connectors > 0) drm_kms_helper_hotplug_event(&dev_priv->drm); if (first_changed_connector) drm_connector_put(first_changed_connector); /* Remove shared HPD pins that have changed */ retry &= ~changed; if (retry) { spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.retry_bits |= retry; spin_unlock_irq(&dev_priv->irq_lock); mod_delayed_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.hotplug_work, msecs_to_jiffies(HPD_RETRY_DELAY)); } } /** * intel_hpd_irq_handler - main hotplug irq handler * @dev_priv: drm_i915_private * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * * This is the main hotplug irq handler for all platforms. The platform specific * irq handlers call the platform specific hotplug irq handlers, which read and * decode the appropriate registers into bitmasks about hpd pins that have * triggered (@pin_mask), and which of those pins may be long pulses * (@long_mask). The @long_mask is ignored if the port corresponding to the pin * is not a digital port. * * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. 
*/ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; u32 long_hpd_pulse_mask = 0; u32 short_hpd_pulse_mask = 0; enum hpd_pin pin; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); /* * Determine whether ->hpd_pulse() exists for each pin, and * whether we have a short or a long pulse. This is needed * as each pin may have up to two encoders (HDMI and DP) and * only the one of them (DP) will have ->hpd_pulse(). */ for_each_intel_encoder(&dev_priv->drm, encoder) { enum port port = encoder->port; bool long_hpd; pin = encoder->hpd_pin; if (!(BIT(pin) & pin_mask)) continue; if (!intel_encoder_has_hpd_pulse(encoder)) continue; long_hpd = long_mask & BIT(pin); drm_dbg(&dev_priv->drm, "digital hpd on [ENCODER:%d:%s] - %s\n", encoder->base.base.id, encoder->base.name, long_hpd ? "long" : "short"); queue_dig = true; if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); dev_priv->display.hotplug.long_port_mask |= BIT(port); } else { short_hpd_pulse_mask |= BIT(pin); dev_priv->display.hotplug.short_port_mask |= BIT(port); } } /* Now process each pin just once */ for_each_hpd_pin(pin) { bool long_hpd; if (!(BIT(pin) & pin_mask)) continue; if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), "Received HPD interrupt on pin %d although disabled\n", pin); continue; } if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) continue; /* * Delegate to ->hpd_pulse() if one of the encoders for this * pin has it, otherwise let the hotplug_work deal with this * pin directly. */ if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { long_hpd = long_hpd_pulse_mask & BIT(pin); } else { dev_priv->display.hotplug.event_bits |= BIT(pin); long_hpd = true; queue_hp = true; } if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { dev_priv->display.hotplug.event_bits &= ~BIT(pin); storm_detected = true; queue_hp = true; } } /* * Disable any IRQs that storms were detected on. Polling enablement * happens later in our hotplug work. */ if (storm_detected) intel_hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); /* * Our hotplug handler can grab modeset locks (by calling down into the * fb helpers). Hence it must not be run on our own dev-priv->wq work * queue for otherwise the flush_work in the pageflip code will * deadlock. */ if (queue_dig) queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); if (queue_hp) queue_delayed_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.hotplug_work, 0); } /** * intel_hpd_init - initializes and enables hpd support * @dev_priv: i915 device instance * * This function enables the hotplug support. It requires that interrupts have * already been enabled with intel_irq_init_hw(). From this point on hotplug and * poll request can run concurrently to other code, so locking rules must be * obeyed. * * This is a separate step from interrupt enabling to simplify the locking rules * in the driver load and resume code. * * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). 
*/ void intel_hpd_init(struct drm_i915_private *dev_priv) { int i; if (!HAS_DISPLAY(dev_priv)) return; for_each_hpd_pin(i) { dev_priv->display.hotplug.stats[i].count = 0; dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; } /* * Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. */ spin_lock_irq(&dev_priv->irq_lock); intel_hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); } static void i915_hpd_poll_init_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, display.hotplug.poll_init_work); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; bool enabled; mutex_lock(&dev_priv->drm.mode_config.mutex); enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE) continue; connector->base.polled = connector->polled; if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD) connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } drm_connector_list_iter_end(&conn_iter); if (enabled) drm_kms_helper_poll_reschedule(&dev_priv->drm); mutex_unlock(&dev_priv->drm.mode_config.mutex); /* * We might have missed any hotplugs that happened while we were * in the middle of disabling polling */ if (!enabled) drm_helper_hpd_irq_event(&dev_priv->drm); } /** * intel_hpd_poll_enable - enable polling for connectors with hpd * @dev_priv: i915 device instance * * This function enables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, * this happens when we enter runtime suspend. * On Valleyview and Cherryview systems, this also happens when we shut off all * of the powerwells. * * Since this function can get called in contexts where we're already holding * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate * worker. * * Also see: intel_hpd_init() and intel_hpd_poll_disable(). */ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return; WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); /* * We might already be holding dev->mode_config.mutex, so do this in a * seperate worker * As well, there's no issue if we race here since we always reschedule * this worker anyway */ queue_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.poll_init_work); } /** * intel_hpd_poll_disable - disable polling for connectors with hpd * @dev_priv: i915 device instance * * This function disables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, * this happens when we enter runtime suspend. * On Valleyview and Cherryview systems, this also happens when we shut off all * of the powerwells. * * Since this function can get called in contexts where we're already holding * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate * worker. * * Also used during driver init to initialize connector->polled * appropriately for all connectors. * * Also see: intel_hpd_init() and intel_hpd_poll_enable(). 
*/ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) return; WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); queue_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.poll_init_work); } void intel_hpd_init_early(struct drm_i915_private *i915) { INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, i915_hotplug_work_func); INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; /* If we have MST support, we want to avoid doing short HPD IRQ storm * detection, as short HPD storms will occur as a natural part of * sideband messaging with MST. * On older platforms however, IRQ storms can occur with both long and * short pulses, as seen on some G4x systems. */ i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); } void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.long_port_mask = 0; dev_priv->display.hotplug.short_port_mask = 0; dev_priv->display.hotplug.event_bits = 0; dev_priv->display.hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work); cancel_work_sync(&dev_priv->display.hotplug.poll_init_work); cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work); } bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) { bool ret = false; if (pin == HPD_NONE) return false; spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; ret = true; } spin_unlock_irq(&dev_priv->irq_lock); return ret; } void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) { if (pin == HPD_NONE) return; spin_lock_irq(&dev_priv->irq_lock); dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; spin_unlock_irq(&dev_priv->irq_lock); } static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; struct intel_hotplug *hotplug = &dev_priv->display.hotplug; /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ intel_synchronize_irq(dev_priv); flush_work(&dev_priv->display.hotplug.dig_port_work); flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", str_yes_no(delayed_work_pending(&hotplug->reenable_work))); return 0; } static ssize_t i915_hpd_storm_ctl_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; struct intel_hotplug *hotplug = &dev_priv->display.hotplug; unsigned int new_threshold; int i; char *newline; char tmp[16]; if (len >= sizeof(tmp)) return -EINVAL; if (copy_from_user(tmp, ubuf, len)) return -EFAULT; tmp[len] = '\0'; /* Strip newline, if any */ newline = strchr(tmp, '\n'); if (newline) *newline = '\0'; if (strcmp(tmp, "reset") == 0) new_threshold = HPD_STORM_DEFAULT_THRESHOLD; else if (kstrtouint(tmp, 10, &new_threshold) != 0) return -EINVAL; if 
(new_threshold > 0) drm_dbg_kms(&dev_priv->drm, "Setting HPD storm detection threshold to %d\n", new_threshold); else drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); spin_lock_irq(&dev_priv->irq_lock); hotplug->hpd_storm_threshold = new_threshold; /* Reset the HPD storm stats so we don't accidentally trigger a storm */ for_each_hpd_pin(i) hotplug->stats[i].count = 0; spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) { return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); } static const struct file_operations i915_hpd_storm_ctl_fops = { .owner = THIS_MODULE, .open = i915_hpd_storm_ctl_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_hpd_storm_ctl_write }; static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; seq_printf(m, "Enabled: %s\n", str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); return 0; } static int i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) { return single_open(file, i915_hpd_short_storm_ctl_show, inode->i_private); } static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; struct intel_hotplug *hotplug = &dev_priv->display.hotplug; char *newline; char tmp[16]; int i; bool new_state; if (len >= sizeof(tmp)) return -EINVAL; if (copy_from_user(tmp, ubuf, len)) return -EFAULT; tmp[len] = '\0'; /* Strip newline, if any */ newline = strchr(tmp, '\n'); if (newline) *newline = '\0'; /* Reset to the "default" state for this system */ if (strcmp(tmp, "reset") == 0) new_state = !HAS_DP_MST(dev_priv); else if (kstrtobool(tmp, &new_state) != 0) return -EINVAL; drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", new_state ? "En" : "Dis"); spin_lock_irq(&dev_priv->irq_lock); hotplug->hpd_short_storm_enabled = new_state; /* Reset the HPD storm stats so we don't accidentally trigger a storm */ for_each_hpd_pin(i) hotplug->stats[i].count = 0; spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } static const struct file_operations i915_hpd_short_storm_ctl_fops = { .owner = THIS_MODULE, .open = i915_hpd_short_storm_ctl_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_hpd_short_storm_ctl_write, }; void intel_hpd_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, i915, &i915_hpd_storm_ctl_fops); debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, i915, &i915_hpd_short_storm_ctl_fops); debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, &i915->display.hotplug.ignore_long_hpd); }
linux-master
drivers/gpu/drm/i915/display/intel_hotplug.c
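intel_hpd_irq_storm_detect() above is a per-pin windowed counter: pulses accumulate inside HPD_STORM_DETECT_PERIOD (1000 ms), long pulses weigh 10 and short ones 1, and exceeding hpd_storm_threshold (default 50) marks the pin so the hotplug work can switch it to polling. A standalone model of that accounting follows (plain millisecond timestamps instead of jiffies/time_in_range, and the pulse train is an invented example):

/* Userspace model of the HPD IRQ storm accounting. */
#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_MS	1000
#define STORM_DEFAULT_THRESHOLD	50

struct pin_stats {
	unsigned long window_start_ms;
	unsigned int count;
};

static bool storm_detect(struct pin_stats *s, unsigned long now_ms,
			 bool long_hpd, unsigned int threshold)
{
	/* restart the window if the last one has expired */
	if (now_ms - s->window_start_ms >= STORM_DETECT_PERIOD_MS) {
		s->window_start_ms = now_ms;
		s->count = 0;
	}
	s->count += long_hpd ? 10 : 1;	/* long IRQs count as +10, short +1 */
	return s->count > threshold;
}

int main(void)
{
	struct pin_stats pin = { 0, 0 };
	unsigned long t;
	int storm_events = 0;

	/* Six long pulses in one burst: 6 * 10 exceeds the threshold. */
	for (t = 0; t < 6; t++)
		storm_events += storm_detect(&pin, t, true,
					     STORM_DEFAULT_THRESHOLD);

	printf("count=%u storm_events=%d\n", pin.count, storm_events);
	return 0;
}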
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <linux/pci.h> #include <linux/vgaarb.h> #include <video/vga.h> #include "soc/intel_gmch.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_vga.h" static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) { if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) return VLV_VGACNTRL; else if (DISPLAY_VER(i915) >= 5) return CPU_VGACNTRL; else return VGACNTRL; } /* Disable the VGA plane that we never use */ void intel_vga_disable(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); u8 sr1; if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE) return; /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); outb(0x01, VGA_SEQ_I); sr1 = inb(VGA_SEQ_D); outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D); vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE); intel_de_posting_read(dev_priv, vga_reg); } void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv) { i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) { drm_dbg_kms(&dev_priv->drm, "Something enabled VGA plane, disabling it\n"); intel_vga_disable(dev_priv); } } void intel_vga_redisable(struct drm_i915_private *i915) { intel_wakeref_t wakeref; /* * This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very * paranoid "someone might have enabled VGA while we were not looking" * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy * the rest of the driver uses. */ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA); if (!wakeref) return; intel_vga_redisable_power_on(i915); intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref); } void intel_vga_reset_io_mem(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); /* * After we re-enable the power well, if we touch VGA register 0x3d5 * we'll get unclaimed register interrupts. This stops after we write * anything to the VGA MSR register. The vgacon module uses this * register all the time, so if we unbind our driver and, as a * consequence, bind vgacon, we'll get stuck in an infinite loop at * console_unlock(). So make here we touch the VGA MSR register, making * sure vgacon can keep working normally without triggering interrupts * and error messages. */ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); outb(inb(VGA_MIS_R), VGA_MIS_W); vga_put(pdev, VGA_RSRC_LEGACY_IO); } static unsigned int intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode) { struct drm_i915_private *i915 = pdev_to_i915(pdev); intel_gmch_vga_set_state(i915, enable_decode); if (enable_decode) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; else return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } int intel_vga_register(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int ret; /* * If we have > 1 VGA cards, then we need to arbitrate access to the * common VGA resources. 
* * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ ret = vga_client_register(pdev, intel_vga_set_decode); if (ret && ret != -ENODEV) return ret; return 0; } void intel_vga_unregister(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); vga_client_unregister(pdev); }
linux-master
drivers/gpu/drm/i915/display/intel_vga.c
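intel_vga_disable() above blanks the legacy VGA plane through the classic sequencer index/data pair before writing VGACNTRL: it writes index 0x01 to VGA_SEQ_I to select SR01, then sets the screen-off bit (bit 5 in the standard VGA sequencer layout) through VGA_SEQ_D. The snippet below replays that read-modify-write against a tiny fake register file instead of real port I/O, so it runs anywhere; it is an illustration of the access pattern, not driver code:

/* Model of the SR01 "screen off" read-modify-write. */
#include <stdio.h>

#define VGA_SEQ_I		0x3c4	/* sequencer index port */
#define VGA_SEQ_D		0x3c5	/* sequencer data port */
#define VGA_SR01_SCREEN_OFF	(1 << 5)

static unsigned char seq_index;
static unsigned char seq_regs[8];	/* fake sequencer register file */

static void fake_outb(unsigned char val, unsigned short port)
{
	if (port == VGA_SEQ_I)
		seq_index = val;
	else if (port == VGA_SEQ_D)
		seq_regs[seq_index & 7] = val;
}

static unsigned char fake_inb(unsigned short port)
{
	return (port == VGA_SEQ_D) ? seq_regs[seq_index & 7] : 0;
}

int main(void)
{
	unsigned char sr1;

	fake_outb(0x01, VGA_SEQ_I);		/* select SR01 */
	sr1 = fake_inb(VGA_SEQ_D);		/* read current value */
	fake_outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D);	/* blank screen */

	printf("SR01 = 0x%02x\n", seq_regs[1]);
	return 0;
}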
// SPDX-License-Identifier: MIT /* * Copyright © 2022-2023 Intel Corporation * * High level display driver entry points. This is a layer between top level * driver code and low level display functionality; no low level display code or * details here. */ #include <linux/vga_switcheroo.h> #include <acpi/video.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_mode_config.h> #include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "i915_drv.h" #include "i9xx_wm.h" #include "intel_acpi.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_bios.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_display_debugfs.h" #include "intel_display_driver.h" #include "intel_display_irq.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fdi.h" #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hotplug.h" #include "intel_hti.h" #include "intel_modeset_setup.h" #include "intel_opregion.h" #include "intel_overlay.h" #include "intel_plane_initial.h" #include "intel_pmdemand.h" #include "intel_pps.h" #include "intel_quirks.h" #include "intel_vga.h" #include "intel_wm.h" #include "skl_watermark.h" bool intel_display_driver_probe_defer(struct pci_dev *pdev) { struct drm_privacy_screen *privacy_screen; /* * apple-gmux is needed on dual GPU MacBook Pro * to probe the panel if we're the inactive GPU. */ if (vga_switcheroo_client_probe_defer(pdev)) return true; /* If the LCD panel has a privacy-screen, wait for it */ privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) return true; drm_privacy_screen_put(privacy_screen); return false; } void intel_display_driver_init_hw(struct drm_i915_private *i915) { struct intel_cdclk_state *cdclk_state; if (!HAS_DISPLAY(i915)) return; cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); intel_update_cdclk(i915); intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .get_format_info = intel_fb_get_format_info, .output_poll_changed = intel_fbdev_output_poll_changed, .mode_valid = intel_mode_valid, .atomic_check = intel_atomic_check, .atomic_commit = intel_atomic_commit, .atomic_state_alloc = intel_atomic_state_alloc, .atomic_state_clear = intel_atomic_state_clear, .atomic_state_free = intel_atomic_state_free, }; static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = { .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, }; static void intel_mode_config_init(struct drm_i915_private *i915) { struct drm_mode_config *mode_config = &i915->drm.mode_config; drm_mode_config_init(&i915->drm); INIT_LIST_HEAD(&i915->display.global.obj_list); mode_config->min_width = 0; mode_config->min_height = 0; mode_config->preferred_depth = 24; mode_config->prefer_shadow = 1; mode_config->funcs = &intel_mode_funcs; mode_config->helper_private = &intel_mode_config_funcs; mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); /* * Maximum framebuffer dimensions, chosen to match * the 
maximum render engine surface size on gen4+. */ if (DISPLAY_VER(i915) >= 7) { mode_config->max_width = 16384; mode_config->max_height = 16384; } else if (DISPLAY_VER(i915) >= 4) { mode_config->max_width = 8192; mode_config->max_height = 8192; } else if (DISPLAY_VER(i915) == 3) { mode_config->max_width = 4096; mode_config->max_height = 4096; } else { mode_config->max_width = 2048; mode_config->max_height = 2048; } if (IS_I845G(i915) || IS_I865G(i915)) { mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; mode_config->cursor_height = 1023; } else if (IS_I830(i915) || IS_I85X(i915) || IS_I915G(i915) || IS_I915GM(i915)) { mode_config->cursor_width = 64; mode_config->cursor_height = 64; } else { mode_config->cursor_width = 256; mode_config->cursor_height = 256; } } static void intel_mode_config_cleanup(struct drm_i915_private *i915) { intel_atomic_global_obj_cleanup(i915); drm_mode_config_cleanup(&i915->drm); } static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) { struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); } } void intel_display_driver_early_probe(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; intel_display_irq_init(i915); intel_dkl_phy_init(i915); intel_color_init_hooks(i915); intel_init_cdclk_hooks(i915); intel_audio_hooks_init(i915); intel_dpll_init_clock_hook(i915); intel_init_display_hooks(i915); intel_fdi_init_hook(i915); } /* part #1: call before irq install */ int intel_display_driver_probe_noirq(struct drm_i915_private *i915) { int ret; if (i915_inject_probe_failure(i915)) return -ENODEV; if (HAS_DISPLAY(i915)) { ret = drm_vblank_init(&i915->drm, INTEL_NUM_PIPES(i915)); if (ret) return ret; } intel_bios_init(i915); ret = intel_vga_register(i915); if (ret) goto cleanup_bios; /* FIXME: completely on the wrong abstraction layer */ ret = intel_power_domains_init(i915); if (ret < 0) goto cleanup_vga; intel_pmdemand_init_early(i915); intel_power_domains_init_hw(i915, false); if (!HAS_DISPLAY(i915)) return 0; intel_dmc_init(i915); i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); intel_mode_config_init(i915); ret = intel_cdclk_init(i915); if (ret) goto cleanup_vga_client_pw_domain_dmc; ret = intel_color_init(i915); if (ret) goto cleanup_vga_client_pw_domain_dmc; ret = intel_dbuf_init(i915); if (ret) goto cleanup_vga_client_pw_domain_dmc; ret = intel_bw_init(i915); if (ret) goto cleanup_vga_client_pw_domain_dmc; ret = intel_pmdemand_init(i915); if (ret) goto cleanup_vga_client_pw_domain_dmc; init_llist_head(&i915->display.atomic_helper.free_list); INIT_WORK(&i915->display.atomic_helper.free_work, intel_atomic_helper_free_state_worker); intel_init_quirks(i915); intel_fbc_init(i915); return 0; cleanup_vga_client_pw_domain_dmc: intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); cleanup_vga: intel_vga_unregister(i915); cleanup_bios: intel_bios_driver_remove(i915); return ret; } /* part #2: call after irq install, but before gem init */ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) { struct drm_device *dev = &i915->drm; enum pipe pipe; struct intel_crtc *crtc; int ret; if (!HAS_DISPLAY(i915)) return 0; intel_wm_init(i915); intel_panel_sanitize_ssc(i915); intel_pps_setup(i915); intel_gmbus_setup(i915); drm_dbg_kms(&i915->drm, "%d display pipe%s 
available.\n", INTEL_NUM_PIPES(i915), INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); if (ret) { intel_mode_config_cleanup(i915); return ret; } } intel_plane_possible_crtcs_init(i915); intel_shared_dpll_init(i915); intel_fdi_pll_freq_update(i915); intel_update_czclk(i915); intel_display_driver_init_hw(i915); intel_dpll_update_ref_clks(i915); intel_hdcp_component_init(i915); if (i915->display.cdclk.max_cdclk_freq == 0) intel_update_max_cdclk(i915); intel_hti_init(i915); /* Just disable it once at startup */ intel_vga_disable(i915); intel_setup_outputs(i915); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx); intel_acpi_assign_connector_fwnodes(i915); drm_modeset_unlock_all(dev); for_each_intel_crtc(dev, crtc) { if (!to_intel_crtc_state(crtc->base.state)->uapi.active) continue; intel_crtc_initial_plane_config(crtc); } /* * Make sure hardware watermarks really match the state we read out. * Note that we need to do this after reconstructing the BIOS fb's * since the watermark calculation done here will use pstate->fb. */ if (!HAS_GMCH(i915)) ilk_wm_sanitize(i915); return 0; } /* part #3: call after gem init */ int intel_display_driver_probe(struct drm_i915_private *i915) { int ret; if (!HAS_DISPLAY(i915)) return 0; /* * Force all active planes to recompute their states. So that on * mode_setcrtc after probe, all the intel_plane_state variables * are already calculated and there is no assert_plane warnings * during bootup. */ ret = intel_initial_commit(&i915->drm); if (ret) drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); intel_overlay_setup(i915); ret = intel_fbdev_init(&i915->drm); if (ret) return ret; /* Only enable hotplug handling once the fbdev is fully set up. */ intel_hpd_init(i915); intel_hpd_poll_disable(i915); skl_watermark_ipc_init(i915); return 0; } void intel_display_driver_register(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; /* Must be done after probing outputs */ intel_opregion_register(i915); intel_acpi_video_register(i915); intel_audio_init(i915); intel_display_debugfs_register(i915); /* * Some ports require correctly set-up hpd registers for * detection to work properly (leading to ghost connected * connector status), e.g. VGA on gm45. Hence we can only set * up the initial fbdev config after hpd irqs are fully * enabled. We do it last so that the async config cannot run * before the connectors are registered. */ intel_fbdev_initial_config_async(i915); /* * We need to coordinate the hotplugs with the asynchronous * fbdev configuration, for which we use the * fbdev->async_cookie. */ drm_kms_helper_poll_init(&i915->drm); } /* part #1: call before irq uninstall */ void intel_display_driver_remove(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; flush_workqueue(i915->display.wq.flip); flush_workqueue(i915->display.wq.modeset); flush_work(&i915->display.atomic_helper.free_work); drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list)); /* * MST topology needs to be suspended so we don't have any calls to * fbdev after it's finalized. MST will be destroyed later as part of * drm_mode_config_cleanup() */ intel_dp_mst_suspend(i915); } /* part #2: call after irq uninstall */ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. 
*/ intel_hpd_poll_fini(i915); /* poll work can call into fbdev, hence clean that up afterwards */ intel_fbdev_fini(i915); intel_unregister_dsm_handler(); /* flush any delayed tasks or pending work */ flush_workqueue(i915->unordered_wq); intel_hdcp_component_fini(i915); intel_mode_config_cleanup(i915); intel_overlay_cleanup(i915); intel_gmbus_teardown(i915); destroy_workqueue(i915->display.wq.flip); destroy_workqueue(i915->display.wq.modeset); intel_fbc_cleanup(i915); } /* part #3: call after gem init */ void intel_display_driver_remove_nogem(struct drm_i915_private *i915) { intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); intel_vga_unregister(i915); intel_bios_driver_remove(i915); } void intel_display_driver_unregister(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) return; intel_fbdev_unregister(i915); intel_audio_deinit(i915); /* * After flushing the fbdev (incl. a late async config which * will have delayed queuing of a hotplug event), then flush * the hotplug events. */ drm_kms_helper_poll_fini(&i915->drm); drm_atomic_helper_shutdown(&i915->drm); acpi_video_unregister(); intel_opregion_unregister(i915); } /* * turn all crtc's off, but do not adjust state * This has to be paired with a call to intel_modeset_setup_hw_state. */ int intel_display_driver_suspend(struct drm_i915_private *i915) { struct drm_atomic_state *state; int ret; if (!HAS_DISPLAY(i915)) return 0; state = drm_atomic_helper_suspend(&i915->drm); ret = PTR_ERR_OR_ZERO(state); if (ret) drm_err(&i915->drm, "Suspending crtc's failed with %i\n", ret); else i915->display.restore.modeset_state = state; return ret; } int __intel_display_driver_resume(struct drm_i915_private *i915, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; int ret, i; intel_modeset_setup_hw_state(i915, ctx); intel_vga_redisable(i915); if (!state) return 0; /* * We've duplicated the state, pointers to the old state are invalid. * * Don't attempt to use the old state until we commit the duplicated state. */ for_each_new_crtc_in_state(state, crtc, crtc_state, i) { /* * Force recalculation even if we restore * current state. With fast modeset this may not result * in a modeset when the state is compatible. */ crtc_state->mode_changed = true; } /* ignore any reset values/BIOS leftovers in the WM registers */ if (!HAS_GMCH(i915)) to_intel_atomic_state(state)->skip_intermediate_wm = true; ret = drm_atomic_helper_commit_duplicated_state(state, ctx); drm_WARN_ON(&i915->drm, ret == -EDEADLK); return ret; } void intel_display_driver_resume(struct drm_i915_private *i915) { struct drm_atomic_state *state = i915->display.restore.modeset_state; struct drm_modeset_acquire_ctx ctx; int ret; if (!HAS_DISPLAY(i915)) return; i915->display.restore.modeset_state = NULL; if (state) state->acquire_ctx = &ctx; drm_modeset_acquire_init(&ctx, 0); while (1) { ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx); if (ret != -EDEADLK) break; drm_modeset_backoff(&ctx); } if (!ret) ret = __intel_display_driver_resume(i915, state, &ctx); skl_watermark_ipc_update(i915); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); if (ret) drm_err(&i915->drm, "Restoring old state failed with %i\n", ret); if (state) drm_atomic_state_put(state); }
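A pattern worth noting in intel_display_driver_probe_noirq() above: partial initialisation is unwound with goto labels, where each failure jumps to the label that releases everything acquired so far, in reverse order. The following is a minimal stand-alone sketch of that idiom with hypothetical resource names, not the driver's actual probe sequence:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for resources that are acquired in sequence. */
static int acquire(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "acquiring %s failed\n", name);
		return -1;
	}
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

/* Mirrors the probe_noirq shape: later failures unwind earlier successes. */
static int probe(int fail_at)
{
	int ret;

	ret = acquire("bios", fail_at == 1);
	if (ret)
		return ret;

	ret = acquire("vga", fail_at == 2);
	if (ret)
		goto cleanup_bios;

	ret = acquire("power domains", fail_at == 3);
	if (ret)
		goto cleanup_vga;

	return 0;

cleanup_vga:
	release("vga");
cleanup_bios:
	release("bios");
	return ret;
}

int main(void)
{
	return probe(3) ? EXIT_FAILURE : EXIT_SUCCESS;
}

Keeping the labels in strict reverse order of acquisition is what lets a single fall-through chain cover every failure point.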
linux-master
drivers/gpu/drm/i915/display/intel_display_driver.c
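Before moving on to intel_opregion.c below, one small restatement of intel_mode_config_init() in the file above: the advertised framebuffer size limit steps with the display generation. A stand-alone sketch of that ladder, illustrative only:

#include <stdio.h>

/* Mirrors the mode_config max_width/max_height selection above. */
static int max_fb_dim(int display_ver)
{
	if (display_ver >= 7)
		return 16384;
	if (display_ver >= 4)
		return 8192;
	if (display_ver == 3)
		return 4096;
	return 2048;
}

int main(void)
{
	for (int ver = 2; ver <= 14; ver++)
		printf("display ver %2d: max %5d x %d\n",
		       ver, max_fb_dim(ver), max_fb_dim(ver));
	return 0;
}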
/* * Copyright 2008 Intel Corporation <[email protected]> * Copyright 2008 Red Hat <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/firmware.h> #include <acpi/video.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "intel_acpi.h" #include "intel_backlight.h" #include "intel_display_types.h" #include "intel_opregion.h" #include "intel_pci_config.h" #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define ACPI_CLID 0x01ac /* current lid state indicator */ #define ACPI_CDCK 0x01b0 /* current docking state indicator */ #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x400 #define OPREGION_ASLE_EXT_OFFSET 0x1C00 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI BIT(0) /* Mailbox #1 */ #define MBOX_SWSCI BIT(1) /* Mailbox #2 (obsolete from v2.x) */ #define MBOX_ASLE BIT(2) /* Mailbox #3 */ #define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */ #define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */ #define PCON_HEADLESS_SKU BIT(13) struct opregion_header { u8 signature[16]; u32 size; struct { u8 rsvd; u8 revision; u8 minor; u8 major; } __packed over; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; u32 mboxes; u32 driver_model; u32 pcon; u8 dver[32]; u8 rsvd[124]; } __packed; /* OpRegion mailbox #1: public ACPI methods */ struct opregion_acpi { u32 drdy; /* driver readiness */ u32 csts; /* notification status */ u32 cevt; /* current event */ u8 rsvd1[20]; u32 didl[8]; /* supported display devices ID list */ u32 cpdl[8]; /* currently presented display list */ u32 cadl[8]; /* currently active display list */ u32 nadl[8]; /* next active devices list */ u32 aslp; /* ASL sleep time-out */ u32 tidx; /* toggle table index */ u32 chpd; /* current hotplug enable indicator */ u32 clid; /* current lid state*/ u32 cdck; /* current docking state */ u32 sxsw; /* Sx state resume */ u32 evts; /* ASL supported events */ u32 cnot; /* current OS notification */ u32 nrdy; /* driver status */ u32 did2[7]; /* extended supported display devices ID list */ u32 cpd2[7]; /* extended attached display devices list */ u8 rsvd2[4]; } __packed; /* OpRegion mailbox #2: SWSCI */ struct opregion_swsci { u32 scic; /* SWSCI command|status|data */ u32 parm; /* command parameters */ u32 dslp; /* driver sleep time-out */ u8 rsvd[244]; } __packed; /* OpRegion mailbox #3: ASLE */ struct opregion_asle { u32 ardy; /* driver readiness 
*/ u32 aslc; /* ASLE interrupt command */ u32 tche; /* technology enabled indicator */ u32 alsi; /* current ALS illuminance reading */ u32 bclp; /* backlight brightness to set */ u32 pfit; /* panel fitting state */ u32 cblv; /* current brightness level */ u16 bclm[20]; /* backlight level duty cycle mapping table */ u32 cpfm; /* current panel fitting mode */ u32 epfm; /* enabled panel fitting modes */ u8 plut[74]; /* panel LUT and identifier */ u32 pfmb; /* PWM freq and min brightness */ u32 cddv; /* color correction default values */ u32 pcft; /* power conservation features */ u32 srot; /* supported rotation angles */ u32 iuer; /* IUER events */ u64 fdss; u32 fdsp; u32 stat; u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) * address of raw VBT data. */ u32 rvds; /* Size of raw vbt data */ u8 rsvd[58]; } __packed; /* OpRegion mailbox #5: ASLE ext */ struct opregion_asle_ext { u32 phed; /* Panel Header */ u8 bddc[256]; /* Panel EDID */ u8 rsvd[764]; } __packed; /* Driver readiness indicator */ #define ASLE_ARDY_READY (1 << 0) #define ASLE_ARDY_NOT_READY (0 << 0) /* ASLE Interrupt Command (ASLC) bits */ #define ASLC_SET_ALS_ILLUM (1 << 0) #define ASLC_SET_BACKLIGHT (1 << 1) #define ASLC_SET_PFIT (1 << 2) #define ASLC_SET_PWM_FREQ (1 << 3) #define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4) #define ASLC_BUTTON_ARRAY (1 << 5) #define ASLC_CONVERTIBLE_INDICATOR (1 << 6) #define ASLC_DOCKING_INDICATOR (1 << 7) #define ASLC_ISCT_STATE_CHANGE (1 << 8) #define ASLC_REQ_MSK 0x1ff /* response bits */ #define ASLC_ALS_ILLUM_FAILED (1 << 10) #define ASLC_BACKLIGHT_FAILED (1 << 12) #define ASLC_PFIT_FAILED (1 << 14) #define ASLC_PWM_FREQ_FAILED (1 << 16) #define ASLC_ROTATION_ANGLES_FAILED (1 << 18) #define ASLC_BUTTON_ARRAY_FAILED (1 << 20) #define ASLC_CONVERTIBLE_FAILED (1 << 22) #define ASLC_DOCKING_FAILED (1 << 24) #define ASLC_ISCT_STATE_FAILED (1 << 26) /* Technology enabled indicator */ #define ASLE_TCHE_ALS_EN (1 << 0) #define ASLE_TCHE_BLC_EN (1 << 1) #define ASLE_TCHE_PFIT_EN (1 << 2) #define ASLE_TCHE_PFMB_EN (1 << 3) /* ASLE backlight brightness to set */ #define ASLE_BCLP_VALID (1<<31) #define ASLE_BCLP_MSK (~(1<<31)) /* ASLE panel fitting request */ #define ASLE_PFIT_VALID (1<<31) #define ASLE_PFIT_CENTER (1<<0) #define ASLE_PFIT_STRETCH_TEXT (1<<1) #define ASLE_PFIT_STRETCH_GFX (1<<2) /* PWM frequency and minimum brightness */ #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) #define ASLE_PFMB_PWM_MASK (0x7ffffe00) #define ASLE_PFMB_PWM_VALID (1<<31) #define ASLE_CBLV_VALID (1<<31) /* IUER */ #define ASLE_IUER_DOCKING (1 << 7) #define ASLE_IUER_CONVERTIBLE (1 << 6) #define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4) #define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3) #define ASLE_IUER_VOLUME_UP_BTN (1 << 2) #define ASLE_IUER_WINDOWS_BTN (1 << 1) #define ASLE_IUER_POWER_BTN (1 << 0) #define ASLE_PHED_EDID_VALID_MASK 0x3 /* Software System Control Interrupt (SWSCI) */ #define SWSCI_SCIC_INDICATOR (1 << 0) #define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1 #define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1) #define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8 #define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8) #define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8 #define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8) #define SWSCI_SCIC_EXIT_STATUS_SHIFT 5 #define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5) #define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1 #define SWSCI_FUNCTION_CODE(main, sub) \ ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \ (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT) /* SWSCI: Get BIOS Data (GBDA) */ #define 
SWSCI_GBDA 4 #define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0) #define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1) #define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4) #define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5) #define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6) #define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7) #define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10) /* SWSCI: System BIOS Callbacks (SBCB) */ #define SWSCI_SBCB 6 #define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0) #define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1) #define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3) #define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4) #define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5) #define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6) #define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7) #define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8) #define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9) #define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10) #define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11) #define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16) #define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17) #define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18) #define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19) #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21) #define MAX_DSLP 1500 static int check_swsci_function(struct drm_i915_private *i915, u32 function) { struct opregion_swsci *swsci = i915->display.opregion.swsci; u32 main_function, sub_function; if (!swsci) return -ENODEV; main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >> SWSCI_SCIC_MAIN_FUNCTION_SHIFT; sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >> SWSCI_SCIC_SUB_FUNCTION_SHIFT; /* Check if we can call the function. See swsci_setup for details. */ if (main_function == SWSCI_SBCB) { if ((i915->display.opregion.swsci_sbcb_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } else if (main_function == SWSCI_GBDA) { if ((i915->display.opregion.swsci_gbda_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } return 0; } static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { struct opregion_swsci *swsci = dev_priv->display.opregion.swsci; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 scic, dslp; u16 swsci_val; int ret; ret = check_swsci_function(dev_priv, function); if (ret) return ret; /* Driver sleep timeout in ms. */ dslp = swsci->dslp; if (!dslp) { /* The spec says 2ms should be the default, but it's too small * for some machines. */ dslp = 50; } else if (dslp > MAX_DSLP) { /* Hey bios, trust must be earned. */ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, " "using %u ms instead\n", dslp, MAX_DSLP); dslp = MAX_DSLP; } /* The spec tells us to do this, but we are the only user... */ scic = swsci->scic; if (scic & SWSCI_SCIC_INDICATOR) { drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n"); return -EBUSY; } scic = function | SWSCI_SCIC_INDICATOR; swsci->parm = parm; swsci->scic = scic; /* Ensure SCI event is selected and event trigger is cleared. 
*/ pci_read_config_word(pdev, SWSCI, &swsci_val); if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { swsci_val |= SWSCI_SCISEL; swsci_val &= ~SWSCI_GSSCIE; pci_write_config_word(pdev, SWSCI, swsci_val); } /* Use event trigger to tell bios to check the mail. */ swsci_val |= SWSCI_GSSCIE; pci_write_config_word(pdev, SWSCI, swsci_val); /* Poll for the result. */ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) if (wait_for(C, dslp)) { drm_dbg(&dev_priv->drm, "SWSCI request timed out\n"); return -ETIMEDOUT; } scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >> SWSCI_SCIC_EXIT_STATUS_SHIFT; /* Note: scic == 0 is an error! */ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) { drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic); return -EIO; } if (parm_out) *parm_out = swsci->parm; return 0; #undef C } #define DISPLAY_TYPE_CRT 0 #define DISPLAY_TYPE_TV 1 #define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2 #define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3 int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); u32 parm = 0; u32 type = 0; u32 port; int ret; /* don't care about old stuff for now */ if (!HAS_DDI(dev_priv)) return 0; /* Avoid port out of bounds checks if SWSCI isn't there. */ ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE); if (ret) return ret; if (intel_encoder->type == INTEL_OUTPUT_DSI) port = 0; else port = intel_encoder->port; if (port == PORT_E) { port = 0; } else { parm |= 1 << port; port++; } /* * The port numbering and mapping here is bizarre. The now-obsolete * swsci spec supports ports numbered [0..4]. Port E is handled as a * special case, but port F and beyond are not. The functionality is * supposed to be obsolete for new platforms. Just bail out if the port * number is out of bounds after mapping. 
*/ if (port > 4) { drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", intel_encoder->base.base.id, intel_encoder->base.name, port_name(intel_encoder->port), port); return -EINVAL; } if (!enable) parm |= 4 << 8; switch (intel_encoder->type) { case INTEL_OUTPUT_ANALOG: type = DISPLAY_TYPE_CRT; break; case INTEL_OUTPUT_DDI: case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; break; case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_DSI: type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL; break; default: drm_WARN_ONCE(&dev_priv->drm, 1, "unsupported intel_encoder type %d\n", intel_encoder->type); return -EINVAL; } parm |= type << (16 + port * 3); return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); } static const struct { pci_power_t pci_power_state; u32 parm; } power_state_map[] = { { PCI_D0, 0x00 }, { PCI_D1, 0x01 }, { PCI_D2, 0x02 }, { PCI_D3hot, 0x04 }, { PCI_D3cold, 0x04 }, }; int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state) { int i; if (!HAS_DDI(dev_priv)) return 0; for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { if (state == power_state_map[i].pci_power_state) return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE, power_state_map[i].parm, NULL); } return -EINVAL; } static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; struct opregion_asle *asle = dev_priv->display.opregion.asle; drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); if (acpi_video_get_backlight_type() == acpi_backlight_native) { drm_dbg_kms(&dev_priv->drm, "opregion backlight request ignored\n"); return 0; } if (!(bclp & ASLE_BCLP_VALID)) return ASLC_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; if (bclp > 255) return ASLC_BACKLIGHT_FAILED; drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL); /* * Update backlight on all connectors that support backlight (usually * only one). */ drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n", bclp); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) intel_backlight_set_acpi(connector->base.state, bclp, 255); drm_connector_list_iter_end(&conn_iter); asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); return 0; } static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
1-0xfffe are valid */ drm_dbg(&dev_priv->drm, "Illum is not supported\n"); return ASLC_ALS_ILLUM_FAILED; } static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) { drm_dbg(&dev_priv->drm, "PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ drm_dbg(&dev_priv->drm, "Pfit is not supported\n"); return ASLC_PFIT_FAILED; } static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) { drm_dbg(&dev_priv->drm, "SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) { if (!iuer) drm_dbg(&dev_priv->drm, "Button array event is not supported (nothing)\n"); if (iuer & ASLE_IUER_ROTATION_LOCK_BTN) drm_dbg(&dev_priv->drm, "Button array event is not supported (rotation lock)\n"); if (iuer & ASLE_IUER_VOLUME_DOWN_BTN) drm_dbg(&dev_priv->drm, "Button array event is not supported (volume down)\n"); if (iuer & ASLE_IUER_VOLUME_UP_BTN) drm_dbg(&dev_priv->drm, "Button array event is not supported (volume up)\n"); if (iuer & ASLE_IUER_WINDOWS_BTN) drm_dbg(&dev_priv->drm, "Button array event is not supported (windows)\n"); if (iuer & ASLE_IUER_POWER_BTN) drm_dbg(&dev_priv->drm, "Button array event is not supported (power)\n"); return ASLC_BUTTON_ARRAY_FAILED; } static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) drm_dbg(&dev_priv->drm, "Convertible is not supported (clamshell)\n"); else drm_dbg(&dev_priv->drm, "Convertible is not supported (slate)\n"); return ASLC_CONVERTIBLE_FAILED; } static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n"); else drm_dbg(&dev_priv->drm, "Docking is not supported (undocked)\n"); return ASLC_DOCKING_FAILED; } static u32 asle_isct_state(struct drm_i915_private *dev_priv) { drm_dbg(&dev_priv->drm, "ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; } static void asle_work(struct work_struct *work) { struct intel_opregion *opregion = container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = container_of(opregion, struct drm_i915_private, display.opregion); struct opregion_asle *asle = dev_priv->display.opregion.asle; u32 aslc_stat = 0; u32 aslc_req; if (!asle) return; aslc_req = asle->aslc; if (!(aslc_req & ASLC_REQ_MSK)) { drm_dbg(&dev_priv->drm, "No request on ASLC interrupt 0x%08x\n", aslc_req); return; } if (aslc_req & ASLC_SET_ALS_ILLUM) aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi); if (aslc_req & ASLC_SET_BACKLIGHT) aslc_stat |= asle_set_backlight(dev_priv, asle->bclp); if (aslc_req & ASLC_SET_PFIT) aslc_stat |= asle_set_pfit(dev_priv, asle->pfit); if (aslc_req & ASLC_SET_PWM_FREQ) aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb); if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) aslc_stat |= asle_set_supported_rotation_angles(dev_priv, asle->srot); if (aslc_req & ASLC_BUTTON_ARRAY) aslc_stat |= asle_set_button_array(dev_priv, asle->iuer); if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) aslc_stat |= asle_set_convertible(dev_priv, asle->iuer); if (aslc_req & ASLC_DOCKING_INDICATOR) aslc_stat |= asle_set_docking(dev_priv, asle->iuer); if (aslc_req & ASLC_ISCT_STATE_CHANGE) aslc_stat |= asle_isct_state(dev_priv); asle->aslc = aslc_stat; } 
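/*
 * Illustrative aside, not part of the driver: asle_set_backlight() above
 * consumes BCLP, a 0-255 brightness request with a valid flag in bit 31,
 * and reports the applied level back through CBLV as a percentage with its
 * own valid flag.  A stand-alone sketch of that conversion, using the same
 * bit layout as the ASLE_BCLP and ASLE_CBLV definitions earlier in this
 * file:
 */
#include <stdint.h>
#include <stdio.h>

#define BCLP_VALID (1u << 31)
#define CBLV_VALID (1u << 31)

/* Returns the CBLV word for a BCLP request, or 0 if the request is invalid. */
static uint32_t bclp_to_cblv(uint32_t bclp)
{
	if (!(bclp & BCLP_VALID))
		return 0;

	bclp &= ~BCLP_VALID;
	if (bclp > 255)
		return 0;

	/* DIV_ROUND_UP(bclp * 100, 255), as in the driver */
	return ((bclp * 100 + 254) / 255) | CBLV_VALID;
}

int main(void)
{
	/* a request for 128/255 is reported back as 51% */
	printf("cblv = 0x%08x\n", bclp_to_cblv(BCLP_VALID | 128));
	return 0;
}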
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { if (dev_priv->display.opregion.asle) queue_work(dev_priv->unordered_wq, &dev_priv->display.opregion.asle_work); } #define ACPI_EV_DISPLAY_SWITCH (1<<0) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) /* * The only video events relevant to opregion are 0x80. These indicate either a * docking event, lid switch or display switch request. In Linux, these are * handled by the dock, button and video drivers. */ static int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) { struct intel_opregion *opregion = container_of(nb, struct intel_opregion, acpi_notifier); struct acpi_bus_event *event = data; struct opregion_acpi *acpi; int ret = NOTIFY_OK; if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; acpi = opregion->acpi; if (event->type == 0x80 && ((acpi->cevt & 1) == 0)) ret = NOTIFY_BAD; acpi->csts = 0; return ret; } /* * Initialise the DIDL field in opregion. This passes a list of devices to * the firmware. Values are defined by section B.4.2 of the ACPI specification * (version 3) */ static void set_did(struct intel_opregion *opregion, int i, u32 val) { if (i < ARRAY_SIZE(opregion->acpi->didl)) { opregion->acpi->didl[i] = val; } else { i -= ARRAY_SIZE(opregion->acpi->didl); if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) return; opregion->acpi->did2[i] = val; } } static void intel_didl_outputs(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0, max_outputs; /* * In theory, did2, the extended didl, gets added at opregion version * 3.0. In practice, however, we're supposed to set it for earlier * versions as well, since a BIOS that doesn't understand did2 should * not look at it anyway. Use a variable so we can tweak this if a need * arises later. */ max_outputs = ARRAY_SIZE(opregion->acpi->didl) + ARRAY_SIZE(opregion->acpi->did2); intel_acpi_device_id_update(dev_priv); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (i < max_outputs) set_did(opregion, i, connector->acpi_device_id); i++; } drm_connector_list_iter_end(&conn_iter); drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i); if (i > max_outputs) drm_err(&dev_priv->drm, "More than %d outputs in connector list\n", max_outputs); /* If fewer than max outputs, the list must be null terminated */ if (i < max_outputs) set_did(opregion, i, 0); } static void intel_setup_cadls(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0; /* * Initialize the CADL field from the connector device ids. This is * essentially the same as copying from the DIDL. Technically, this is * not always correct as display outputs may exist, but not active. This * initialization is necessary for some Clevo laptops that check this * field before processing the brightness and display switching hotkeys. * * Note that internal panels should be at the front of the connector * list already, ensuring they're not left out. 
*/ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (i >= ARRAY_SIZE(opregion->acpi->cadl)) break; opregion->acpi->cadl[i++] = connector->acpi_device_id; } drm_connector_list_iter_end(&conn_iter); /* If fewer than 8 active devices, the list must be null terminated */ if (i < ARRAY_SIZE(opregion->acpi->cadl)) opregion->acpi->cadl[i] = 0; } static void swsci_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; bool requested_callbacks = false; u32 tmp; /* Sub-function code 0 is okay, let's allow them. */ opregion->swsci_gbda_sub_functions = 1; opregion->swsci_sbcb_sub_functions = 1; /* We use GBDA to ask for supported GBDA calls. */ if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ tmp <<= 1; opregion->swsci_gbda_sub_functions |= tmp; } /* * We also use GBDA to ask for _requested_ SBCB callbacks. The driver * must not call interfaces that are not specifically requested by the * bios. */ if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { /* here, the bits already match sub-function codes */ opregion->swsci_sbcb_sub_functions |= tmp; requested_callbacks = true; } /* * But we use SBCB to ask for _supported_ SBCB calls. This does not mean * the callback is _requested_. But we still can't call interfaces that * are not requested. */ if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ u32 low = tmp & 0x7ff; u32 high = tmp & ~0xfff; /* bit 11 is reserved */ tmp = (high << 4) | (low << 1) | 1; /* best guess what to do with supported wrt requested */ if (requested_callbacks) { u32 req = opregion->swsci_sbcb_sub_functions; if ((req & tmp) != req) drm_dbg(&dev_priv->drm, "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp); /* XXX: for now, trust the requested callbacks */ /* opregion->swsci_sbcb_sub_functions &= tmp; */ } else { opregion->swsci_sbcb_sub_functions |= tmp; } } drm_dbg(&dev_priv->drm, "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n", opregion->swsci_gbda_sub_functions, opregion->swsci_sbcb_sub_functions); } static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_no_opregion_vbt[] = { { .callback = intel_no_opregion_vbt_callback, .ident = "ThinkCentre A57", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), }, }, { } }; static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; const struct firmware *fw = NULL; const char *name = dev_priv->params.vbt_firmware; int ret; if (!name || !*name) return -ENOENT; ret = request_firmware(&fw, name, dev_priv->drm.dev); if (ret) { drm_err(&dev_priv->drm, "Requesting VBT firmware \"%s\" failed (%d)\n", name, ret); return ret; } if (intel_bios_is_valid_vbt(fw->data, fw->size)) { opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL); if (opregion->vbt_firmware) { drm_dbg_kms(&dev_priv->drm, "Found valid VBT firmware \"%s\"\n", name); opregion->vbt = opregion->vbt_firmware; opregion->vbt_size = fw->size; ret = 0; } else { ret = -ENOMEM; } } else { drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n", name); ret = -EINVAL; } release_firmware(fw); return ret; } 
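/*
 * Illustrative aside, not part of the driver: SWSCI_FUNCTION_CODE() earlier
 * in this file packs the GBDA/SBCB main function into bits 4:1 of the SCIC
 * word and the sub-function into bits 15:8; check_swsci_function() reverses
 * that to index the advertised sub-function bitmasks.  A stand-alone
 * round-trip using the same shifts and masks:
 */
#include <stdint.h>
#include <stdio.h>

#define MAIN_SHIFT 1
#define MAIN_MASK  (0xfu << MAIN_SHIFT)
#define SUB_SHIFT  8
#define SUB_MASK   (0xffu << SUB_SHIFT)

static uint32_t function_code(uint32_t main_fn, uint32_t sub_fn)
{
	return (main_fn << MAIN_SHIFT) | (sub_fn << SUB_SHIFT);
}

int main(void)
{
	/* GBDA (4), sub-function 5: the "panel details" call above */
	uint32_t code = function_code(4, 5);
	uint32_t main_fn = (code & MAIN_MASK) >> MAIN_SHIFT;
	uint32_t sub_fn = (code & SUB_MASK) >> SUB_SHIFT;

	printf("code=0x%04x main=%u sub=%u\n", code, main_fn, sub_fn);
	return 0;
}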
int intel_opregion_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; void *base; const void *vbt; u32 vbt_size; BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); pci_read_config_dword(pdev, ASLS, &asls); drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n"); return -ENOTSUPP; } INIT_WORK(&opregion->asle_work, asle_work); base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); if (!base) return -ENOMEM; memcpy(buf, base, sizeof(buf)); if (memcmp(buf, OPREGION_SIGNATURE, 16)) { drm_dbg(&dev_priv->drm, "opregion signature mismatch\n"); err = -EINVAL; goto err_out; } opregion->header = base; opregion->lid_state = base + ACPI_CLID; drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n", opregion->header->over.major, opregion->header->over.minor, opregion->header->over.revision); mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; /* * Indicate we handle monitor hotplug events ourselves so we do * not need ACPI notifications for them. Disabling these avoids * triggering the AML code doing the notifation, which may be * broken as Windows also seems to disable these. */ opregion->acpi->chpd = 1; } if (mboxes & MBOX_SWSCI) { u8 major = opregion->header->over.major; if (major >= 3) { drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n"); } else { if (major >= 2) drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n"); drm_dbg(&dev_priv->drm, "SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; swsci_setup(dev_priv); } } if (mboxes & MBOX_ASLE) { drm_dbg(&dev_priv->drm, "ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; opregion->asle->ardy = ASLE_ARDY_NOT_READY; } if (mboxes & MBOX_ASLE_EXT) { drm_dbg(&dev_priv->drm, "ASLE extension supported\n"); opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET; } if (mboxes & MBOX_BACKLIGHT) { drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n"); } if (intel_load_vbt_firmware(dev_priv) == 0) goto out; if (dmi_check_system(intel_no_opregion_vbt)) goto out; if (opregion->header->over.major >= 2 && opregion->asle && opregion->asle->rvda && opregion->asle->rvds) { resource_size_t rvda = opregion->asle->rvda; /* * opregion 2.0: rvda is the physical VBT address. * * opregion 2.1+: rvda is unsigned, relative offset from * opregion base, and should never point within opregion. 
*/ if (opregion->header->over.major > 2 || opregion->header->over.minor >= 1) { drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE); rvda += asls; } opregion->rvda = memremap(rvda, opregion->asle->rvds, MEMREMAP_WB); vbt = opregion->rvda; vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { drm_dbg_kms(&dev_priv->drm, "Found valid VBT in ACPI OpRegion (RVDA)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; goto out; } else { drm_dbg_kms(&dev_priv->drm, "Invalid VBT in ACPI OpRegion (RVDA)\n"); memunmap(opregion->rvda); opregion->rvda = NULL; } } vbt = base + OPREGION_VBT_OFFSET; /* * The VBT specification says that if the ASLE ext mailbox is not used * its area is reserved, but on some CHT boards the VBT extends into the * ASLE ext area. Allow this even though it is against the spec, so we * do not end up rejecting the VBT on those boards (and end up not * finding the LCD panel because of this). */ vbt_size = (mboxes & MBOX_ASLE_EXT) ? OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; vbt_size -= OPREGION_VBT_OFFSET; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { drm_dbg_kms(&dev_priv->drm, "Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; } else { drm_dbg_kms(&dev_priv->drm, "Invalid VBT in ACPI OpRegion (Mailbox #4)\n"); } out: return 0; err_out: memunmap(base); return err; } static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id) { DRM_INFO("Using panel type from OpRegion on %s\n", id->ident); return 1; } static const struct dmi_system_id intel_use_opregion_panel_type[] = { { .callback = intel_use_opregion_panel_type_callback, .ident = "Conrac GmbH IX45GM2", .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"), DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"), }, }, { } }; int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) { u32 panel_details; int ret; ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) return ret; ret = (panel_details >> 8) & 0xff; if (ret > 0x10) { drm_dbg_kms(&dev_priv->drm, "Invalid OpRegion panel type 0x%x\n", ret); return -EINVAL; } /* fall back to VBT panel type? */ if (ret == 0x0) { drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n"); return -ENODEV; } /* * So far we know that some machined must use it, others must not use it. * There doesn't seem to be any way to determine which way to go, except * via a quirk list :( */ if (!dmi_check_system(intel_use_opregion_panel_type)) { drm_dbg_kms(&dev_priv->drm, "Ignoring OpRegion panel type (%d)\n", ret - 1); return -ENODEV; } return ret - 1; } /** * intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5 * @intel_connector: eDP connector * * This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed * to it. * * Returns: * The EDID in the OpRegion, or NULL if there is none or it's invalid. 
* */ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_opregion *opregion = &i915->display.opregion; const struct drm_edid *drm_edid; const void *edid; int len; if (!opregion->asle_ext) return NULL; edid = opregion->asle_ext->bddc; /* Validity corresponds to number of 128-byte blocks */ len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128; if (!len || !memchr_inv(edid, 0, len)) return NULL; drm_edid = drm_edid_alloc(edid, len); if (!drm_edid_valid(drm_edid)) { drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n"); drm_edid_free(drm_edid); drm_edid = NULL; } return drm_edid; } bool intel_opregion_headless_sku(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; struct opregion_header *header = opregion->header; if (!header || header->over.major < 2 || (header->over.major == 2 && header->over.minor < 3)) return false; return opregion->header->pcon & PCON_HEADLESS_SKU; } void intel_opregion_register(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; if (opregion->acpi) { opregion->acpi_notifier.notifier_call = intel_opregion_video_event; register_acpi_notifier(&opregion->acpi_notifier); } intel_opregion_resume(i915); } static void intel_opregion_resume_display(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (opregion->acpi) { intel_didl_outputs(i915); intel_setup_cadls(i915); /* * Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video * module. We don't actually need to do anything with them. 
*/ opregion->acpi->csts = 0; opregion->acpi->drdy = 1; } if (opregion->asle) { opregion->asle->tche = ASLE_TCHE_BLC_EN; opregion->asle->ardy = ASLE_ARDY_READY; } /* Some platforms abuse the _DSM to enable MUX */ intel_dsm_get_bios_data_funcs_supported(i915); } void intel_opregion_resume(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; if (HAS_DISPLAY(i915)) intel_opregion_resume_display(i915); intel_opregion_notify_adapter(i915, PCI_D0); } static void intel_opregion_suspend_display(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; cancel_work_sync(&i915->display.opregion.asle_work); if (opregion->acpi) opregion->acpi->drdy = 0; } void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) { struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; intel_opregion_notify_adapter(i915, state); if (HAS_DISPLAY(i915)) intel_opregion_suspend_display(i915); } void intel_opregion_unregister(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; intel_opregion_suspend(i915, PCI_D1); if (!opregion->header) return; if (opregion->acpi_notifier.notifier_call) { unregister_acpi_notifier(&opregion->acpi_notifier); opregion->acpi_notifier.notifier_call = NULL; } } void intel_opregion_cleanup(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; /* just clear all opregion memory pointers now */ memunmap(opregion->header); if (opregion->rvda) { memunmap(opregion->rvda); opregion->rvda = NULL; } if (opregion->vbt_firmware) { kfree(opregion->vbt_firmware); opregion->vbt_firmware = NULL; } opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; opregion->asle_ext = NULL; opregion->vbt = NULL; opregion->lid_state = NULL; }
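One detail from intel_opregion_setup() above deserves a restatement: the ASLE RVDA field changes meaning with the OpRegion version. On 2.0 it already holds the physical address of the raw VBT; from 2.1 onwards it is an offset relative to the OpRegion base (ASLS) and must be rebased before mapping. A stand-alone sketch of that resolution, with hypothetical sample values:

#include <stdint.h>
#include <stdio.h>

/* Resolve the physical VBT address from RVDA, per OpRegion version. */
static uint64_t resolve_rvda(uint8_t major, uint8_t minor,
			     uint64_t asls, uint64_t rvda)
{
	/* 2.0: RVDA is already physical; 2.1+: relative to the OpRegion base */
	if (major > 2 || (major == 2 && minor >= 1))
		return asls + rvda;
	return rvda;
}

int main(void)
{
	/* hypothetical addresses, purely for illustration */
	uint64_t asls = 0x8f6d0000;

	printf("v2.0: 0x%llx\n",
	       (unsigned long long)resolve_rvda(2, 0, asls, 0x8f6f2000));
	printf("v2.1: 0x%llx\n",
	       (unsigned long long)resolve_rvda(2, 1, asls, 0x2000));
	return 0;
}

The explicit major == 2 test is only needed here because the sketch is not already inside the driver's major >= 2 branch.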
linux-master
drivers/gpu/drm/i915/display/intel_opregion.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/string.h> #include "i915_drv.h" #include "intel_atomic.h" #include "intel_display_types.h" #include "intel_global_state.h" static void __intel_atomic_global_state_free(struct kref *kref) { struct intel_global_state *obj_state = container_of(kref, struct intel_global_state, ref); struct intel_global_obj *obj = obj_state->obj; obj->funcs->atomic_destroy_state(obj, obj_state); } static void intel_atomic_global_state_put(struct intel_global_state *obj_state) { kref_put(&obj_state->ref, __intel_atomic_global_state_free); } static struct intel_global_state * intel_atomic_global_state_get(struct intel_global_state *obj_state) { kref_get(&obj_state->ref); return obj_state; } void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv, struct intel_global_obj *obj, struct intel_global_state *state, const struct intel_global_state_funcs *funcs) { memset(obj, 0, sizeof(*obj)); state->obj = obj; kref_init(&state->ref); obj->state = state; obj->funcs = funcs; list_add_tail(&obj->head, &dev_priv->display.global.obj_list); } void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv) { struct intel_global_obj *obj, *next; list_for_each_entry_safe(obj, next, &dev_priv->display.global.obj_list, head) { list_del(&obj->head); drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1); intel_atomic_global_state_put(obj->state); } } static void assert_global_state_write_locked(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) drm_modeset_lock_assert_held(&crtc->base.mutex); } static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx, struct drm_modeset_lock *lock) { struct drm_modeset_lock *l; list_for_each_entry(l, &ctx->locked, head) { if (lock == l) return true; } return false; } static void assert_global_state_read_locked(struct intel_atomic_state *state) { struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) { if (modeset_lock_is_held(ctx, &crtc->base.mutex)) return; } drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n"); } struct intel_global_state * intel_atomic_get_global_obj_state(struct intel_atomic_state *state, struct intel_global_obj *obj) { struct drm_i915_private *i915 = to_i915(state->base.dev); int index, num_objs, i; size_t size; struct __intel_global_objs_state *arr; struct intel_global_state *obj_state; for (i = 0; i < state->num_global_objs; i++) if (obj == state->global_objs[i].ptr) return state->global_objs[i].state; assert_global_state_read_locked(state); num_objs = state->num_global_objs + 1; size = sizeof(*state->global_objs) * num_objs; arr = krealloc(state->global_objs, size, GFP_KERNEL); if (!arr) return ERR_PTR(-ENOMEM); state->global_objs = arr; index = state->num_global_objs; memset(&state->global_objs[index], 0, sizeof(*state->global_objs)); obj_state = obj->funcs->atomic_duplicate_state(obj); if (!obj_state) return ERR_PTR(-ENOMEM); obj_state->obj = obj; obj_state->changed = false; kref_init(&obj_state->ref); state->global_objs[index].state = obj_state; state->global_objs[index].old_state = intel_atomic_global_state_get(obj->state); state->global_objs[index].new_state = obj_state; state->global_objs[index].ptr = obj; obj_state->state = state; state->num_global_objs = num_objs; drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n", obj, 
obj_state, state); return obj_state; } struct intel_global_state * intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state, struct intel_global_obj *obj) { int i; for (i = 0; i < state->num_global_objs; i++) if (obj == state->global_objs[i].ptr) return state->global_objs[i].old_state; return NULL; } struct intel_global_state * intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state, struct intel_global_obj *obj) { int i; for (i = 0; i < state->num_global_objs; i++) if (obj == state->global_objs[i].ptr) return state->global_objs[i].new_state; return NULL; } void intel_atomic_swap_global_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *old_obj_state, *new_obj_state; struct intel_global_obj *obj; int i; for_each_oldnew_global_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state); /* * If the new state wasn't modified (and properly * locked for write access) we throw it away. */ if (!new_obj_state->changed) continue; assert_global_state_write_locked(dev_priv); old_obj_state->state = state; new_obj_state->state = NULL; state->global_objs[i].state = old_obj_state; intel_atomic_global_state_put(obj->state); obj->state = intel_atomic_global_state_get(new_obj_state); } } void intel_atomic_clear_global_state(struct intel_atomic_state *state) { int i; for (i = 0; i < state->num_global_objs; i++) { intel_atomic_global_state_put(state->global_objs[i].old_state); intel_atomic_global_state_put(state->global_objs[i].new_state); state->global_objs[i].ptr = NULL; state->global_objs[i].state = NULL; state->global_objs[i].old_state = NULL; state->global_objs[i].new_state = NULL; } state->num_global_objs = 0; } int intel_atomic_lock_global_state(struct intel_global_state *obj_state) { struct intel_atomic_state *state = obj_state->state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) { int ret; ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx); if (ret) return ret; } obj_state->changed = true; return 0; } int intel_atomic_serialize_global_state(struct intel_global_state *obj_state) { struct intel_atomic_state *state = obj_state->state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state; crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); } obj_state->changed = true; return 0; } bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc *crtc; for_each_intel_crtc(&i915->drm, crtc) if (!intel_atomic_get_new_crtc_state(state, crtc)) return false; return true; }
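intel_global_state.c above reference-counts every global object state with a kref: a duplicated state starts out with a single reference, get/put adjust it, and swapping in a new state drops the reference held on the old one so it is freed once nothing else uses it. A minimal stand-alone sketch of that ownership pattern in plain C, with no kernel kref and made-up names:

#include <stdio.h>
#include <stdlib.h>

struct state {
	int refcount;
	int value;
};

static struct state *state_alloc(int value)
{
	struct state *s = malloc(sizeof(*s));

	if (!s)
		abort();
	s->refcount = 1;	/* like kref_init() on a duplicated state */
	s->value = value;
	return s;
}

static struct state *state_get(struct state *s)
{
	s->refcount++;
	return s;
}

static void state_put(struct state *s)
{
	if (--s->refcount == 0) {
		printf("freeing state %d\n", s->value);
		free(s);
	}
}

int main(void)
{
	struct state *current_state = state_alloc(0);
	struct state *new_state = state_alloc(1);

	/* swap: take a reference on the new state, drop the old one */
	struct state *old = current_state;
	current_state = state_get(new_state);
	state_put(old);

	/* the atomic state's own reference on new_state is dropped later */
	state_put(new_state);
	state_put(current_state);
	return 0;
}

The same shape shows up in intel_atomic_swap_global_state() above: obj->state only changes hands under a get on the new state paired with a put on the old one.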
linux-master
drivers/gpu/drm/i915/display/intel_global_state.c
/* * Copyright © 2006-2008 Intel Corporation * Jesse Barnes <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * */ /** @file * Integrated TV-out support for the 915GM and 945GM. */ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_load_detect.h" #include "intel_tv.h" #include "intel_tv_regs.h" enum tv_margin { TV_MARGIN_LEFT, TV_MARGIN_TOP, TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM }; struct intel_tv { struct intel_encoder base; int type; }; struct video_levels { u16 blank, black; u8 burst; }; struct color_conversion { u16 ry, gy, by, ay; u16 ru, gu, bu, au; u16 rv, gv, bv, av; }; static const u32 filter_table[] = { 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, 
0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, 0x28003100, 0x28002F00, 0x00003100, 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, 0x28003100, 0x28002F00, 0x00003100, }; /* * Color conversion values have 3 separate fixed point formats: * * 10 bit fields (ay, au) * 1.9 fixed point (b.bbbbbbbbb) * 11 bit fields (ry, by, ru, gu, gv) * exp.mantissa (ee.mmmmmmmmm) * ee = 00 = 10^-1 (0.mmmmmmmmm) * ee = 01 = 10^-2 (0.0mmmmmmmmm) * ee = 10 = 10^-3 (0.00mmmmmmmmm) * ee = 11 = 10^-4 (0.000mmmmmmmmm) * 12 bit fields (gy, rv, bu) * exp.mantissa (eee.mmmmmmmmm) * eee = 000 = 10^-1 (0.mmmmmmmmm) * eee = 001 = 10^-2 (0.0mmmmmmmmm) * eee = 010 = 10^-3 (0.00mmmmmmmmm) * eee = 011 = 10^-4 (0.000mmmmmmmmm) * eee = 100 = reserved * eee = 101 = reserved * eee = 110 = reserved * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation) * * Saturation and contrast are 8 bits, with their own representation: * 8 bit field (saturation, contrast) * exp.mantissa (ee.mmmmmm) * ee = 00 = 10^-1 (0.mmmmmm) * ee = 01 = 10^0 (m.mmmmm) * ee = 10 = 10^1 (mm.mmmm) * ee = 11 = 10^2 (mmm.mmm) * * Simple conversion function: * * static u32 * float_to_csc_11(float f) * { * u32 exp; * u32 mant; * u32 ret; * * if (f < 0) * f = -f; * * if (f >= 1) { * exp = 0x7; * mant = 1 << 8; * } else { * for (exp = 0; exp < 3 && f < 0.5; exp++) * f *= 2.0; * mant = (f * (1 << 9) + 0.5); * if (mant >= (1 << 9)) * mant = (1 << 9) - 1; * } * ret = (exp << 9) | mant; * return ret; * } */ /* * Behold, magic numbers! If we plant them they might grow a big * s-video cable to the sky... or something. * * Pre-converted to appropriate hex value. 
*/ /* * PAL & NTSC values for composite & s-video connections */ static const struct color_conversion ntsc_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_composite = { .blank = 225, .black = 267, .burst = 113, }; static const struct color_conversion ntsc_m_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels ntsc_m_levels_svideo = { .blank = 266, .black = 316, .burst = 133, }; static const struct color_conversion ntsc_j_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_composite = { .blank = 225, .black = 225, .burst = 113, }; static const struct color_conversion ntsc_j_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, }; static const struct video_levels ntsc_j_levels_svideo = { .blank = 266, .black = 266, .burst = 133, }; static const struct color_conversion pal_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, }; static const struct video_levels pal_levels_composite = { .blank = 237, .black = 237, .burst = 118, }; static const struct color_conversion pal_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, }; static const struct video_levels pal_levels_svideo = { .blank = 280, .black = 280, .burst = 139, }; static const struct color_conversion pal_m_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_m_levels_composite = { .blank = 225, .black = 267, .burst = 113, }; static const struct color_conversion pal_m_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_m_levels_svideo = { .blank = 266, .black = 316, .burst = 133, }; static const struct color_conversion pal_n_csc_composite = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, }; static const struct video_levels pal_n_levels_composite = { .blank = 225, .black = 267, .burst = 118, }; static const struct color_conversion pal_n_csc_svideo = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, }; static const struct video_levels pal_n_levels_svideo = { .blank = 266, .black = 316, .burst = 139, }; /* * Component connections */ static const struct color_conversion sdtv_csc_yprpb = { .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, .ru = 
0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, }; static const struct color_conversion hdtv_csc_yprpb = { .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, }; static const struct video_levels component_levels = { .blank = 279, .black = 279, .burst = 0, }; struct tv_mode { const char *name; u32 clock; u16 refresh; /* in millihertz (for precision) */ u8 oversample; u8 hsync_end; u16 hblank_start, hblank_end, htotal; bool progressive : 1, trilevel_sync : 1, component_only : 1; u8 vsync_start_f1, vsync_start_f2, vsync_len; bool veq_ena : 1; u8 veq_start_f1, veq_start_f2, veq_len; u8 vi_end_f1, vi_end_f2; u16 nbr_end; bool burst_ena : 1; u8 hburst_start, hburst_len; u8 vburst_start_f1; u16 vburst_end_f1; u8 vburst_start_f2; u16 vburst_end_f2; u8 vburst_start_f3; u16 vburst_end_f3; u8 vburst_start_f4; u16 vburst_end_f4; /* * subcarrier programming */ u16 dda2_size, dda3_size; u8 dda1_inc; u16 dda2_inc, dda3_inc; u32 sc_reset; bool pal_burst : 1; /* * blank/black levels */ const struct video_levels *composite_levels, *svideo_levels; const struct color_conversion *composite_color, *svideo_color; const u32 *filter_table; }; /* * Sub carrier DDA * * I think this works as follows: * * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096 * * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value * * So, * dda1_ideal = subcarrier/pixel * 4096 * dda1_inc = floor (dda1_ideal) * dda2 = dda1_ideal - dda1_inc * * then pick a ratio for dda2 that gives the closest approximation. If * you can't get close enough, you can play with dda3 as well. This * seems likely to happen when dda2 is small as the jumps would be larger * * To invert this, * * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size) * * The constants below were all computed using a 107.520MHz clock */ /* * Register programming values for TV modes. * * These values account for -1s required. 
*/ static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", .clock = 108000, .refresh = 59940, .oversample = 8, .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, .svideo_levels = &ntsc_m_levels_svideo, .svideo_color = &ntsc_m_csc_svideo, .filter_table = filter_table, }, { .name = "NTSC-443", .clock = 108000, .refresh = 59940, .oversample = 8, .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, .dda2_inc = 4093, .dda2_size = 27456, .dda3_inc = 310, .dda3_size = 525, .sc_reset = TV_SC_RESET_NEVER, .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, .svideo_levels = &ntsc_m_levels_svideo, .svideo_color = &ntsc_m_csc_svideo, .filter_table = filter_table, }, { .name = "NTSC-J", .clock = 108000, .refresh = 59940, .oversample = 8, .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 20800, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, .pal_burst = false, .composite_levels = &ntsc_j_levels_composite, .composite_color = &ntsc_j_csc_composite, .svideo_levels = &ntsc_j_levels_svideo, .svideo_color = &ntsc_j_csc_svideo, .filter_table = filter_table, }, { .name = "PAL-M", .clock = 108000, .refresh = 59940, .oversample = 8, .component_only = false, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, .progressive = false, 
.trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, .vburst_start_f3 = 9, .vburst_end_f3 = 240, .vburst_start_f4 = 10, .vburst_end_f4 = 240, /* desired 3.5800000 actual 3.5800000 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 16704, .dda2_size = 27456, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_m_levels_composite, .composite_color = &pal_m_csc_composite, .svideo_levels = &pal_m_levels_svideo, .svideo_color = &pal_m_csc_svideo, .filter_table = filter_table, }, { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", .clock = 108000, .refresh = 50000, .oversample = 8, .component_only = false, .hsync_end = 64, .hblank_end = 128, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, .burst_ena = true, .hburst_start = 73, .hburst_len = 34, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, .vburst_start_f3 = 9, .vburst_end_f3 = 286, .vburst_start_f4 = 9, .vburst_end_f4 = 285, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 135, .dda2_inc = 23578, .dda2_size = 27648, .dda3_inc = 134, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_n_levels_composite, .composite_color = &pal_n_csc_composite, .svideo_levels = &pal_n_levels_svideo, .svideo_color = &pal_n_csc_svideo, .filter_table = filter_table, }, { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", .clock = 108000, .refresh = 50000, .oversample = 8, .component_only = false, .hsync_end = 64, .hblank_end = 142, .hblank_start = 844, .htotal = 863, .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 5, .vsync_start_f2 = 6, .vsync_len = 5, .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 15, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, .burst_ena = true, .hburst_start = 73, .hburst_len = 32, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, .vburst_start_f3 = 9, .vburst_end_f3 = 286, .vburst_start_f4 = 9, .vburst_end_f4 = 285, /* desired 4.4336180 actual 4.4336180 clock 107.52 */ .dda1_inc = 168, .dda2_inc = 4122, .dda2_size = 27648, .dda3_inc = 67, .dda3_size = 625, .sc_reset = TV_SC_RESET_EVERY_8, .pal_burst = true, .composite_levels = &pal_levels_composite, .composite_color = &pal_csc_composite, .svideo_levels = &pal_levels_svideo, .svideo_color = &pal_csc_svideo, .filter_table = filter_table, }, { .name = "480p", .clock = 108000, .refresh = 59940, .oversample = 4, .component_only = true, .hsync_end = 64, .hblank_end = 122, .hblank_start = 842, .htotal = 857, .progressive = true, .trilevel_sync = false, .vsync_start_f1 = 12, .vsync_start_f2 = 12, .vsync_len = 12, .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 479, .burst_ena = false, .filter_table = filter_table, }, { .name = "576p", .clock = 108000, .refresh = 50000, .oversample = 4, .component_only = true, .hsync_end = 64, .hblank_end = 139, .hblank_start = 859, .htotal = 863, .progressive = true, 
.trilevel_sync = false, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, .veq_ena = false, .vi_end_f1 = 48, .vi_end_f2 = 48, .nbr_end = 575, .burst_ena = false, .filter_table = filter_table, }, { .name = "720p@60Hz", .clock = 148500, .refresh = 60000, .oversample = 2, .component_only = true, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1649, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, .burst_ena = false, .filter_table = filter_table, }, { .name = "720p@50Hz", .clock = 148500, .refresh = 50000, .oversample = 2, .component_only = true, .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1979, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080i@50Hz", .clock = 148500, .refresh = 50000, .oversample = 2, .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2639, .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080i@60Hz", .clock = 148500, .refresh = 60000, .oversample = 2, .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080p@30Hz", .clock = 148500, .refresh = 30000, .oversample = 2, .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 8, .vsync_start_f2 = 8, .vsync_len = 10, .veq_ena = false, .veq_start_f1 = 0, .veq_start_f2 = 0, .veq_len = 0, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 1079, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080p@50Hz", .clock = 148500, .refresh = 50000, .oversample = 1, .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2639, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 8, .vsync_start_f2 = 8, .vsync_len = 10, .veq_ena = false, .veq_start_f1 = 0, .veq_start_f2 = 0, .veq_len = 0, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 1079, .burst_ena = false, .filter_table = filter_table, }, { .name = "1080p@60Hz", .clock = 148500, .refresh = 60000, .oversample = 1, .component_only = true, .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 8, .vsync_start_f2 = 8, .vsync_len = 10, .veq_ena = false, .veq_start_f1 = 0, .veq_start_f2 = 0, .veq_len = 0, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 1079, .burst_ena = false, .filter_table = filter_table, }, }; struct intel_tv_connector_state { struct drm_connector_state base; /* * May need to override the user margins for * gen3 >1024 wide source vertical centering. 
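* The override values are filled in by intel_tv_compute_config() when the vertical filter has to be bypassed and the image is centered instead of scaled.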
*/ struct { u16 top, bottom; } margins; bool bypass_vfilter; }; #define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base) static struct drm_connector_state * intel_tv_connector_duplicate_state(struct drm_connector *connector) { struct intel_tv_connector_state *state; state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_connector_duplicate_state(connector, &state->base); return &state->base; } static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) { return container_of(encoder, struct intel_tv, base); } static struct intel_tv *intel_attached_tv(struct intel_connector *connector) { return enc_to_tv(intel_attached_encoder(connector)); } static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp = intel_de_read(dev_priv, TV_CTL); *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT; return tmp & TV_ENC_ENABLE; } static void intel_enable_tv(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE); } static void intel_disable_tv(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0); } static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) { int format = conn_state->tv.mode; return &tv_modes[format]; } static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (mode->clock > max_dotclk) return MODE_CLOCK_HIGH; /* Ensure TV refresh is close to desired refresh */ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000) return MODE_CLOCK_RANGE; return MODE_OK; } static int intel_tv_mode_vdisplay(const struct tv_mode *tv_mode) { if (tv_mode->progressive) return tv_mode->nbr_end + 1; else return 2 * (tv_mode->nbr_end + 1); } static void intel_tv_mode_to_mode(struct drm_display_mode *mode, const struct tv_mode *tv_mode, int clock) { mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive); /* * tv_mode horizontal timings: * * hsync_end * | hblank_end * | | hblank_start * | | | htotal * | _______ | * ____/ \___ * \__/ \ */ mode->hdisplay = tv_mode->hblank_start - tv_mode->hblank_end; mode->hsync_start = mode->hdisplay + tv_mode->htotal - tv_mode->hblank_start; mode->hsync_end = mode->hsync_start + tv_mode->hsync_end; mode->htotal = tv_mode->htotal + 1; /* * tv_mode vertical timings: * * vsync_start * | vsync_end * | | vi_end nbr_end * | | | | * | | _______ * \__ ____/ \ * \__/ */ mode->vdisplay = intel_tv_mode_vdisplay(tv_mode); if (tv_mode->progressive) { mode->vsync_start = mode->vdisplay + tv_mode->vsync_start_f1 + 1; mode->vsync_end = mode->vsync_start + 
tv_mode->vsync_len; mode->vtotal = mode->vdisplay + tv_mode->vi_end_f1 + 1; } else { mode->vsync_start = mode->vdisplay + tv_mode->vsync_start_f1 + 1 + tv_mode->vsync_start_f2 + 1; mode->vsync_end = mode->vsync_start + 2 * tv_mode->vsync_len; mode->vtotal = mode->vdisplay + tv_mode->vi_end_f1 + 1 + tv_mode->vi_end_f2 + 1; } /* TV has it's own notion of sync and other mode flags, so clear them. */ mode->flags = 0; snprintf(mode->name, sizeof(mode->name), "%dx%d%c (%s)", mode->hdisplay, mode->vdisplay, tv_mode->progressive ? 'p' : 'i', tv_mode->name); } static void intel_tv_scale_mode_horiz(struct drm_display_mode *mode, int hdisplay, int left_margin, int right_margin) { int hsync_start = mode->hsync_start - mode->hdisplay + right_margin; int hsync_end = mode->hsync_end - mode->hdisplay + right_margin; int new_htotal = mode->htotal * hdisplay / (mode->hdisplay - left_margin - right_margin); mode->clock = mode->clock * new_htotal / mode->htotal; mode->hdisplay = hdisplay; mode->hsync_start = hdisplay + hsync_start * new_htotal / mode->htotal; mode->hsync_end = hdisplay + hsync_end * new_htotal / mode->htotal; mode->htotal = new_htotal; } static void intel_tv_scale_mode_vert(struct drm_display_mode *mode, int vdisplay, int top_margin, int bottom_margin) { int vsync_start = mode->vsync_start - mode->vdisplay + bottom_margin; int vsync_end = mode->vsync_end - mode->vdisplay + bottom_margin; int new_vtotal = mode->vtotal * vdisplay / (mode->vdisplay - top_margin - bottom_margin); mode->clock = mode->clock * new_vtotal / mode->vtotal; mode->vdisplay = vdisplay; mode->vsync_start = vdisplay + vsync_start * new_vtotal / mode->vtotal; mode->vsync_end = vdisplay + vsync_end * new_vtotal / mode->vtotal; mode->vtotal = new_vtotal; } static void intel_tv_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode mode = {}; u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp; struct tv_mode tv_mode = {}; int hdisplay = adjusted_mode->crtc_hdisplay; int vdisplay = adjusted_mode->crtc_vdisplay; int xsize, ysize, xpos, ypos; pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT); tv_ctl = intel_de_read(dev_priv, TV_CTL); hctl1 = intel_de_read(dev_priv, TV_H_CTL_1); hctl3 = intel_de_read(dev_priv, TV_H_CTL_3); vctl1 = intel_de_read(dev_priv, TV_V_CTL_1); vctl2 = intel_de_read(dev_priv, TV_V_CTL_2); tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT; tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT; tv_mode.hblank_start = (hctl3 & TV_HBLANK_START_MASK) >> TV_HBLANK_START_SHIFT; tv_mode.hblank_end = (hctl3 & TV_HSYNC_END_MASK) >> TV_HBLANK_END_SHIFT; tv_mode.nbr_end = (vctl1 & TV_NBR_END_MASK) >> TV_NBR_END_SHIFT; tv_mode.vi_end_f1 = (vctl1 & TV_VI_END_F1_MASK) >> TV_VI_END_F1_SHIFT; tv_mode.vi_end_f2 = (vctl1 & TV_VI_END_F2_MASK) >> TV_VI_END_F2_SHIFT; tv_mode.vsync_len = (vctl2 & TV_VSYNC_LEN_MASK) >> TV_VSYNC_LEN_SHIFT; tv_mode.vsync_start_f1 = (vctl2 & TV_VSYNC_START_F1_MASK) >> TV_VSYNC_START_F1_SHIFT; tv_mode.vsync_start_f2 = (vctl2 & TV_VSYNC_START_F2_MASK) >> TV_VSYNC_START_F2_SHIFT; tv_mode.clock = pipe_config->port_clock; tv_mode.progressive = tv_ctl & TV_PROGRESSIVE; switch (tv_ctl & TV_OVERSAMPLE_MASK) { case TV_OVERSAMPLE_8X: tv_mode.oversample = 8; break; case TV_OVERSAMPLE_4X: tv_mode.oversample = 4; break; case TV_OVERSAMPLE_2X: tv_mode.oversample = 2; break; default: tv_mode.oversample = 1; 
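/* TV_OVERSAMPLE_NONE and any other encoding are treated as 1x */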
break; } tmp = intel_de_read(dev_priv, TV_WIN_POS); xpos = tmp >> 16; ypos = tmp & 0xffff; tmp = intel_de_read(dev_priv, TV_WIN_SIZE); xsize = tmp >> 16; ysize = tmp & 0xffff; intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock); drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&mode)); intel_tv_scale_mode_horiz(&mode, hdisplay, xpos, mode.hdisplay - xsize - xpos); intel_tv_scale_mode_vert(&mode, vdisplay, ypos, mode.vdisplay - ysize - ypos); adjusted_mode->crtc_clock = mode.clock; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) adjusted_mode->crtc_clock /= 2; /* pixel counter doesn't work on i965gm TV output */ if (IS_I965GM(dev_priv)) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; } static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv, int hdisplay) { return DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024; } static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode, const struct drm_connector_state *conn_state, int vdisplay) { return tv_mode->crtc_vdisplay - conn_state->tv.margins.top - conn_state->tv.margins.bottom != vdisplay; } static int intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct intel_atomic_state *state = to_intel_atomic_state(pipe_config->uapi.state); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int hdisplay = adjusted_mode->crtc_hdisplay; int vdisplay = adjusted_mode->crtc_vdisplay; int ret; if (!tv_mode) return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; pipe_config->port_clock = tv_mode->clock; ret = intel_dpll_crtc_compute_clock(state, crtc); if (ret) return ret; pipe_config->clock_set = true; intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock); drm_mode_set_crtcinfo(adjusted_mode, 0); if (intel_tv_source_too_wide(dev_priv, hdisplay) || !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) { int extra, top, bottom; extra = adjusted_mode->crtc_vdisplay - vdisplay; if (extra < 0) { drm_dbg_kms(&dev_priv->drm, "No vertical scaling for >1024 pixel wide modes\n"); return -EINVAL; } /* Need to turn off the vertical filter and center the image */ /* Attempt to maintain the relative sizes of the margins */ top = conn_state->tv.margins.top; bottom = conn_state->tv.margins.bottom; if (top + bottom) top = extra * top / (top + bottom); else top = extra / 2; bottom = extra - top; tv_conn_state->margins.top = top; tv_conn_state->margins.bottom = bottom; tv_conn_state->bypass_vfilter = true; if (!tv_mode->progressive) { adjusted_mode->clock /= 2; adjusted_mode->crtc_clock /= 2; adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; } } else { tv_conn_state->margins.top = conn_state->tv.margins.top; tv_conn_state->margins.bottom = conn_state->tv.margins.bottom; tv_conn_state->bypass_vfilter = false; } drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(adjusted_mode)); /* * The pipe scanline counter behaviour looks as follows when * using the TV 
encoder: * * time -> * * dsl=vtotal-1 | | * || || * ___| | ___| | * / | / | * / | / | * dsl=0 ___/ |_____/ | * | | | | | | * ^ ^ ^ ^ ^ * | | | | pipe vblank/first part of tv vblank * | | | bottom margin * | | active * | top margin * remainder of tv vblank * * When the TV encoder is used the pipe wants to run faster * than expected rate. During the active portion the TV * encoder stalls the pipe every few lines to keep it in * check. When the TV encoder reaches the bottom margin the * pipe simply stops. Once we reach the TV vblank the pipe is * no longer stalled and it runs at the max rate (apparently * oversample clock on gen3, cdclk on gen4). Once the pipe * reaches the pipe vtotal the pipe stops for the remainder * of the TV vblank/top margin. The pipe starts up again when * the TV encoder exits the top margin. * * To avoid huge hassles for vblank timestamping we scale * the pipe timings as if the pipe always runs at the average * rate it maintains during the active period. This also * gives us a reasonable guesstimate as to the pixel rate. * Due to the variation in the actual pipe speed the scanline * counter will give us slightly erroneous results during the * TV vblank/margins. But since vtotal was selected such that * it matches the average rate of the pipe during the active * portion the error shouldn't cause any serious grief to * vblank timestamps. * * For posterity here is the empirically derived formula * that gives us the maximum length of the pipe vblank * we can use without causing display corruption. Following * this would allow us to have a ticking scanline counter * everywhere except during the bottom margin (there the * pipe always stops). Ie. this would eliminate the second * flat portion of the above graph. However this would also * complicate vblank timestamping as the pipe vtotal would * no longer match the average rate the pipe runs at during * the active portion. Hence following this formula seems * more trouble that it's worth. 
* * if (GRAPHICS_VER(dev_priv) == 4) { * num = cdclk * (tv_mode->oversample >> !tv_mode->progressive); * den = tv_mode->clock; * } else { * num = tv_mode->oversample >> !tv_mode->progressive; * den = 1; * } * max_pipe_vblank_len ~= * (num * tv_htotal * (tv_vblank_len + top_margin)) / * (den * pipe_htotal); */ intel_tv_scale_mode_horiz(adjusted_mode, hdisplay, conn_state->tv.margins.left, conn_state->tv.margins.right); intel_tv_scale_mode_vert(adjusted_mode, vdisplay, tv_conn_state->margins.top, tv_conn_state->margins.bottom); drm_mode_set_crtcinfo(adjusted_mode, 0); adjusted_mode->name[0] = '\0'; /* pixel counter doesn't work on i965gm TV output */ if (IS_I965GM(dev_priv)) pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; return 0; } static void set_tv_mode_timings(struct drm_i915_private *dev_priv, const struct tv_mode *tv_mode, bool burst_ena) { u32 hctl1, hctl2, hctl3; u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) | (tv_mode->htotal << TV_HTOTAL_SHIFT); hctl2 = (tv_mode->hburst_start << 16) | (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT); if (burst_ena) hctl2 |= TV_BURST_ENA; hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) | (tv_mode->hblank_end << TV_HBLANK_END_SHIFT); vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) | (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) | (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT); vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) | (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) | (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT); vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) | (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) | (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT); if (tv_mode->veq_ena) vctl3 |= TV_EQUAL_ENA; vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) | (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT); vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) | (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT); vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) | (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT); vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); intel_de_write(dev_priv, TV_H_CTL_1, hctl1); intel_de_write(dev_priv, TV_H_CTL_2, hctl2); intel_de_write(dev_priv, TV_H_CTL_3, hctl3); intel_de_write(dev_priv, TV_V_CTL_1, vctl1); intel_de_write(dev_priv, TV_V_CTL_2, vctl2); intel_de_write(dev_priv, TV_V_CTL_3, vctl3); intel_de_write(dev_priv, TV_V_CTL_4, vctl4); intel_de_write(dev_priv, TV_V_CTL_5, vctl5); intel_de_write(dev_priv, TV_V_CTL_6, vctl6); intel_de_write(dev_priv, TV_V_CTL_7, vctl7); } static void set_color_conversion(struct drm_i915_private *dev_priv, const struct color_conversion *color_conversion) { if (!color_conversion) return; intel_de_write(dev_priv, TV_CSC_Y, (color_conversion->ry << 16) | color_conversion->gy); intel_de_write(dev_priv, TV_CSC_Y2, (color_conversion->by << 16) | color_conversion->ay); intel_de_write(dev_priv, TV_CSC_U, (color_conversion->ru << 16) | color_conversion->gu); intel_de_write(dev_priv, TV_CSC_U2, (color_conversion->bu << 16) | color_conversion->au); intel_de_write(dev_priv, TV_CSC_V, (color_conversion->rv << 16) | color_conversion->gv); intel_de_write(dev_priv, TV_CSC_V2, (color_conversion->bv << 16) | color_conversion->av); } static void intel_tv_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state 
*conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); const struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); u32 tv_ctl, tv_filter_ctl; u32 scctl1, scctl2, scctl3; int i, j; const struct video_levels *video_levels; const struct color_conversion *color_conversion; bool burst_ena; int xpos, ypos; unsigned int xsize, ysize; if (!tv_mode) return; /* can't happen (mode_prepare prevents this) */ tv_ctl = intel_de_read(dev_priv, TV_CTL); tv_ctl &= TV_CTL_SAVE; switch (intel_tv->type) { default: case DRM_MODE_CONNECTOR_Unknown: case DRM_MODE_CONNECTOR_Composite: tv_ctl |= TV_ENC_OUTPUT_COMPOSITE; video_levels = tv_mode->composite_levels; color_conversion = tv_mode->composite_color; burst_ena = tv_mode->burst_ena; break; case DRM_MODE_CONNECTOR_Component: tv_ctl |= TV_ENC_OUTPUT_COMPONENT; video_levels = &component_levels; if (tv_mode->burst_ena) color_conversion = &sdtv_csc_yprpb; else color_conversion = &hdtv_csc_yprpb; burst_ena = false; break; case DRM_MODE_CONNECTOR_SVIDEO: tv_ctl |= TV_ENC_OUTPUT_SVIDEO; video_levels = tv_mode->svideo_levels; color_conversion = tv_mode->svideo_color; burst_ena = tv_mode->burst_ena; break; } tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe); switch (tv_mode->oversample) { case 8: tv_ctl |= TV_OVERSAMPLE_8X; break; case 4: tv_ctl |= TV_OVERSAMPLE_4X; break; case 2: tv_ctl |= TV_OVERSAMPLE_2X; break; default: tv_ctl |= TV_OVERSAMPLE_NONE; break; } if (tv_mode->progressive) tv_ctl |= TV_PROGRESSIVE; if (tv_mode->trilevel_sync) tv_ctl |= TV_TRILEVEL_SYNC; if (tv_mode->pal_burst) tv_ctl |= TV_PAL_BURST; scctl1 = 0; if (tv_mode->dda1_inc) scctl1 |= TV_SC_DDA1_EN; if (tv_mode->dda2_inc) scctl1 |= TV_SC_DDA2_EN; if (tv_mode->dda3_inc) scctl1 |= TV_SC_DDA3_EN; scctl1 |= tv_mode->sc_reset; if (video_levels) scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT; scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT | tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; /* Enable two fixes for the chips that need them. 
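* Currently that is only i915GM, which needs TV_ENC_C0_FIX and TV_ENC_SDP_FIX.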
*/ if (IS_I915GM(dev_priv)) tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; set_tv_mode_timings(dev_priv, tv_mode, burst_ena); intel_de_write(dev_priv, TV_SC_CTL_1, scctl1); intel_de_write(dev_priv, TV_SC_CTL_2, scctl2); intel_de_write(dev_priv, TV_SC_CTL_3, scctl3); set_color_conversion(dev_priv, color_conversion); if (DISPLAY_VER(dev_priv) >= 4) intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000); else intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000); if (video_levels) intel_de_write(dev_priv, TV_CLR_LEVEL, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder); /* Filter ctl must be set before TV_WIN_SIZE */ tv_filter_ctl = TV_AUTO_SCALE; if (tv_conn_state->bypass_vfilter) tv_filter_ctl |= TV_V_FILTER_BYPASS; intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl); xsize = tv_mode->hblank_start - tv_mode->hblank_end; ysize = intel_tv_mode_vdisplay(tv_mode); xpos = conn_state->tv.margins.left; ypos = tv_conn_state->margins.top; xsize -= (conn_state->tv.margins.left + conn_state->tv.margins.right); ysize -= (tv_conn_state->margins.top + tv_conn_state->margins.bottom); intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos); intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize); j = 0; for (i = 0; i < 60; i++) intel_de_write(dev_priv, TV_H_LUMA(i), tv_mode->filter_table[j++]); for (i = 0; i < 60; i++) intel_de_write(dev_priv, TV_H_CHROMA(i), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) intel_de_write(dev_priv, TV_V_LUMA(i), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) intel_de_write(dev_priv, TV_V_CHROMA(i), tv_mode->filter_table[j++]); intel_de_write(dev_priv, TV_DAC, intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE); intel_de_write(dev_priv, TV_CTL, tv_ctl); } static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector *connector) { struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; int type; /* Disable TV interrupts around load detect or we'll recurse */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); } save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC); save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL); /* Poll for TV detection */ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe); tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL | DAC_CTL_OVERRIDE | DAC_A_0_7_V | DAC_B_0_7_V | DAC_C_0_7_V); /* * The TV sense state should be cleared to zero on cantiga platform. Otherwise * the TV is misdetected. This is hardware requirement. 
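* (Cantiga is the GM45 chipset; the sense control bits are cleared for IS_GM45() just below.)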
*/ if (IS_GM45(dev_priv)) tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); intel_de_write(dev_priv, TV_CTL, tv_ctl); intel_de_write(dev_priv, TV_DAC, tv_dac); intel_de_posting_read(dev_priv, TV_DAC); intel_crtc_wait_for_next_vblank(crtc); type = -1; tv_dac = intel_de_read(dev_priv, TV_DAC); drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac); /* * A B C * 0 1 1 Composite * 1 0 X svideo * 0 0 0 Component */ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { drm_dbg_kms(&dev_priv->drm, "Detected Composite TV connection\n"); type = DRM_MODE_CONNECTOR_Composite; } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { drm_dbg_kms(&dev_priv->drm, "Detected S-Video TV connection\n"); type = DRM_MODE_CONNECTOR_SVIDEO; } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { drm_dbg_kms(&dev_priv->drm, "Detected Component TV connection\n"); type = DRM_MODE_CONNECTOR_Component; } else { drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n"); type = -1; } intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); intel_de_write(dev_priv, TV_CTL, save_tv_ctl); intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. */ intel_crtc_wait_for_next_vblank(crtc); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); } return type; } /* * Here we set accurate tv format according to connector type * i.e Component TV should not be assigned by NTSC or PAL */ static void intel_tv_find_better_format(struct drm_connector *connector) { struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i; /* Component supports everything so we can keep the current mode */ if (intel_tv->type == DRM_MODE_CONNECTOR_Component) return; /* If the current mode is fine don't change it */ if (!tv_mode->component_only) return; for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { tv_mode = &tv_modes[i]; if (!tv_mode->component_only) break; } connector->state->tv.mode = i; } static int intel_tv_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector)); enum drm_connector_status status; int type; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); if (!INTEL_DISPLAY_ENABLED(i915)) return connector_status_disconnected; if (force) { struct drm_atomic_state *state; state = intel_load_detect_get_pipe(connector, ctx); if (IS_ERR(state)) return PTR_ERR(state); if (state) { type = intel_tv_detect_type(intel_tv, connector); intel_load_detect_release_pipe(connector, state, ctx); status = type < 0 ? 
connector_status_disconnected : connector_status_connected; } else { status = connector_status_unknown; } if (status == connector_status_connected) { intel_tv->type = type; intel_tv_find_better_format(connector); } return status; } else return connector->status; } static const struct input_res { u16 w, h; } input_res_table[] = { { 640, 480 }, { 800, 600 }, { 1024, 768 }, { 1280, 1024 }, { 848, 480 }, { 1280, 720 }, { 1920, 1080 }, }; /* Choose preferred mode according to line number of TV format */ static bool intel_tv_is_preferred_mode(const struct drm_display_mode *mode, const struct tv_mode *tv_mode) { int vdisplay = intel_tv_mode_vdisplay(tv_mode); /* prefer 480 line modes for all SD TV modes */ if (vdisplay <= 576) vdisplay = 480; return vdisplay == mode->vdisplay; } static void intel_tv_set_mode_type(struct drm_display_mode *mode, const struct tv_mode *tv_mode) { mode->type = DRM_MODE_TYPE_DRIVER; if (intel_tv_is_preferred_mode(mode, tv_mode)) mode->type |= DRM_MODE_TYPE_PREFERRED; } static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int i, count = 0; for (i = 0; i < ARRAY_SIZE(input_res_table); i++) { const struct input_res *input = &input_res_table[i]; struct drm_display_mode *mode; if (input->w > 1024 && !tv_mode->progressive && !tv_mode->component_only) continue; /* no vertical scaling with wide sources on gen3 */ if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 && input->h > intel_tv_mode_vdisplay(tv_mode)) continue; mode = drm_mode_create(connector->dev); if (!mode) continue; /* * We take the TV mode and scale it to look * like it had the expected h/vdisplay. This * provides the most information to userspace * about the actual timings of the mode. We * do ignore the margins though. */ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock); if (count == 0) { drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } intel_tv_scale_mode_horiz(mode, input->w, 0, 0); intel_tv_scale_mode_vert(mode, input->h, 0, 0); intel_tv_set_mode_type(mode, tv_mode); drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); count++; } return count; } static const struct drm_connector_funcs intel_tv_connector_funcs = { .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_tv_connector_duplicate_state, }; static int intel_tv_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_connector_state *new_state; struct drm_crtc_state *new_crtc_state; struct drm_connector_state *old_state; new_state = drm_atomic_get_new_connector_state(state, connector); if (!new_state->crtc) return 0; old_state = drm_atomic_get_old_connector_state(state, connector); new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (old_state->tv.mode != new_state->tv.mode || old_state->tv.margins.left != new_state->tv.margins.left || old_state->tv.margins.right != new_state->tv.margins.right || old_state->tv.margins.top != new_state->tv.margins.top || old_state->tv.margins.bottom != new_state->tv.margins.bottom) { /* Force a modeset. 
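* The TV mode and margins feed into the pipe timings computed in intel_tv_compute_config(), so any change needs a full modeset.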
*/ new_crtc_state->connectors_changed = true; } return 0; } static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .detect_ctx = intel_tv_detect, .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, .atomic_check = intel_tv_atomic_check, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { .destroy = intel_encoder_destroy, }; static void intel_tv_add_properties(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); struct drm_connector_state *conn_state = connector->state; const char *tv_format_names[ARRAY_SIZE(tv_modes)]; int i; /* BIOS margin values */ conn_state->tv.margins.left = 54; conn_state->tv.margins.top = 36; conn_state->tv.margins.right = 46; conn_state->tv.margins.bottom = 37; conn_state->tv.mode = 0; /* Create TV properties then attach current values */ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { /* 1080p50/1080p60 not supported on gen3 */ if (DISPLAY_VER(i915) == 3 && tv_modes[i].oversample == 1) break; tv_format_names[i] = tv_modes[i].name; } drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names); drm_object_attach_property(&connector->base, i915->drm.mode_config.legacy_tv_mode_property, conn_state->tv.mode); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_left_margin_property, conn_state->tv.margins.left); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_top_margin_property, conn_state->tv.margins.top); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_right_margin_property, conn_state->tv.margins.right); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_bottom_margin_property, conn_state->tv.margins.bottom); } void intel_tv_init(struct drm_i915_private *dev_priv) { struct drm_connector *connector; struct intel_tv *intel_tv; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; u32 tv_dac_on, tv_dac_off, save_tv_dac; if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) return; if (!intel_bios_is_tv_present(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n"); return; } /* * Sanity check the TV output by checking to see if the * DAC register holds a value */ save_tv_dac = intel_de_read(dev_priv, TV_DAC); intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); tv_dac_on = intel_de_read(dev_priv, TV_DAC); intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); tv_dac_off = intel_de_read(dev_priv, TV_DAC); intel_de_write(dev_priv, TV_DAC, save_tv_dac); /* * If the register does not hold the state change enable * bit, (either as a 0 or a 1), assume it doesn't really * exist */ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 || (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL); if (!intel_tv) { return; } intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(intel_tv); return; } intel_encoder = &intel_tv->base; connector = &intel_connector->base; /* * The documentation, for the older chipsets at least, recommend * using a polling method rather than hotplug detection for TVs. * This is because in order to perform the hotplug detection, the PLLs * for the TV must be kept alive increasing power drain and starving * bandwidth from other encoders. Notably for instance, it causes * pipe underruns on Crestline when this encoder is supposedly idle. * * More recent chipsets favour HDMI rather than integrated S-Video. 
*/ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC, "TV"); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; intel_encoder->pre_enable = intel_tv_pre_enable; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = PORT_NONE; intel_encoder->pipe_mask = ~0; intel_encoder->cloneable = 0; intel_tv->type = DRM_MODE_CONNECTOR_Unknown; drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); intel_tv_add_properties(connector); }
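/*
 * A minimal userspace sketch (not part of the driver): when intel_tv_compute_config()
 * above has to bypass the vertical filter (gen3 with a >1024 pixel wide source) it
 * centers the image and splits the spare lines between the top and bottom borders in
 * proportion to the user margins. The numbers used here are hypothetical: a 720 line
 * source shown in the 1080 line "1080i" mode with the default 36/37 margins set in
 * intel_tv_add_properties().
 */
#include <stdio.h>

static void split_extra(int extra, int top_margin, int bottom_margin,
			int *top, int *bottom)
{
	/* same proportional split as intel_tv_compute_config() */
	if (top_margin + bottom_margin)
		*top = extra * top_margin / (top_margin + bottom_margin);
	else
		*top = extra / 2;
	*bottom = extra - *top;
}

int main(void)
{
	int top, bottom;

	split_extra(1080 - 720, 36, 37, &top, &bottom);
	printf("top %d, bottom %d\n", top, bottom);	/* prints: top 177, bottom 183 */
	return 0;
}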
linux-master
drivers/gpu/drm/i915/display/intel_tv.c
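/*
 * A minimal userspace sketch (not part of the driver): the conversion done by
 * intel_tv_mode_to_mode() above is easier to follow with concrete numbers. This
 * small program repeats the same arithmetic for the NTSC-M entry of tv_modes[]
 * (constants copied from the table, variable names are ad hoc) and confirms the
 * expected ~59.94 Hz rate at a 27 MHz dotclock. Build with any C compiler.
 */
#include <stdio.h>

int main(void)
{
	/* NTSC-M fields from tv_modes[] */
	const int clock = 108000, oversample = 8, progressive = 0;
	const int hsync_end = 64, hblank_end = 124, hblank_start = 836, htotal = 857;
	const int vsync_start_f1 = 6, vsync_start_f2 = 7, vsync_len = 6;
	const int vi_end_f1 = 20, vi_end_f2 = 21, nbr_end = 240;

	/* same steps as intel_tv_mode_to_mode() */
	int m_clock = clock / (oversample >> !progressive);	/* 108000 / 4 = 27000 kHz */
	int m_hdisplay = hblank_start - hblank_end;		/* 712 */
	int m_hsync_start = m_hdisplay + htotal - hblank_start;	/* 733 */
	int m_hsync_end = m_hsync_start + hsync_end;		/* 797 */
	int m_htotal = htotal + 1;				/* 858 */
	int m_vdisplay = 2 * (nbr_end + 1);			/* 482 (interlaced: two fields) */
	int m_vsync_start = m_vdisplay + vsync_start_f1 + 1 + vsync_start_f2 + 1; /* 497 */
	int m_vsync_end = m_vsync_start + 2 * vsync_len;	/* 509 */
	int m_vtotal = m_vdisplay + vi_end_f1 + 1 + vi_end_f2 + 1;	/* 525 */

	printf("%dx%di clock %d kHz hsync %d-%d htotal %d vsync %d-%d vtotal %d => %.2f Hz\n",
	       m_hdisplay, m_vdisplay, m_clock, m_hsync_start, m_hsync_end, m_htotal,
	       m_vsync_start, m_vsync_end, m_vtotal,
	       m_clock * 1000.0 / (m_htotal * m_vtotal));
	return 0;
}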
/* * Copyright © 2008-2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #define LT_MSG_PREFIX "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] " #define LT_MSG_ARGS(_intel_dp, _dp_phy) (_intel_dp)->attached_connector->base.base.id, \ (_intel_dp)->attached_connector->base.name, \ dp_to_dig_port(_intel_dp)->base.base.base.id, \ dp_to_dig_port(_intel_dp)->base.base.name, \ drm_dp_phy_name(_dp_phy) #define lt_dbg(_intel_dp, _dp_phy, _format, ...) \ drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \ LT_MSG_PREFIX _format, \ LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__) #define lt_err(_intel_dp, _dp_phy, _format, ...) 
do { \ if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \ drm_err(&dp_to_i915(_intel_dp)->drm, \ LT_MSG_PREFIX _format, \ LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \ else \ lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \ } while (0) static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); } static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) { intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0; } static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1]; } static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE], enum drm_dp_phy dp_phy) { u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) { lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n"); return; } lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n", (int)sizeof(intel_dp->lttpr_phy_caps[0]), phy_caps); } static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { int ret; ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd, intel_dp->lttpr_common_caps); if (ret < 0) goto reset_caps; lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n", (int)sizeof(intel_dp->lttpr_common_caps), intel_dp->lttpr_common_caps); /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */ if (intel_dp->lttpr_common_caps[0] < 0x14) goto reset_caps; return true; reset_caps: intel_dp_reset_lttpr_common_caps(intel_dp); return false; } static bool intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) { u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT : DP_PHY_REPEATER_MODE_NON_TRANSPARENT; return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; } static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { int lttpr_count; int i; if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd)) return 0; lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); /* * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are * detected as this breaks link training at least on the Dell WD19TB * dock. */ if (lttpr_count == 0) return 0; /* * See DP Standard v2.0 3.6.6.1. about the explicit disabling of * non-transparent mode and the disable->enable non-transparent mode * sequence. */ intel_dp_set_lttpr_transparent_mode(intel_dp, true); /* * In case of unsupported number of LTTPRs or failing to switch to * non-transparent mode fall-back to transparent link training mode, * still taking into account any LTTPR common lane- rate/count limits. 
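* (A negative lttpr_count means the repeater count reported in the common caps was invalid, e.g. more LTTPRs than the 8 that are supported; see the kernel-doc of intel_dp_init_lttpr_and_dprx_caps() below.)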
*/ if (lttpr_count < 0) return 0; if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); intel_dp_set_lttpr_transparent_mode(intel_dp, true); intel_dp_reset_lttpr_count(intel_dp); return 0; } for (i = 0; i < lttpr_count; i++) intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i)); return lttpr_count; } /** * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode * @intel_dp: Intel DP struct * * Read the LTTPR common and DPRX capabilities and switch to non-transparent * link training mode if any is detected and read the PHY capabilities for all * detected LTTPRs. In case of an LTTPR detection error or if the number of * LTTPRs is more than is supported (8), fall back to the no-LTTPR, * transparent mode link training mode. * * Returns: * >0 if LTTPRs were detected and the non-transparent LT mode was set. The * DPRX capabilities are read out. * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a * detection failure and the transparent LT mode was set. The DPRX * capabilities are read out. * <0 Reading out the DPRX capabilities failed. */ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int lttpr_count = 0; /* * Detecting LTTPRs must be avoided on platforms with an AUX timeout * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). */ if (!intel_dp_is_edp(intel_dp) && (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) return -EIO; if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) return -EIO; lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); } /* * The DPTX shall read the DPRX caps after LTTPR detection, so re-read * it here. 
*/ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) { intel_dp_reset_lttpr_common_caps(intel_dp); return -EIO; } return lttpr_count; } static u8 dp_voltage_max(u8 preemph) { switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; case DP_TRAIN_PRE_EMPH_LEVEL_1: return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; case DP_TRAIN_PRE_EMPH_LEVEL_2: return DP_TRAIN_VOLTAGE_SWING_LEVEL_1; case DP_TRAIN_PRE_EMPH_LEVEL_3: default: return DP_TRAIN_VOLTAGE_SWING_LEVEL_0; } } static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps)) return DP_TRAIN_PRE_EMPH_LEVEL_3; else return DP_TRAIN_PRE_EMPH_LEVEL_2; } static bool intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX); return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1); } static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 voltage_max; /* * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from * the DPRX_PHY we train. */ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) voltage_max = intel_dp->voltage_max(intel_dp, crtc_state); else voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1); drm_WARN_ON_ONCE(&i915->drm, voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 && voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3); return voltage_max; } static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 preemph_max; /* * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from * the DPRX_PHY we train. 
*/ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) preemph_max = intel_dp->preemph_max(intel_dp); else preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1); drm_WARN_ON_ONCE(&i915->drm, preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 && preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3); return preemph_max; } static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || DISPLAY_VER(i915) >= 11; } /* 128b/132b */ static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { u8 tx_ffe = 0; if (has_per_lane_signal_levels(intel_dp, dp_phy)) { lane = min(lane, crtc_state->lane_count - 1); tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane); } else { for (lane = 0; lane < crtc_state->lane_count; lane++) tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane)); } return tx_ffe; } /* 8b/10b */ static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { u8 v = 0; u8 p = 0; u8 voltage_max; u8 preemph_max; if (has_per_lane_signal_levels(intel_dp, dp_phy)) { lane = min(lane, crtc_state->lane_count - 1); v = drm_dp_get_adjust_request_voltage(link_status, lane); p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); } else { for (lane = 0; lane < crtc_state->lane_count; lane++) { v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane)); p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane)); } } preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy); if (p >= preemph_max) p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; v = min(v, dp_voltage_max(p)); voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy); if (v >= voltage_max) v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; return v | p; } static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { if (intel_dp_is_uhbr(crtc_state)) return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state, dp_phy, link_status, lane); else return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state, dp_phy, link_status, lane); } #define TRAIN_REQ_FMT "%d/%d/%d/%d" #define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \ (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT) #define TRAIN_REQ_VSWING_ARGS(link_status) \ _TRAIN_REQ_VSWING_ARGS(link_status, 0), \ _TRAIN_REQ_VSWING_ARGS(link_status, 1), \ _TRAIN_REQ_VSWING_ARGS(link_status, 2), \ _TRAIN_REQ_VSWING_ARGS(link_status, 3) #define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \ (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT) #define TRAIN_REQ_PREEMPH_ARGS(link_status) \ _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \ _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \ _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \ _TRAIN_REQ_PREEMPH_ARGS(link_status, 3) #define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \ drm_dp_get_adjust_tx_ffe_preset((link_status), (lane)) #define TRAIN_REQ_TX_FFE_ARGS(link_status) \ _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \ _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \ _TRAIN_REQ_TX_FFE_ARGS(link_status, 
2), \ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3) void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { int lane; if (intel_dp_is_uhbr(crtc_state)) { lt_dbg(intel_dp, dp_phy, "128b/132b, lanes: %d, " "TX FFE request: " TRAIN_REQ_FMT "\n", crtc_state->lane_count, TRAIN_REQ_TX_FFE_ARGS(link_status)); } else { lt_dbg(intel_dp, dp_phy, "8b/10b, lanes: %d, " "vswing request: " TRAIN_REQ_FMT ", " "pre-emphasis request: " TRAIN_REQ_FMT "\n", crtc_state->lane_count, TRAIN_REQ_VSWING_ARGS(link_status), TRAIN_REQ_PREEMPH_ARGS(link_status)); } for (lane = 0; lane < 4; lane++) intel_dp->train_set[lane] = intel_dp_get_lane_adjust_train(intel_dp, crtc_state, dp_phy, link_status, lane); } static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { return dp_phy == DP_PHY_DPRX ? DP_TRAINING_PATTERN_SET : DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy); } static bool intel_dp_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, u8 dp_train_pat) { int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); u8 buf[sizeof(intel_dp->train_set) + 1]; int len; intel_dp_program_link_training_pattern(intel_dp, crtc_state, dp_phy, dp_train_pat); buf[0] = dp_train_pat; /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count); len = crtc_state->lane_count + 1; return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len; } static char dp_training_pattern_name(u8 train_pat) { switch (train_pat) { case DP_TRAINING_PATTERN_1: case DP_TRAINING_PATTERN_2: case DP_TRAINING_PATTERN_3: return '0' + train_pat; case DP_TRAINING_PATTERN_4: return '4'; default: MISSING_CASE(train_pat); return '?'; } } void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, u8 dp_train_pat) { u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); if (train_pat != DP_TRAINING_PATTERN_DISABLE) lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n", dp_training_pattern_name(train_pat)); intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); } #define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s" #define _TRAIN_SET_VSWING_ARGS(train_set) \ ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \ (train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : "" #define TRAIN_SET_VSWING_ARGS(train_set) \ _TRAIN_SET_VSWING_ARGS((train_set)[0]), \ _TRAIN_SET_VSWING_ARGS((train_set)[1]), \ _TRAIN_SET_VSWING_ARGS((train_set)[2]), \ _TRAIN_SET_VSWING_ARGS((train_set)[3]) #define _TRAIN_SET_PREEMPH_ARGS(train_set) \ ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \ (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
"(max)" : "" #define TRAIN_SET_PREEMPH_ARGS(train_set) \ _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \ _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \ _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \ _TRAIN_SET_PREEMPH_ARGS((train_set)[3]) #define _TRAIN_SET_TX_FFE_ARGS(train_set) \ ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), "" #define TRAIN_SET_TX_FFE_ARGS(train_set) \ _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \ _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \ _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \ _TRAIN_SET_TX_FFE_ARGS((train_set)[3]) void intel_dp_set_signal_levels(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; if (intel_dp_is_uhbr(crtc_state)) { lt_dbg(intel_dp, dp_phy, "128b/132b, lanes: %d, " "TX FFE presets: " TRAIN_SET_FMT "\n", crtc_state->lane_count, TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); } else { lt_dbg(intel_dp, dp_phy, "8b/10b, lanes: %d, " "vswing levels: " TRAIN_SET_FMT ", " "pre-emphasis levels: " TRAIN_SET_FMT "\n", crtc_state->lane_count, TRAIN_SET_VSWING_ARGS(intel_dp->train_set), TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); } if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) encoder->set_signal_levels(encoder, crtc_state); } static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); } static bool intel_dp_update_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { int reg = dp_phy == DP_PHY_DPRX ? DP_TRAINING_LANE0_SET : DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); int ret; intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); ret = drm_dp_dpcd_write(&intel_dp->aux, reg, intel_dp->train_set, crtc_state->lane_count); return ret == crtc_state->lane_count; } /* 128b/132b */ static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane) { return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) == DP_TX_FFE_PRESET_VALUE_MASK; } /* * 8b/10b * * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to * have self contradicting tests around this area. * * In lieu of better ideas let's just stop when we've reached the max supported * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on * whether vswing level 3 is supported or not. */ static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane) { u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0) return false; if (v + p != 3) return false; return true; } static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { int lane; for (lane = 0; lane < crtc_state->lane_count; lane++) { u8 train_set_lane = intel_dp->train_set[lane]; if (intel_dp_is_uhbr(crtc_state)) { if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane)) return false; } else { if (!intel_dp_lane_max_vswing_reached(train_set_lane)) return false; } } return true; } static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { u8 link_config[2]; link_config[0] = crtc_state->vrr.flipline ? 
DP_MSA_TIMING_PAR_IGNORE_EN : 0; link_config[1] = intel_dp_is_uhbr(crtc_state) ? DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B; drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); } static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 link_bw, u8 rate_select) { u8 link_config[2]; /* Write the link configuration data */ link_config[0] = link_bw; link_config[1] = crtc_state->lane_count; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); /* eDP 1.4 rate select method. */ if (!link_bw) drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, &rate_select, 1); } /* * Prepare link training by configuring the link parameters. On DDI platforms * also enable the port here. */ static bool intel_dp_prepare_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { u8 link_bw, rate_select; if (intel_dp->prepare_link_retrain) intel_dp->prepare_link_retrain(intel_dp, crtc_state); intel_dp_compute_rate(intel_dp, crtc_state->port_clock, &link_bw, &rate_select); /* * WaEdpLinkRateDataReload * * Parade PS8461E MUX (used on varius TGL+ laptops) needs * to snoop the link rates reported by the sink when we * use LINK_RATE_SET in order to operate in jitter cleaning * mode (as opposed to redriver mode). Unfortunately it * loses track of the snooped link rates when powered down, * so we need to make it re-snoop often. Without this high * link rates are not stable. */ if (!link_bw) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n"); drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates)); } if (link_bw) lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n", link_bw); else lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_RATE_SET value %02x\n", rate_select); /* * Spec DP2.1 Section 3.5.2.16 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate */ intel_dp_update_downspread_ctrl(intel_dp, crtc_state); intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw, rate_select); return true; } static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state, const u8 old_link_status[DP_LINK_STATUS_SIZE], const u8 new_link_status[DP_LINK_STATUS_SIZE]) { int lane; for (lane = 0; lane < crtc_state->lane_count; lane++) { u8 old, new; if (intel_dp_is_uhbr(crtc_state)) { old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane); new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane); } else { old = drm_dp_get_adjust_request_voltage(old_link_status, lane) | drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane); new = drm_dp_get_adjust_request_voltage(new_link_status, lane) | drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane); } if (old != new) return true; } return false; } void intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { lt_dbg(intel_dp, dp_phy, "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", link_status[0], link_status[1], link_status[2], link_status[3], link_status[4], link_status[5]); } /* * Perform the link training clock recovery phase on the given DP PHY using * training pattern 1. 
*/ static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { u8 old_link_status[DP_LINK_STATUS_SIZE] = {}; int voltage_tries, cr_tries, max_cr_tries; u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; int delay_us; delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); /* clock recovery */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { lt_err(intel_dp, dp_phy, "Failed to enable link training\n"); return false; } /* * The DP 1.4 spec defines the max clock recovery retries value * as 10 but for pre-DP 1.4 devices we set a very tolerant * retry limit of 80 (4 voltage levels x 4 preemphasis levels x * x 5 identical voltage retries). Since the previous specs didn't * define a limit and created the possibility of an infinite loop * we want to prevent any sync from triggering that corner case. */ if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) max_cr_tries = 10; else max_cr_tries = 80; voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { usleep_range(delay_us, 2 * delay_us); if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { lt_err(intel_dp, dp_phy, "Failed to get link status\n"); return false; } if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n"); return true; } if (voltage_tries == 5) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n"); return false; } if (max_vswing_reached) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n"); return false; } /* Update training set as requested by target */ intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { lt_err(intel_dp, dp_phy, "Failed to update link training\n"); return false; } if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status)) ++voltage_tries; else voltage_tries = 1; memcpy(old_link_status, link_status, sizeof(link_status)); if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state)) max_vswing_reached = true; } intel_dp_dump_link_status(intel_dp, dp_phy, link_status); lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n", max_cr_tries); return false; } /* * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or * 1.2 devices that support it, TPS2 otherwise. */ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool source_tps3, sink_tps3, source_tps4, sink_tps4; /* UHBR+ use separate 128b/132b TPS2 */ if (intel_dp_is_uhbr(crtc_state)) return DP_TRAINING_PATTERN_2; /* * TPS4 support is mandatory for all downstream devices that * support HBR3. There are no known eDP panels that support * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification. * LTTPRs must support TPS4. 
*/ source_tps4 = intel_dp_source_supports_tps4(i915); sink_tps4 = dp_phy != DP_PHY_DPRX || drm_dp_tps4_supported(intel_dp->dpcd); if (source_tps4 && sink_tps4) { return DP_TRAINING_PATTERN_4; } else if (crtc_state->port_clock == 810000) { if (!source_tps4) lt_dbg(intel_dp, dp_phy, "8.1 Gbps link rate without source TPS4 support\n"); if (!sink_tps4) lt_dbg(intel_dp, dp_phy, "8.1 Gbps link rate without sink TPS4 support\n"); } /* * TPS3 support is mandatory for downstream devices that * support HBR2. However, not all sinks follow the spec. */ source_tps3 = intel_dp_source_supports_tps3(i915); sink_tps3 = dp_phy != DP_PHY_DPRX || drm_dp_tps3_supported(intel_dp->dpcd); if (source_tps3 && sink_tps3) { return DP_TRAINING_PATTERN_3; } else if (crtc_state->port_clock >= 540000) { if (!source_tps3) lt_dbg(intel_dp, dp_phy, ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); if (!sink_tps3) lt_dbg(intel_dp, dp_phy, ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); } return DP_TRAINING_PATTERN_2; } /* * Perform the link training channel equalization phase on the given DP PHY * using one of training pattern 2, 3 or 4 depending on the source and * sink capabilities. */ static bool intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { int tries; u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; int delay_us; delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ if (training_pattern != DP_TRAINING_PATTERN_4) training_pattern |= DP_LINK_SCRAMBLING_DISABLE; /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, training_pattern)) { lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n"); return false; } for (tries = 0; tries < 5; tries++) { usleep_range(delay_us, 2 * delay_us); if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { lt_err(intel_dp, dp_phy, "Failed to get link status\n"); break; } /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); lt_dbg(intel_dp, dp_phy, "Clock recovery check failed, cannot continue channel equalization\n"); break; } if (drm_dp_channel_eq_ok(link_status, crtc_state->lane_count)) { channel_eq = true; lt_dbg(intel_dp, dp_phy, "Channel EQ done. 
DP Training successful\n"); break; } /* Update training set as requested by target */ intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { lt_err(intel_dp, dp_phy, "Failed to update link training\n"); break; } } /* Try 5 times, else fail and try at lower BW */ if (tries == 5) { intel_dp_dump_link_status(intel_dp, dp_phy, link_status); lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n"); } return channel_eq; } static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); u8 val = DP_TRAINING_PATTERN_DISABLE; return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1; } static int intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { u8 sink_status; int ret; ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status); if (ret != 1) { lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n"); return ret < 0 ? ret : -EIO; } return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0; } /** * intel_dp_stop_link_train - stop link training * @intel_dp: DP struct * @crtc_state: state for CRTC attached to the encoder * * Stop the link training of the @intel_dp port, disabling the training * pattern in the sink's DPCD, and disabling the test pattern symbol * generation on the port. * * What symbols are output on the port after this point is * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern * with the pipe being disabled, on older platforms it's HW specific if/how an * idle pattern is generated, as the pipe is already enabled here for those. * * This function must be called after intel_dp_start_link_train(). */ void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { intel_dp->link_trained = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_DISABLE); if (intel_dp_is_uhbr(crtc_state) && wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); } } static bool intel_dp_link_train_phy(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { bool ret = false; if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) goto out; if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy)) goto out; ret = true; out: lt_dbg(intel_dp, dp_phy, "Link Training %s at link rate = %d, lane count = %d\n", ret ? 
"passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); return ret; } static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n"); return; } if (intel_dp->hobl_active) { lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed with HOBL active, not enabling it from now on\n"); intel_dp->hobl_failed = true; } else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state->port_clock, crtc_state->lane_count)) { return; } /* Schedule a Hotplug Uevent to userspace to start modeset */ queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work); } /* Perform the link training on all LTTPRs and the DPRX on a link. */ static bool intel_dp_link_train_all_phys(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lttpr_count) { bool ret = true; int i; for (i = lttpr_count - 1; i >= 0; i--) { enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy); intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy); if (!ret) break; } if (ret) ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); if (intel_dp->set_idle_link_train) intel_dp->set_idle_link_train(intel_dp, crtc_state); return ret; } /* * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1) */ static bool intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { u8 link_status[DP_LINK_STATUS_SIZE]; int delay_us; int try, max_tries = 20; unsigned long deadline; bool timeout = false; /* * Reset signal levels. Start transmitting 128b/132b TPS1. * * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1 * in DP_TRAINING_PATTERN_SET. */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_1)) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n"); return false; } delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux); /* Read the initial TX FFE settings. */ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n"); return false; } /* Update signal levels and training set as requested. */ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n"); return false; } /* Start transmitting 128b/132b TPS2. */ if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_2)) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n"); return false; } /* Time budget for the LANEx_EQ_DONE Sequence */ deadline = jiffies + msecs_to_jiffies_timeout(400); for (try = 0; try < max_tries; try++) { usleep_range(delay_us, 2 * delay_us); /* * The delay may get updated. The transmitter shall read the * delay before link status during link training. 
*/ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux); if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n"); return false; } if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n"); break; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n"); return false; } if (time_after(jiffies, deadline)) timeout = true; /* try one last time after deadline */ /* Update signal levels and training set as requested. */ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n"); return false; } } if (try == max_tries) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n"); return false; } for (;;) { if (time_after(jiffies, deadline)) timeout = true; /* try one last time after deadline */ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n"); return false; } if (drm_dp_128b132b_eq_interlane_align_done(link_status)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n"); break; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n"); return false; } usleep_range(2000, 3000); } return true; } /* * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2) */ static bool intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lttpr_count) { u8 link_status[DP_LINK_STATUS_SIZE]; unsigned long deadline; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_2_CDS) != 1) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n"); return false; } /* Time budget for the LANEx_CDS_DONE Sequence */ deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20); for (;;) { bool timeout = false; if (time_after(jiffies, deadline)) timeout = true; /* try one last time after deadline */ usleep_range(2000, 3000); if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n"); return false; } if (drm_dp_128b132b_eq_interlane_align_done(link_status) && drm_dp_128b132b_cds_interlane_align_done(link_status) && drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) { lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n"); break; } if (drm_dp_128b132b_link_training_failed(link_status)) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n"); return false; } if (timeout) { intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n"); return false; } } return true; } /* * 128b/132b link 
training sequence. (DP 2.0 E11 SCR on link training.) */ static bool intel_dp_128b132b_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lttpr_count) { bool passed = false; if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n"); return false; } if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) && intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count)) passed = true; lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b Link Training %s at link rate = %d, lane count = %d\n", passed ? "passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); return passed; } /** * intel_dp_start_link_train - start link training * @intel_dp: DP struct * @crtc_state: state for CRTC attached to the encoder * * Start the link training of the @intel_dp port, scheduling a fallback * retraining with reduced link rate/lane parameters if the link training * fails. * After calling this function intel_dp_stop_link_train() must be called. */ void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool passed; /* * TODO: Reiniting LTTPRs here won't be needed once proper connector * HW state readout is added. */ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); if (lttpr_count < 0) /* Still continue with enabling the port and link training. */ lttpr_count = 0; intel_dp_prepare_link_train(intel_dp, crtc_state); if (intel_dp_is_uhbr(crtc_state)) passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count); else passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count); /* * Ignore the link failure in CI * * In fixed environments like CI, sometimes unexpected long HPDs are * generated by the displays. If the ignore_long_hpd flag is set, such long * HPDs are ignored. Probably as a consequence of these ignored * long HPDs, subsequent link trainings fail, resulting in CI * execution failures. * * For test cases which rely on the link training or processing of HPDs, * the ignore_long_hpd flag can be unset from the testcase. */ if (!passed && i915->display.hotplug.ignore_long_hpd) { lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n"); return; } if (!passed) intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); } void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { /* * VIDEO_DIP_CTL register bit 31 should be set to '0' to not * disable SDP CRC. This is applicable for Display version 13. * Default value of bit 31 is '0' hence discarding the write * TODO: Corrective actions on SDP corruption yet to be defined */ if (intel_dp_is_uhbr(crtc_state)) /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SDP_ERROR_DETECTION_CONFIGURATION, DP_SDP_CRC16_128B132B_EN); lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); }
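/*
 * Editor's note: an illustrative, self-contained sketch (not part of the
 * driver above) of the clock recovery retry budget described in
 * intel_dp_link_training_clock_recovery(). DP 1.4 defines a maximum of 10
 * clock recovery retries; for pre-1.4 sinks a tolerant bound of 80 is used
 * (4 voltage levels x 4 pre-emphasis levels x 5 identical-voltage retries)
 * so the loop can never run unbounded. All names below are hypothetical
 * and exist only for this example.
 */
#include <stdbool.h>
#include <stdio.h>

static int example_max_cr_tries(bool dpcd_is_rev_1_4_or_later)
{
	/* DP 1.4+ sinks: the spec-defined retry maximum. */
	if (dpcd_is_rev_1_4_or_later)
		return 10;

	/* Older sinks: 4 vswing levels * 4 pre-emphasis levels * 5 retries. */
	return 4 * 4 * 5;
}

int main(void)
{
	printf("DP 1.4+: %d tries, pre-1.4: %d tries\n",
	       example_max_cr_tries(true), example_max_cr_tries(false));
	return 0;
}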
linux-master
drivers/gpu/drm/i915/display/intel_dp_link_training.c
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Daniel Vetter <[email protected]> */ /** * DOC: frontbuffer tracking * * Many features require us to track changes to the currently active * frontbuffer, especially rendering targeted at the frontbuffer. * * To be able to do so we track frontbuffers using a bitmask for all possible * frontbuffer slots through intel_frontbuffer_track(). The functions in this * file are then called when the contents of the frontbuffer are invalidated, * when frontbuffer rendering has stopped again to flush out all the changes * and when the frontbuffer is exchanged with a flip. Subsystems interested in * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks * into the relevant places and filter for the frontbuffer slots that they are * interested in. * * On a high level there are two types of powersaving features. The first type * works like a special cache (FBC and PSR) and is interested in when it should * stop caching and when to restart caching. This is done by placing callbacks * into the invalidate and the flush functions: At invalidate the caching must * be stopped and at flush time it can be restarted. They may also need to know * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate * and flush on its own), which can be achieved by placing callbacks into the * flip functions. * * The other type of display power saving feature only cares about busyness * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate * busyness. There is no direct way to detect idleness. Instead an idle-timer * delayed work should be started from the flush and flip functions and * cancelled as soon as busyness is detected. */ #include "i915_drv.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_drrs.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" #include "intel_psr.h" /** * frontbuffer_flush - flush frontbuffer * @i915: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the flush * * This function gets called every time rendering on the given planes has * completed and frontbuffer caching can be started again. Flushes will get * delayed if they're blocked by some outstanding asynchronous rendering. * * Can be called without any locks held.
*/ static void frontbuffer_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { /* Delay flushing when rings are still busy.*/ spin_lock(&i915->display.fb_tracking.lock); frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits; spin_unlock(&i915->display.fb_tracking.lock); if (!frontbuffer_bits) return; trace_intel_frontbuffer_flush(i915, frontbuffer_bits, origin); might_sleep(); intel_drrs_flush(i915, frontbuffer_bits); intel_psr_flush(i915, frontbuffer_bits, origin); intel_fbc_flush(i915, frontbuffer_bits, origin); } /** * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip * @i915: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after scheduling a flip on @obj. The actual * frontbuffer flushing will be delayed until completion is signalled with * intel_frontbuffer_flip_complete. If an invalidate happens in between this * flush will be cancelled. * * Can be called without any locks held. */ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, unsigned frontbuffer_bits) { spin_lock(&i915->display.fb_tracking.lock); i915->display.fb_tracking.flip_bits |= frontbuffer_bits; /* Remove stale busy bits due to the old buffer. */ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; spin_unlock(&i915->display.fb_tracking.lock); } /** * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip * @i915: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after the flip has been latched and will complete * on the next vblank. It will execute the flush if it hasn't been cancelled yet. * * Can be called without any locks held. */ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, unsigned frontbuffer_bits) { spin_lock(&i915->display.fb_tracking.lock); /* Mask any cancelled flips. */ frontbuffer_bits &= i915->display.fb_tracking.flip_bits; i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; spin_unlock(&i915->display.fb_tracking.lock); if (frontbuffer_bits) frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); } /** * intel_frontbuffer_flip - synchronous frontbuffer flip * @i915: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after scheduling a flip on @obj. This is for * synchronous plane updates which will happen on the next vblank and which will * not get delayed by pending gpu rendering. * * Can be called without any locks held. */ void intel_frontbuffer_flip(struct drm_i915_private *i915, unsigned frontbuffer_bits) { spin_lock(&i915->display.fb_tracking.lock); /* Remove stale busy bits due to the old buffer. 
*/ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; spin_unlock(&i915->display.fb_tracking.lock); frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); } void __intel_fb_invalidate(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); if (origin == ORIGIN_CS) { spin_lock(&i915->display.fb_tracking.lock); i915->display.fb_tracking.busy_bits |= frontbuffer_bits; i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; spin_unlock(&i915->display.fb_tracking.lock); } trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin); might_sleep(); intel_psr_invalidate(i915, frontbuffer_bits, origin); intel_drrs_invalidate(i915, frontbuffer_bits); intel_fbc_invalidate(i915, frontbuffer_bits, origin); } void __intel_fb_flush(struct intel_frontbuffer *front, enum fb_op_origin origin, unsigned int frontbuffer_bits) { struct drm_i915_private *i915 = intel_bo_to_i915(front->obj); if (origin == ORIGIN_CS) { spin_lock(&i915->display.fb_tracking.lock); /* Filter out new bits since rendering started. */ frontbuffer_bits &= i915->display.fb_tracking.busy_bits; i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; spin_unlock(&i915->display.fb_tracking.lock); } if (frontbuffer_bits) frontbuffer_flush(i915, frontbuffer_bits, origin); } static int frontbuffer_active(struct i915_active *ref) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), write); kref_get(&front->ref); return 0; } static void frontbuffer_retire(struct i915_active *ref) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), write); intel_frontbuffer_flush(front, ORIGIN_CS); intel_frontbuffer_put(front); } static void frontbuffer_release(struct kref *ref) __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); struct drm_i915_gem_object *obj = front->obj; drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits)); i915_ggtt_clear_scanout(obj); i915_gem_object_set_frontbuffer(obj, NULL); spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock); i915_active_fini(&front->write); i915_gem_object_put(obj); kfree_rcu(front, rcu); } struct intel_frontbuffer * intel_frontbuffer_get(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = intel_bo_to_i915(obj); struct intel_frontbuffer *front, *cur; front = i915_gem_object_get_frontbuffer(obj); if (front) return front; front = kmalloc(sizeof(*front), GFP_KERNEL); if (!front) return NULL; front->obj = obj; kref_init(&front->ref); atomic_set(&front->bits, 0); i915_active_init(&front->write, frontbuffer_active, frontbuffer_retire, I915_ACTIVE_RETIRE_SLEEPS); spin_lock(&i915->display.fb_tracking.lock); cur = i915_gem_object_set_frontbuffer(obj, front); spin_unlock(&i915->display.fb_tracking.lock); if (cur != front) kfree(front); return cur; } void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, &intel_bo_to_i915(front->obj)->display.fb_tracking.lock); } /** * intel_frontbuffer_track - update frontbuffer tracking * @old: current buffer for the frontbuffer slots * @new: new buffer for the frontbuffer slots * @frontbuffer_bits: bitmask of frontbuffer slots * * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them * from @old and setting them in @new. Both @old and @new can be NULL. 
*/ void intel_frontbuffer_track(struct intel_frontbuffer *old, struct intel_frontbuffer *new, unsigned int frontbuffer_bits) { /* * Control of individual bits within the mask are guarded by * the owning plane->mutex, i.e. we can never see concurrent * manipulation of individual bits. But since the bitfield as a whole * is updated using RMW, we need to use atomics in order to update * the bits. */ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > BITS_PER_TYPE(atomic_t)); BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm, !(atomic_read(&old->bits) & frontbuffer_bits)); atomic_andnot(frontbuffer_bits, &old->bits); } if (new) { drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm, atomic_read(&new->bits) & frontbuffer_bits); atomic_or(frontbuffer_bits, &new->bits); } }
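/*
 * Editor's note: an illustrative, self-contained sketch (not part of the
 * driver above) of the bitmask handoff that intel_frontbuffer_track()
 * performs, modelled with C11 atomics instead of the kernel's atomic_t
 * helpers. Each bit stands for one frontbuffer slot; tracking clears the
 * bits on the old buffer and sets them on the new one. All names are
 * hypothetical and exist only for this example.
 */
#include <stdatomic.h>
#include <stdio.h>

struct example_frontbuffer {
	_Atomic unsigned int bits;	/* one bit per frontbuffer slot */
};

static void example_track(struct example_frontbuffer *old_fb,
			  struct example_frontbuffer *new_fb,
			  unsigned int frontbuffer_bits)
{
	/* Clear the slots on the buffer being replaced... */
	if (old_fb)
		atomic_fetch_and(&old_fb->bits, ~frontbuffer_bits);

	/* ...and set them on the buffer that takes the slots over. */
	if (new_fb)
		atomic_fetch_or(&new_fb->bits, frontbuffer_bits);
}

int main(void)
{
	struct example_frontbuffer a = { .bits = 0x3 };
	struct example_frontbuffer b = { .bits = 0x0 };

	/* Move slot 0 from buffer a to buffer b. */
	example_track(&a, &b, 0x1);

	printf("a bits: 0x%x, b bits: 0x%x\n",
	       atomic_load(&a.bits), atomic_load(&b.bits));
	return 0;
}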
linux-master
drivers/gpu/drm/i915/display/intel_frontbuffer.c
/* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Keith Packard <[email protected]> * */ #include <linux/export.h> #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <linux/timekeeping.h> #include <linux/types.h> #include <asm/byteorder.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_dsc_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> #include "g4x_dp.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_backlight.h" #include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_cx0_phy.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_fifo_underrun.h" #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_hotplug_irq.h" #include "intel_lspcon.h" #include "intel_lvds.h" #include "intel_panel.h" #include "intel_pch_display.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vrr.h" #include "intel_crtc_state_dump.h" /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 /* DP DSC FEC Overhead factor = 1/(0.972261) */ #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 /* Compliance test status bits */ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) /* Constants for DP DSC configurations */ static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; /* With Single pipe configuration, HW is capable of supporting maximum * of 4 slices per line. 
*/ static const u8 valid_dsc_slicecount[] = {1, 2, 4}; /** * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. * * This function is not safe to use prior to encoder type being set. */ bool intel_dp_is_edp(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); return dig_port->base.type == INTEL_OUTPUT_EDP; } static void intel_dp_unset_edid(struct intel_dp *intel_dp); /* Is link rate UHBR and thus 128b/132b? */ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) { return crtc_state->port_clock >= 1000000; } static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) { intel_dp->sink_rates[0] = 162000; intel_dp->num_sink_rates = 1; } /* update sink rates from dpcd */ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) { static const int dp_rates[] = { 162000, 270000, 540000, 810000 }; int i, max_rate; int max_lttpr_rate; if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ static const int quirk_rates[] = { 162000, 270000, 324000 }; memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); return; } /* * Sink rates for 8b/10b. */ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); if (max_lttpr_rate) max_rate = min(max_rate, max_lttpr_rate); for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { if (dp_rates[i] > max_rate) break; intel_dp->sink_rates[i] = dp_rates[i]; } /* * Sink rates for 128b/132b. If set, sink should support all 8b/10b * rates and 10 Gbps. 
*/ if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { u8 uhbr_rates = 0; BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); drm_dp_dpcd_readb(&intel_dp->aux, DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates); if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) { /* We have a repeater */ if (intel_dp->lttpr_common_caps[0] >= 0x20 && intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & DP_PHY_REPEATER_128B132B_SUPPORTED) { /* Repeater supports 128b/132b, valid UHBR rates */ uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; } else { /* Does not support 128b/132b */ uhbr_rates = 0; } } if (uhbr_rates & DP_UHBR10) intel_dp->sink_rates[i++] = 1000000; if (uhbr_rates & DP_UHBR13_5) intel_dp->sink_rates[i++] = 1350000; if (uhbr_rates & DP_UHBR20) intel_dp->sink_rates[i++] = 2000000; } intel_dp->num_sink_rates = i; } static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; intel_dp_set_dpcd_sink_rates(intel_dp); if (intel_dp->num_sink_rates) return; drm_err(&dp_to_i915(intel_dp)->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name); intel_dp_set_default_sink_rates(intel_dp); } static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) { intel_dp->max_sink_lane_count = 1; } static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); switch (intel_dp->max_sink_lane_count) { case 1: case 2: case 4: return; } drm_err(&dp_to_i915(intel_dp)->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, intel_dp->max_sink_lane_count); intel_dp_set_default_max_sink_lane_count(intel_dp); } /* Get length of rates array potentially limited by max_rate. */ static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) { int i; /* Limit results by potentially reduced max rate */ for (i = 0; i < len; i++) { if (rates[len - i - 1] <= max_rate) return len - i; } return 0; } /* Get length of common rates array potentially limited by max_rate. 
*/ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, int max_rate) { return intel_dp_rate_limit_len(intel_dp->common_rates, intel_dp->num_common_rates, max_rate); } static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) { if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, index < 0 || index >= intel_dp->num_common_rates)) return 162000; return intel_dp->common_rates[index]; } /* Theoretical max between source and sink */ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) { return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) { int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); int max_lanes = dig_port->max_lanes; if (vbt_max_lanes) max_lanes = min(max_lanes, vbt_max_lanes); return max_lanes; } /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dp_max_source_lane_count(dig_port); int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); if (lttpr_max) sink_max = min(sink_max, lttpr_max); return min3(source_max, sink_max, fia_max); } int intel_dp_max_lane_count(struct intel_dp *intel_dp) { switch (intel_dp->max_link_lane_count) { case 1: case 2: case 4: return intel_dp->max_link_lane_count; default: MISSING_CASE(intel_dp->max_link_lane_count); return 1; } } /* * The required data bandwidth for a mode with given pixel clock and bpp. This * is the required net bandwidth independent of the data bandwidth efficiency. */ int intel_dp_link_required(int pixel_clock, int bpp) { /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ return DIV_ROUND_UP(pixel_clock * bpp, 8); } /* * Given a link rate and lanes, get the data bandwidth. * * Data bandwidth is the actual payload rate, which depends on the data * bandwidth efficiency and the link rate. * * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by * coincidence, the port clock in kHz matches the data bandwidth in kBps, and * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no * longer holds for data bandwidth as soon as FEC or MST is taken into account!) * * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 * does not match the symbol clock, the port clock (not even if you think in * terms of a byte clock), nor the data bandwidth. It only matches the link bit * rate in units of 10000 bps. */ int intel_dp_max_data_rate(int max_link_rate, int max_lanes) { if (max_link_rate >= 1000000) { /* * UHBR rates always use 128b/132b channel encoding, and have * 97.71% data bandwidth efficiency. Consider max_link_rate the * link bit rate in units of 10000 bps. 
*/ int max_link_rate_kbps = max_link_rate * 10; max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); max_link_rate = max_link_rate_kbps / 8; } /* * Lower than UHBR rates always use 8b/10b channel encoding, and have * 80% data bandwidth efficiency for SST non-FEC. However, this turns * out to be a nop by coincidence, and can be skipped: * * int max_link_rate_kbps = max_link_rate * 10; * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); * max_link_rate = max_link_rate_kbps / 8; */ return max_link_rate * max_lanes; } bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); return DISPLAY_VER(dev_priv) >= 12 || (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A); } static int dg2_max_source_rate(struct intel_dp *intel_dp) { return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; } static int ehl_max_source_rate(struct intel_dp *intel_dp) { if (intel_dp_is_edp(intel_dp)) return 540000; return 810000; } static int mtl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); if (intel_is_c10phy(i915, phy)) return intel_dp_is_edp(intel_dp) ? 
675000 : 810000; return 2000000; } static int vbt_max_link_rate(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int max_rate; max_rate = intel_bios_dp_max_link_rate(encoder->devdata); if (intel_dp_is_edp(intel_dp)) { struct intel_connector *connector = intel_dp->attached_connector; int edp_max_rate = connector->panel.vbt.edp.max_link_rate; if (max_rate && edp_max_rate) max_rate = min(max_rate, edp_max_rate); else if (edp_max_rate) max_rate = edp_max_rate; } return max_rate; } static void intel_dp_set_source_rates(struct intel_dp *intel_dp) { /* The values must be in increasing order */ static const int mtl_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, 810000, 1000000, 1350000, 2000000, }; static const int icl_rates[] = { 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 1000000, 1350000, }; static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000 }; static const int skl_rates[] = { 162000, 216000, 270000, 324000, 432000, 540000 }; static const int hsw_rates[] = { 162000, 270000, 540000 }; static const int g4x_rates[] = { 162000, 270000 }; struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); const int *source_rates; int size, max_rate = 0, vbt_max_rate; /* This should only be done once */ drm_WARN_ON(&dev_priv->drm, intel_dp->source_rates || intel_dp->num_source_rates); if (DISPLAY_VER(dev_priv) >= 14) { source_rates = mtl_rates; size = ARRAY_SIZE(mtl_rates); max_rate = mtl_max_source_rate(intel_dp); } else if (DISPLAY_VER(dev_priv) >= 11) { source_rates = icl_rates; size = ARRAY_SIZE(icl_rates); if (IS_DG2(dev_priv)) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) max_rate = 810000; else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else max_rate = icl_max_source_rate(intel_dp); } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { source_rates = bxt_rates; size = ARRAY_SIZE(bxt_rates); } else if (DISPLAY_VER(dev_priv) == 9) { source_rates = skl_rates; size = ARRAY_SIZE(skl_rates); } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) || IS_BROADWELL(dev_priv)) { source_rates = hsw_rates; size = ARRAY_SIZE(hsw_rates); } else { source_rates = g4x_rates; size = ARRAY_SIZE(g4x_rates); } vbt_max_rate = vbt_max_link_rate(intel_dp); if (max_rate && vbt_max_rate) max_rate = min(max_rate, vbt_max_rate); else if (vbt_max_rate) max_rate = vbt_max_rate; if (max_rate) size = intel_dp_rate_limit_len(source_rates, size, max_rate); intel_dp->source_rates = source_rates; intel_dp->num_source_rates = size; } static int intersect_rates(const int *source_rates, int source_len, const int *sink_rates, int sink_len, int *common_rates) { int i = 0, j = 0, k = 0; while (i < source_len && j < sink_len) { if (source_rates[i] == sink_rates[j]) { if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) return k; common_rates[k] = source_rates[i]; ++k; ++i; ++j; } else if (source_rates[i] < sink_rates[j]) { ++i; } else { ++j; } } return k; } /* return index of rate in rates array, or -1 if not found */ static int intel_dp_rate_index(const int *rates, int len, int rate) { int i; for (i = 0; i < len; i++) if (rate == rates[i]) return i; return -1; } static void intel_dp_set_common_rates(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = 
dp_to_i915(intel_dp); drm_WARN_ON(&i915->drm, !intel_dp->num_source_rates || !intel_dp->num_sink_rates); intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, intel_dp->num_source_rates, intel_dp->sink_rates, intel_dp->num_sink_rates, intel_dp->common_rates); /* Paranoia, there should always be something in common. */ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { intel_dp->common_rates[0] = 162000; intel_dp->num_common_rates = 1; } } static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, u8 lane_count) { /* * FIXME: we need to synchronize the current link parameters with * hardware readout. Currently fast link training doesn't work on * boot-up. */ if (link_rate == 0 || link_rate > intel_dp->max_link_rate) return false; if (lane_count == 0 || lane_count > intel_dp_max_lane_count(intel_dp)) return false; return true; } static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int link_rate, u8 lane_count) { /* FIXME figure out what we actually want here */ const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(intel_dp->attached_connector); int mode_rate, max_rate; mode_rate = intel_dp_link_required(fixed_mode->clock, 18); max_rate = intel_dp_max_data_rate(link_rate, lane_count); if (mode_rate > max_rate) return false; return true; } int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int index; /* * TODO: Enable fallback on MST links once MST link compute can handle * the fallback params. */ if (intel_dp->is_mst) { drm_err(&i915->drm, "Link Training Unsuccessful\n"); return -1; } if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with max parameters\n"); intel_dp->use_max_params = true; return 0; } index = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates, link_rate); if (index > 0) { if (intel_dp_is_edp(intel_dp) && !intel_dp_can_link_train_fallback_for_edp(intel_dp, intel_dp_common_rate(intel_dp, index - 1), lane_count)) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with same parameters\n"); return 0; } intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { if (intel_dp_is_edp(intel_dp) && !intel_dp_can_link_train_fallback_for_edp(intel_dp, intel_dp_max_common_rate(intel_dp), lane_count >> 1)) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with same parameters\n"); return 0; } intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); intel_dp->max_link_lane_count = lane_count >> 1; } else { drm_err(&i915->drm, "Link Training Unsuccessful\n"); return -1; } return 0; } u32 intel_dp_mode_to_fec_clock(u32 mode_clock) { return div_u64(mul_u32_u32(mode_clock, 1000000U), DP_DSC_FEC_OVERHEAD_FACTOR); } static int small_joiner_ram_size_bits(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 13) return 17280 * 8; else if (DISPLAY_VER(i915) >= 11) return 7680 * 8; else return 6144 * 8; } u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp) { u32 bits_per_pixel = bpp; int i; /* Error out if the max bpp is less than smallest allowed valid bpp */ if (bits_per_pixel < valid_dsc_bpp[0]) { drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", bits_per_pixel, valid_dsc_bpp[0]); return 0; } /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 
BPPs */ if (DISPLAY_VER(i915) >= 13) { bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); /* * According to BSpec, 27 is the max DSC output bpp, * 8 is the min DSC output bpp. * While we can still clamp higher bpp values to 27, saving bandwidth, * if it is required to oompress up to bpp < 8, means we can't do * that and probably means we can't fit the required mode, even with * DSC enabled. */ if (bits_per_pixel < 8) { drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n", bits_per_pixel); return 0; } bits_per_pixel = min_t(u32, bits_per_pixel, 27); } else { /* Find the nearest match in the array of known BPPs from VESA */ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { if (bits_per_pixel < valid_dsc_bpp[i + 1]) break; } drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n", bits_per_pixel, valid_dsc_bpp[i]); bits_per_pixel = valid_dsc_bpp[i]; } return bits_per_pixel; } u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay, bool bigjoiner, u32 pipe_bpp, u32 timeslots) { u32 bits_per_pixel, max_bpp_small_joiner_ram; /* * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* * (LinkSymbolClock)* 8 * (TimeSlots / 64) * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) * for MST -> TimeSlots has to be calculated, based on mode requirements * * Due to FEC overhead, the available bw is reduced to 97.2261%. * To support the given mode: * Bandwidth required should be <= Available link Bandwidth * FEC Overhead * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / * (ModeClock / FEC Overhead) * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / * (ModeClock / FEC Overhead * 8) */ bits_per_pixel = ((link_clock * lane_count) * timeslots) / (intel_dp_mode_to_fec_clock(mode_clock) * 8); drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " "total bw %u pixel clock %u\n", bits_per_pixel, timeslots, (link_clock * lane_count * 8), intel_dp_mode_to_fec_clock(mode_clock)); /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; if (bigjoiner) max_bpp_small_joiner_ram *= 2; /* * Greatest allowed DSC BPP = MIN (output BPP from available Link BW * check, output bpp from small joiner RAM check) */ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); if (bigjoiner) { u32 max_bpp_bigjoiner = i915->display.cdclk.max_cdclk_freq * 48 / intel_dp_mode_to_fec_clock(mode_clock); bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); } bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp); /* * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, * fractional part is 0 */ return bits_per_pixel << 4; } u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, int mode_hdisplay, bool bigjoiner) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 min_slice_count, i; int max_slice_width; if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) min_slice_count = DIV_ROUND_UP(mode_clock, DP_DSC_MAX_ENC_THROUGHPUT_0); else min_slice_count = DIV_ROUND_UP(mode_clock, DP_DSC_MAX_ENC_THROUGHPUT_1); /* * Due to some DSC engine BW limitations, we need to enable second * slice and VDSC engine, whenever we approach close enough to max CDCLK */ if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100)) min_slice_count = max_t(u8, min_slice_count, 2); max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { drm_dbg_kms(&i915->drm, "Unsupported slice width %d by DP DSC Sink device\n", max_slice_width); return 0; } /* Also take into account max slice width */ min_slice_count = max_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); /* Find the closest match to the valid slice count values */ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; if (test_slice_count > drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false)) break; /* big joiner needs small joiner to be enabled */ if (bigjoiner && test_slice_count < 4) continue; if (min_slice_count <= test_slice_count) return test_slice_count; } drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", min_slice_count); return 0; } static bool source_can_output(struct intel_dp *intel_dp, enum intel_output_format format) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); switch (format) { case INTEL_OUTPUT_FORMAT_RGB: return true; case INTEL_OUTPUT_FORMAT_YCBCR444: /* * No YCbCr output support on gmch platforms. * Also, ILK doesn't seem capable of DP YCbCr output. * The displayed image is severly corrupted. SNB+ is fine. 
*/ return !HAS_GMCH(i915) && !IS_IRONLAKE(i915); case INTEL_OUTPUT_FORMAT_YCBCR420: /* Platform < Gen 11 cannot output YCbCr420 format */ return DISPLAY_VER(i915) >= 11; default: MISSING_CASE(format); return false; } } static bool dfp_can_convert_from_rgb(struct intel_dp *intel_dp, enum intel_output_format sink_format) { if (!drm_dp_is_branch(intel_dp->dpcd)) return false; if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) return intel_dp->dfp.rgb_to_ycbcr; if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return intel_dp->dfp.rgb_to_ycbcr && intel_dp->dfp.ycbcr_444_to_420; return false; } static bool dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp, enum intel_output_format sink_format) { if (!drm_dp_is_branch(intel_dp->dpcd)) return false; if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) return intel_dp->dfp.ycbcr_444_to_420; return false; } static enum intel_output_format intel_dp_output_format(struct intel_connector *connector, enum intel_output_format sink_format) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum intel_output_format output_format; if (intel_dp->force_dsc_output_format) return intel_dp->force_dsc_output_format; if (sink_format == INTEL_OUTPUT_FORMAT_RGB || dfp_can_convert_from_rgb(intel_dp, sink_format)) output_format = INTEL_OUTPUT_FORMAT_RGB; else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) output_format = INTEL_OUTPUT_FORMAT_YCBCR444; else output_format = INTEL_OUTPUT_FORMAT_YCBCR420; drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format)); return output_format; } int intel_dp_min_bpp(enum intel_output_format output_format) { if (output_format == INTEL_OUTPUT_FORMAT_RGB) return 6 * 3; else return 8 * 3; } static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) { /* * bpp value was assumed to RGB format. And YCbCr 4:2:0 output * format of the number of bytes per pixel will be half the number * of bytes of RGB pixel. */ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) bpp /= 2; return bpp; } static enum intel_output_format intel_dp_sink_format(struct intel_connector *connector, const struct drm_display_mode *mode) { const struct drm_display_info *info = &connector->base.display_info; if (drm_mode_is_420_only(info, mode)) return INTEL_OUTPUT_FORMAT_YCBCR420; return INTEL_OUTPUT_FORMAT_RGB; } static int intel_dp_mode_min_output_bpp(struct intel_connector *connector, const struct drm_display_mode *mode) { enum intel_output_format output_format, sink_format; sink_format = intel_dp_sink_format(connector, mode); output_format = intel_dp_output_format(connector, sink_format); return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); } static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, int hdisplay) { /* * Older platforms don't like hdisplay==4096 with DP. * * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline * and frame counter increment), but we don't get vblank interrupts, * and the pipe underruns immediately. The link also doesn't seem * to get trained properly. * * On CHV the vblank interrupts don't seem to disappear but * otherwise the symptoms are similar. 
* * TODO: confirm the behaviour on HSW+ */ return hdisplay == 4096 && !HAS_DDI(dev_priv); } static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; const struct drm_display_info *info = &connector->base.display_info; int max_tmds_clock = intel_dp->dfp.max_tmds_clock; /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */ if (max_tmds_clock && info->max_tmds_clock) max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); return max_tmds_clock; } static enum drm_mode_status intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, int clock, int bpc, enum intel_output_format sink_format, bool respect_downstream_limits) { int tmds_clock, min_tmds_clock, max_tmds_clock; if (!respect_downstream_limits) return MODE_OK; tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); min_tmds_clock = intel_dp->dfp.min_tmds_clock; max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); if (min_tmds_clock && tmds_clock < min_tmds_clock) return MODE_CLOCK_LOW; if (max_tmds_clock && tmds_clock > max_tmds_clock) return MODE_CLOCK_HIGH; return MODE_OK; } static enum drm_mode_status intel_dp_mode_valid_downstream(struct intel_connector *connector, const struct drm_display_mode *mode, int target_clock) { struct intel_dp *intel_dp = intel_attached_dp(connector); const struct drm_display_info *info = &connector->base.display_info; enum drm_mode_status status; enum intel_output_format sink_format; /* If PCON supports FRL MODE, check FRL bandwidth constraints */ if (intel_dp->dfp.pcon_max_frl_bw) { int target_bw; int max_frl_bw; int bpp = intel_dp_mode_min_output_bpp(connector, mode); target_bw = bpp * target_clock; max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; /* converting bw from Gbps to Kbps*/ max_frl_bw = max_frl_bw * 1000000; if (target_bw > max_frl_bw) return MODE_CLOCK_HIGH; return MODE_OK; } if (intel_dp->dfp.max_dotclock && target_clock > intel_dp->dfp.max_dotclock) return MODE_CLOCK_HIGH; sink_format = intel_dp_sink_format(connector, mode); /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 8, sink_format, true); if (status != MODE_OK) { if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, mode)) return status; sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 8, sink_format, true); if (status != MODE_OK) return status; } return MODE_OK; } bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, int hdisplay, int clock) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp_can_bigjoiner(intel_dp)) return false; return clock > i915->max_dotclk_freq || hdisplay > 5120; } static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *_connector, struct drm_display_mode *mode) { struct intel_connector *connector = to_intel_connector(_connector); struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct drm_display_mode *fixed_mode; int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; int max_dotclk = dev_priv->max_dotclk_freq; u16 dsc_max_output_bpp = 0; u8 dsc_slice_count = 0; enum drm_mode_status status; bool dsc = false, bigjoiner = false; if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; fixed_mode = intel_panel_fixed_mode(connector, mode); if (intel_dp_is_edp(intel_dp) && fixed_mode) { status = 
intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; target_clock = fixed_mode->clock; } if (mode->clock < 10000) return MODE_CLOCK_LOW; if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { bigjoiner = true; max_dotclk *= 2; } if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) return MODE_H_ILLEGAL; max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, intel_dp_mode_min_output_bpp(connector, mode)); if (HAS_DSC(dev_priv) && drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { /* * TBD pass the connector BPC, * for now U8_MAX so that max BPC on that platform would be picked */ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); /* * Output bpp is stored in 6.4 format so right shift by 4 to get the * integer value since we support only integer values of bpp. */ if (intel_dp_is_edp(intel_dp)) { dsc_max_output_bpp = drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; dsc_slice_count = drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, true); } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { dsc_max_output_bpp = intel_dp_dsc_get_output_bpp(dev_priv, max_link_clock, max_lanes, target_clock, mode->hdisplay, bigjoiner, pipe_bpp, 64) >> 4; dsc_slice_count = intel_dp_dsc_get_slice_count(intel_dp, target_clock, mode->hdisplay, bigjoiner); } dsc = dsc_max_output_bpp && dsc_slice_count; } /* * Big joiner configuration needs DSC for TGL which is not true for * XE_LPD where uncompressed joiner is supported. */ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) return MODE_CLOCK_HIGH; if (mode_rate > max_rate && !dsc) return MODE_CLOCK_HIGH; status = intel_dp_mode_valid_downstream(connector, mode, target_clock); if (status != MODE_OK) return status; return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); } bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); } bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 10; } static void snprintf_int_array(char *str, size_t len, const int *array, int nelem) { int i; str[0] = '\0'; for (i = 0; i < nelem; i++) { int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); if (r >= len) return; str += r; len -= r; } } static void intel_dp_print_rates(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); char str[128]; /* FIXME: too big for stack? 
*/ if (!drm_debug_enabled(DRM_UT_KMS)) return; snprintf_int_array(str, sizeof(str), intel_dp->source_rates, intel_dp->num_source_rates); drm_dbg_kms(&i915->drm, "source rates: %s\n", str); snprintf_int_array(str, sizeof(str), intel_dp->sink_rates, intel_dp->num_sink_rates); drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); snprintf_int_array(str, sizeof(str), intel_dp->common_rates, intel_dp->num_common_rates); drm_dbg_kms(&i915->drm, "common rates: %s\n", str); } int intel_dp_max_link_rate(struct intel_dp *intel_dp) { int len; len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); return intel_dp_common_rate(intel_dp, len - 1); } int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i = intel_dp_rate_index(intel_dp->sink_rates, intel_dp->num_sink_rates, rate); if (drm_WARN_ON(&i915->drm, i < 0)) i = 0; return i; } void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select) { /* eDP 1.4 rate select method. */ if (intel_dp->use_rate_select) { *link_bw = 0; *rate_select = intel_dp_rate_select(intel_dp, port_clock); } else { *link_bw = drm_dp_link_rate_to_bw_code(port_clock); *rate_select = 0; } } bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; return connector->base.display_info.is_hdmi; } static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* On TGL, FEC is supported on all Pipes */ if (DISPLAY_VER(dev_priv) >= 12) return true; if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A) return true; return false; } static bool intel_dp_supports_fec(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { return intel_dp_source_supports_fec(intel_dp, pipe_config) && drm_dp_sink_supports_fec(intel_dp->fec_capable); } static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) return false; return intel_dsc_source_support(crtc_state) && drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int bpc, bool respect_downstream_limits) { int clock = crtc_state->hw.adjusted_mode.crtc_clock; /* * Current bpc could already be below 8bpc due to * FDI bandwidth constraints or other limits. * HDMI minimum is 8bpc however. */ bpc = max(bpc, 8); /* * We will never exceed downstream TMDS clock limits while * attempting deep color. If the user insists on forcing an * out of spec mode they will have to be satisfied with 8bpc. 
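	 *
	 * As a rough, illustrative note (the figures below are an assumption
	 * added for this comment, not taken from this driver): a 4k@60 RGB
	 * mode needs a TMDS character rate of about 594 MHz at 8 bpc, growing
	 * to roughly 742.5 MHz at 10 bpc, which can exceed the limit reported
	 * by a DP++/HDMI DFP; the loop below then falls back towards 8 bpc.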
*/
	if (!respect_downstream_limits)
		bpc = 8;

	for (; bpc >= 8; bpc -= 2) {
		if (intel_hdmi_bpc_possible(crtc_state, bpc,
					    intel_dp_has_hdmi_sink(intel_dp)) &&
		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
					      respect_downstream_limits) == MODE_OK)
			return bpc;
	}

	return -EINVAL;
}

static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from VBT only for panels that don't have bpp in EDID */
		if (intel_connector->base.display_info.bpc == 0 &&
		    intel_connector->panel.vbt.edp.bpp &&
		    intel_connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    intel_connector->panel.vbt.edp.bpp);
			bpp = intel_connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp,
					       intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static bool has_seamless_m_n(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	/*
	 * Seamless M/N reprogramming only implemented
	 * for BDW+ double buffered M/N registers so far.
	 */
	return HAS_DOUBLE_BUFFERED_M_N(i915) &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}

static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/* FIXME a bit of a mess wrt clock vs.
crtc_clock */ if (has_seamless_m_n(connector)) return intel_panel_highest_mode(connector, adjusted_mode)->clock; else return adjusted_mode->crtc_clock; } /* Optimize link config in order: max bpp, min clock, min lanes */ static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state, const struct link_config_limits *limits) { int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); int mode_rate, link_rate, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); mode_rate = intel_dp_link_required(clock, output_bpp); for (i = 0; i < intel_dp->num_common_rates; i++) { link_rate = intel_dp_common_rate(intel_dp, i); if (link_rate < limits->min_rate || link_rate > limits->max_rate) continue; for (lane_count = limits->min_lane_count; lane_count <= limits->max_lane_count; lane_count <<= 1) { link_avail = intel_dp_max_data_rate(link_rate, lane_count); if (mode_rate <= link_avail) { pipe_config->lane_count = lane_count; pipe_config->pipe_bpp = bpp; pipe_config->port_clock = link_rate; return 0; } } } } return -EINVAL; } int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i, num_bpc; u8 dsc_bpc[3] = {0}; u8 dsc_max_bpc; /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ if (DISPLAY_VER(i915) >= 12) dsc_max_bpc = min_t(u8, 12, max_req_bpc); else dsc_max_bpc = min_t(u8, 10, max_req_bpc); num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, dsc_bpc); for (i = 0; i < num_bpc; i++) { if (dsc_max_bpc >= dsc_bpc[i]) return dsc_bpc[i] * 3; } return 0; } static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); return DISPLAY_VER(i915) >= 14 ? 2 : 1; } static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) { return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT; } static int intel_dp_get_slice_height(int vactive) { int slice_height; /* * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 * lines is an optimal slice height, but any size can be used as long as * vertical active integer multiple and maximum vertical slice count * requirements are met. */ for (slice_height = 108; slice_height <= vactive; slice_height += 2) if (vactive % slice_height == 0) return slice_height; /* * Highly unlikely we reach here as most of the resolutions will end up * finding appropriate slice_height in above loop but returning * slice_height as 2 here as it should work with all resolutions. */ return 2; } static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u8 line_buf_depth; int ret; /* * RC_MODEL_SIZE is currently a constant across all configurations. * * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and * DP_DSC_RC_BUF_SIZE for this. 
*/ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; vdsc_cfg->dsc_version_major = (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; vdsc_cfg->dsc_version_minor = min(intel_dp_source_dsc_version_minor(intel_dp), intel_dp_sink_dsc_version_minor(intel_dp)); if (vdsc_cfg->convert_rgb) vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & DP_DSC_RGB; line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); if (!line_buf_depth) { drm_dbg_kms(&i915->drm, "DSC Sink Line Buffer Depth invalid\n"); return -EINVAL; } if (vdsc_cfg->dsc_version_minor == 2) vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; else vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; vdsc_cfg->block_pred_enable = intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_BLK_PREDICTION_IS_SUPPORTED; return drm_dsc_compute_rc_parameters(vdsc_cfg); } static bool intel_dp_dsc_supports_format(struct intel_dp *intel_dp, enum intel_output_format output_format) { u8 sink_dsc_format; switch (output_format) { case INTEL_OUTPUT_FORMAT_RGB: sink_dsc_format = DP_DSC_RGB; break; case INTEL_OUTPUT_FORMAT_YCBCR444: sink_dsc_format = DP_DSC_YCbCr444; break; case INTEL_OUTPUT_FORMAT_YCBCR420: if (min(intel_dp_source_dsc_version_minor(intel_dp), intel_dp_sink_dsc_version_minor(intel_dp)) < 2) return false; sink_dsc_format = DP_DSC_YCbCr420_Native; break; default: return false; } return drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, sink_dsc_format); } int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state, struct link_config_limits *limits, int timeslots, bool compute_pipe_bpp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int pipe_bpp; int ret; pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && intel_dp_supports_fec(intel_dp, pipe_config); if (!intel_dp_supports_dsc(intel_dp, pipe_config)) return -EINVAL; if (!intel_dp_dsc_supports_format(intel_dp, pipe_config->output_format)) return -EINVAL; if (compute_pipe_bpp) pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); else pipe_bpp = pipe_config->pipe_bpp; if (intel_dp->force_dsc_bpc) { pipe_bpp = intel_dp->force_dsc_bpc * 3; drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp); } /* Min Input BPC for ICL+ is 8 */ if (pipe_bpp < 8 * 3) { drm_dbg_kms(&dev_priv->drm, "No DSC support for less than 8bpc\n"); return -EINVAL; } /* * For now enable DSC for max bpp, max link rate, max lane count. * Optimize this later for the minimum possible link rate/lane count * with DSC enabled for the requested mode. 
*/
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);

		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
		if (!pipe_config->dsc.slice_count) {
			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
				    pipe_config->dsc.slice_count);
			return -EINVAL;
		}
	} else {
		u16 dsc_max_output_bpp = 0;
		u8 dsc_dp_slice_count;

		if (compute_pipe_bpp) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    pipe_config->port_clock,
							    pipe_config->lane_count,
							    adjusted_mode->crtc_clock,
							    adjusted_mode->crtc_hdisplay,
							    pipe_config->bigjoiner_pipes,
							    pipe_bpp,
							    timeslots);
			/*
			 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum
			 * supported PPS value can be 63.9375, with the further mention
			 * that bpp should be programmed to double the target bpp,
			 * restricting our target bpp to at most 31.9375.
			 */
			if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
				dsc_max_output_bpp = min_t(u16, dsc_max_output_bpp, 31 << 4);
			if (!dsc_max_output_bpp) {
				drm_dbg_kms(&dev_priv->drm,
					    "Compressed BPP not supported\n");
				return -EINVAL;
			}
		}
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner_pipes);
		if (!dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed Slice Count not supported\n");
			return -EINVAL;
		}

		/*
		 * compute_pipe_bpp is set to false for the DP MST DSC case, and
		 * compressed_bpp is calculated at the same time, once VCPI
		 * timeslots are allocated, because the overall bpp calculation
		 * procedure is a bit different for the MST case.
		 */
		if (compute_pipe_bpp) {
			pipe_config->dsc.compressed_bpp = min_t(u16,
								dsc_max_output_bpp >> 4,
								pipe_config->pipe_bpp);
		}
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
		drm_dbg_kms(&dev_priv->drm, "DSC: compressed bpp %d slice count %d\n",
			    pipe_config->dsc.compressed_bpp,
			    pipe_config->dsc.slice_count);
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
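	 *
	 * Rough, illustrative example (the figures are an assumption added for
	 * this comment, not taken from this function): a 5k@60 mode has a
	 * pixel rate well above a ~650 MHz max cdclk, so once more than one
	 * slice is in use the compression work is split across two VDSC
	 * engines.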
*/
	if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
		pipe_config->dsc.dsc_split = true;

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool joiner_needs_dsc = false;
	int ret;

	limits.min_rate = intel_dp_common_rate(intel_dp, 0);
	limits.max_rate = intel_dp_max_link_rate(intel_dp);

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config, respect_downstream_limits);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_rate = limits.max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count, limits.max_rate,
		    limits.max_bpp, adjusted_mode->crtc_clock);

	if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
				    adjusted_mode->crtc_clock))
		pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	/*
	 * Pipe joiner needs compression up to display 12 due to bandwidth
	 * limitations. From DG2 onwards the pipe joiner can be enabled without
	 * compression.
	 */
	joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;

	/*
	 * Optimize for slow and wide for everything, because there are some
	 * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
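	 *
	 * As a hedged illustration (the example rates are an assumption added
	 * for this comment, not part of the original note): if a mode fits
	 * both on 1 lane at HBR2 and on 4 lanes at RBR,
	 * intel_dp_compute_link_config_wide() below settles on an RBR
	 * configuration rather than HBR2, because its link rate loop runs
	 * before its lane count loop.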
*/ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits); if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) { drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", str_yes_no(ret), str_yes_no(joiner_needs_dsc), str_yes_no(intel_dp->force_dsc_en)); ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, conn_state, &limits, 64, true); if (ret < 0) return ret; } if (pipe_config->dsc.compression_enable) { drm_dbg_kms(&i915->drm, "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, pipe_config->dsc.compressed_bpp); drm_dbg_kms(&i915->drm, "DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, pipe_config->dsc.compressed_bpp), intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } else { drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp); drm_dbg_kms(&i915->drm, "DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, pipe_config->pipe_bpp), intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } return 0; } bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; /* * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. */ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) return false; if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* * See: * CEA-861-E - 5.1 Default Encoding Parameters * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry */ return crtc_state->pipe_bpp != 18 && drm_default_rgb_quant_range(adjusted_mode) == HDMI_QUANTIZATION_RANGE_LIMITED; } else { return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; } } static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, enum port port) { if (IS_G4X(dev_priv)) return false; if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) return false; return true; } static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, struct drm_dp_vsc_sdp *vsc) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ * Colorimetry Format indication. 
*/ vsc->revision = 0x5; vsc->length = 0x13; /* DP 1.4a spec, Table 2-120 */ switch (crtc_state->output_format) { case INTEL_OUTPUT_FORMAT_YCBCR444: vsc->pixelformat = DP_PIXELFORMAT_YUV444; break; case INTEL_OUTPUT_FORMAT_YCBCR420: vsc->pixelformat = DP_PIXELFORMAT_YUV420; break; case INTEL_OUTPUT_FORMAT_RGB: default: vsc->pixelformat = DP_PIXELFORMAT_RGB; } switch (conn_state->colorspace) { case DRM_MODE_COLORIMETRY_BT709_YCC: vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; break; case DRM_MODE_COLORIMETRY_XVYCC_601: vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; break; case DRM_MODE_COLORIMETRY_XVYCC_709: vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; break; case DRM_MODE_COLORIMETRY_SYCC_601: vsc->colorimetry = DP_COLORIMETRY_SYCC_601; break; case DRM_MODE_COLORIMETRY_OPYCC_601: vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; break; case DRM_MODE_COLORIMETRY_BT2020_CYCC: vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; break; case DRM_MODE_COLORIMETRY_BT2020_RGB: vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; break; case DRM_MODE_COLORIMETRY_BT2020_YCC: vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; break; case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; break; default: /* * RGB->YCBCR color conversion uses the BT.709 * color space. */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; else vsc->colorimetry = DP_COLORIMETRY_DEFAULT; break; } vsc->bpc = crtc_state->pipe_bpp / 3; /* only RGB pixelformat supports 6 bpc */ drm_WARN_ON(&dev_priv->drm, vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); /* all YCbCr are always limited range */ vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; } static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ if (crtc_state->has_psr) return; if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) return; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); vsc->sdp_type = DP_SDP_VSC; intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, &crtc_state->infoframes.vsc); } void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, struct drm_dp_vsc_sdp *vsc) { vsc->sdp_type = DP_SDP_VSC; if (crtc_state->has_psr2) { if (intel_dp->psr.colorimetry_support && intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { /* [PSR2, +Colorimetry] */ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, vsc); } else { /* * [PSR2, -Colorimetry] * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 * 3D stereo + PSR/PSR2 + Y-coordinate. */ vsc->revision = 0x4; vsc->length = 0xe; } } else { /* * [PSR1] * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or * higher). 
*/ vsc->revision = 0x2; vsc->length = 0x8; } } static void intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { int ret; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; if (!conn_state->hdr_output_metadata) return; ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); if (ret) { drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); return; } crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915, enum transcoder cpu_transcoder) { if (HAS_DOUBLE_BUFFERED_M_N(i915)) return true; return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); } static bool can_enable_drrs(struct intel_connector *connector, const struct intel_crtc_state *pipe_config, const struct drm_display_mode *downclock_mode) { struct drm_i915_private *i915 = to_i915(connector->base.dev); if (pipe_config->vrr.enable) return false; /* * DRRS and PSR can't be enable together, so giving preference to PSR * as it allows more power-savings by complete shutting down display, * so to guarantee this, intel_drrs_compute_config() must be called * after intel_psr_compute_config(). */ if (pipe_config->has_psr) return false; /* FIXME missing FDI M2/N2 etc. */ if (pipe_config->has_pch_encoder) return false; if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder)) return false; return downclock_mode && intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; } static void intel_dp_drrs_compute_config(struct intel_connector *connector, struct intel_crtc_state *pipe_config, int output_bpp) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *downclock_mode = intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); int pixel_clock; if (has_seamless_m_n(connector)) pipe_config->seamless_m_n = true; if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) intel_zero_m_n(&pipe_config->dp_m2_n2); return; } if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay; pipe_config->has_drrs = true; pixel_clock = downclock_mode->clock; if (pipe_config->splitter.enable) pixel_clock /= pipe_config->splitter.link_count; intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, pipe_config->port_clock, &pipe_config->dp_m2_n2, pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; } static bool intel_dp_has_audio(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); if (!intel_dp_port_has_audio(i915, encoder->port)) return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) return connector->base.display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } static int intel_dp_compute_output_format(struct intel_encoder *encoder, struct 
intel_crtc_state *crtc_state, struct drm_connector_state *conn_state, bool respect_downstream_limits) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; const struct drm_display_info *info = &connector->base.display_info; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; bool ycbcr_420_only; int ret; ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) { drm_dbg_kms(&i915->drm, "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; } else { crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode); } crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format); ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, respect_downstream_limits); if (ret) { if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || !connector->base.ycbcr_420_allowed || !drm_mode_is_420_also(info, adjusted_mode)) return ret; crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format); ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, respect_downstream_limits); } return ret; } static void intel_dp_audio_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = conn_state->connector; pipe_config->sdp_split_enable = intel_dp_has_audio(encoder, conn_state) && intel_dp_is_uhbr(pipe_config); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n", connector->base.id, connector->name, str_yes_no(pipe_config->sdp_split_enable)); } int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; struct intel_connector *connector = intel_dp->attached_connector; int ret = 0, output_bpp; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) pipe_config->has_pch_encoder = true; pipe_config->has_audio = intel_dp_has_audio(encoder, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); if (intel_dp_is_edp(intel_dp) && fixed_mode) { ret = intel_panel_compute_config(connector, adjusted_mode); if (ret) return ret; } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; if (!connector->base.interlace_allowed && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return -EINVAL; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return -EINVAL; if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) return -EINVAL; /* * Try to respect downstream TMDS clock limits first, if * that fails assume the user might know something we don't. 
*/ ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true); if (ret) ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false); if (ret) return ret; if ((intel_dp_is_edp(intel_dp) && fixed_mode) || pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; } pipe_config->limited_color_range = intel_dp_limited_color_range(pipe_config, conn_state); if (pipe_config->dsc.compression_enable) output_bpp = pipe_config->dsc.compressed_bpp; else output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_config->pipe_bpp); if (intel_dp->mso_link_count) { int n = intel_dp->mso_link_count; int overlap = intel_dp->mso_pixel_overlap; pipe_config->splitter.enable = true; pipe_config->splitter.link_count = n; pipe_config->splitter.pixel_overlap = overlap; drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", n, overlap); adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; adjusted_mode->crtc_clock /= n; } intel_dp_audio_compute_config(encoder, pipe_config, conn_state); intel_link_compute_m_n(output_bpp, pipe_config->lane_count, adjusted_mode->crtc_clock, pipe_config->port_clock, &pipe_config->dp_m_n, pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count; if (!HAS_DDI(dev_priv)) g4x_dp_set_clock(encoder, pipe_config); intel_vrr_compute_config(pipe_config, conn_state); intel_psr_compute_config(intel_dp, pipe_config, conn_state); intel_dp_drrs_compute_config(connector, pipe_config, output_bpp); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); return 0; } void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp->link_trained = false; intel_dp->link_rate = link_rate; intel_dp->lane_count = lane_count; } static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) { intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); } /* Enable backlight PWM and backlight PP control. */ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; drm_dbg_kms(&i915->drm, "\n"); intel_backlight_enable(crtc_state, conn_state); intel_pps_backlight_on(intel_dp); } /* Disable backlight PP control and backlight PWM. 
*/ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; drm_dbg_kms(&i915->drm, "\n"); intel_pps_backlight_off(intel_dp); intel_backlight_disable(old_conn_state); } static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) { /* * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus * be capable of signalling downstream hpd with a long pulse. * Whether or not that means D3 is safe to use is not clear, * but let's assume so until proven otherwise. * * FIXME should really check all downstream ports... */ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && drm_dp_is_branch(intel_dp->dpcd) && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; } void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int ret; if (!crtc_state->dsc.compression_enable) return; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, enable ? DP_DECOMPRESSION_EN : 0); if (ret < 0) drm_dbg_kms(&i915->drm, "Failed to %s sink decompression state\n", str_enable_disable(enable)); } static void intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 oui[] = { 0x00, 0xaa, 0x01 }; u8 buf[3] = { 0 }; /* * During driver init, we want to be careful and avoid changing the source OUI if it's * already set to what we want, so as to avoid clearing any state by accident */ if (careful) { if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) drm_err(&i915->drm, "Failed to read source OUI\n"); if (memcmp(oui, buf, sizeof(oui)) == 0) return; } if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) drm_err(&i915->drm, "Failed to write source OUI\n"); intel_dp->last_oui_write = jiffies; } void intel_dp_wait_source_oui(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n", connector->base.base.id, connector->base.name, connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); } /* If the device supports it, try to set the power state appropriately */ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); int ret, i; /* Should have a valid DPCD by this point */ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; if (mode != DP_SET_POWER_D0) { if (downstream_hpd_needs_d0(intel_dp)) return; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); } else { struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); lspcon_resume(dp_to_dig_port(intel_dp)); /* Write the source OUI as early as possible */ if (intel_dp_is_edp(intel_dp)) intel_edp_init_source_oui(intel_dp, false); /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. 
*/ for (i = 0; i < 3; i++) { ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); if (ret == 1) break; msleep(1); } if (ret == 1 && lspcon->active) lspcon_wait_pcon_mode(lspcon); } if (ret != 1) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", encoder->base.base.id, encoder->base.name, mode == DP_SET_POWER_D0 ? "D0" : "D3"); } static bool intel_dp_get_dpcd(struct intel_dp *intel_dp); /** * intel_dp_sync_state - sync the encoder state during init/resume * @encoder: intel encoder to sync * @crtc_state: state for the CRTC connected to the encoder * * Sync any state stored in the encoder wrt. HW state during driver init * and system resume. */ void intel_dp_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (!crtc_state) return; /* * Don't clobber DPCD if it's been already read out during output * setup (eDP) or detect. */ if (intel_dp->dpcd[DP_DPCD_REV] == 0) intel_dp_get_dpcd(intel_dp); intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); bool fastset = true; /* * If BIOS has set an unsupported or non-standard link rate for some * reason force an encoder recompute and full modeset. */ if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, crtc_state->port_clock) < 0) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.connectors_changed = true; fastset = false; } /* * FIXME hack to force full modeset when DSC is being used. * * As long as we do not have full state readout and config comparison * of crtc_state->dsc, we have no way to ensure reliable fastset. * Remove once we have readout for DSC. 
*/ if (crtc_state->dsc.compression_enable) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.mode_changed = true; fastset = false; } if (CAN_PSR(intel_dp)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.mode_changed = true; fastset = false; } return fastset; } static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); /* Clear the cached register set to avoid using stale values */ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, intel_dp->pcon_dsc_dpcd, sizeof(intel_dp->pcon_dsc_dpcd)) < 0) drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", DP_PCON_DSC_ENCODER); drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); } static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) { int bw_gbps[] = {9, 18, 24, 32, 40, 48}; int i; for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { if (frl_bw_mask & (1 << i)) return bw_gbps[i]; } return 0; } static int intel_dp_pcon_set_frl_mask(int max_frl) { switch (max_frl) { case 48: return DP_PCON_FRL_BW_MASK_48GBPS; case 40: return DP_PCON_FRL_BW_MASK_40GBPS; case 32: return DP_PCON_FRL_BW_MASK_32GBPS; case 24: return DP_PCON_FRL_BW_MASK_24GBPS; case 18: return DP_PCON_FRL_BW_MASK_18GBPS; case 9: return DP_PCON_FRL_BW_MASK_9GBPS; } return 0; } static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) { struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; int max_frl_rate; int max_lanes, rate_per_lane; int max_dsc_lanes, dsc_rate_per_lane; max_lanes = connector->display_info.hdmi.max_lanes; rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; max_frl_rate = max_lanes * rate_per_lane; if (connector->display_info.hdmi.dsc_cap.v_1p2) { max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; if (max_dsc_lanes && dsc_rate_per_lane) max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); } return max_frl_rate; } static bool intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, u8 max_frl_bw_mask, u8 *frl_trained_mask) { if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && *frl_trained_mask >= max_frl_bw_mask) return true; return false; } static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) { #define TIMEOUT_FRL_READY_MS 500 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 struct drm_i915_private *i915 = dp_to_i915(intel_dp); int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; u8 max_frl_bw_mask = 0, frl_trained_mask; bool is_active; max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); if (max_frl_bw <= 0) return -EINVAL; max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, 
&frl_trained_mask)) goto frl_trained; ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); if (ret < 0) return ret; /* Wait for PCON to be FRL Ready */ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); if (!is_active) return -ETIMEDOUT; ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, DP_PCON_ENABLE_SEQUENTIAL_LINK); if (ret < 0) return ret; ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, DP_PCON_FRL_LINK_TRAIN_NORMAL); if (ret < 0) return ret; ret = drm_dp_pcon_frl_enable(&intel_dp->aux); if (ret < 0) return ret; /* * Wait for FRL to be completed * Check if the HDMI Link is up and active. */ wait_for(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), TIMEOUT_HDMI_LINK_ACTIVE_MS); if (!is_active) return -ETIMEDOUT; frl_trained: drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); intel_dp->frl.is_trained = true; drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); return 0; } static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) { if (drm_dp_is_branch(intel_dp->dpcd) && intel_dp_has_hdmi_sink(intel_dp) && intel_dp_hdmi_sink_max_frl(intel_dp) > 0) return true; return false; } static int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) { int ret; u8 buf = 0; /* Set PCON source control mode */ buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); if (ret < 0) return ret; /* Set HDMI LINK ENABLE */ buf |= DP_PCON_ENABLE_HDMI_LINK; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); if (ret < 0) return ret; return 0; } void intel_dp_check_frl_training(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* * Always go for FRL training if: * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) * -sink is HDMI2.1 */ if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || !intel_dp_is_hdmi_2_1_sink(intel_dp) || intel_dp->frl.is_trained) return; if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { int ret, mode; drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); ret = intel_dp_pcon_set_tmds_mode(intel_dp); mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); } else { drm_dbg(&dev_priv->drm, "FRL training Completed\n"); } } static int intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) { int vactive = crtc_state->hw.adjusted_mode.vdisplay; return intel_hdmi_dsc_get_slice_height(vactive); } static int intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, pcon_max_slice_width, hdmi_max_slices, hdmi_throughput); } static int intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, const struct 
intel_crtc_state *crtc_state, int num_slices, int slice_width) { struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; int output_format = crtc_state->output_format; bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); int hdmi_max_chunk_bytes = connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, num_slices, output_format, hdmi_all_bpp, hdmi_max_chunk_bytes); } void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { u8 pps_param[6]; int slice_height; int slice_width; int num_slices; int bits_per_pixel; int ret; struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector *connector; bool hdmi_is_dsc_1_2; if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) return; if (!intel_connector) return; connector = &intel_connector->base; hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || !hdmi_is_dsc_1_2) return; slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); if (!slice_height) return; num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); if (!num_slices) return; slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, num_slices); bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, num_slices, slice_width); if (!bits_per_pixel) return; pps_param[0] = slice_height & 0xFF; pps_param[1] = slice_height >> 8; pps_param[2] = slice_width & 0xFF; pps_param[3] = slice_width >> 8; pps_param[4] = bits_per_pixel & 0xFF; pps_param[5] = (bits_per_pixel >> 8) & 0x3; ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); if (ret < 0) drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); } void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool ycbcr444_to_420 = false; bool rgb_to_ycbcr = false; u8 tmp; if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) return; if (!drm_dp_is_branch(intel_dp->dpcd)) return; tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { switch (crtc_state->output_format) { case INTEL_OUTPUT_FORMAT_YCBCR420: break; case INTEL_OUTPUT_FORMAT_YCBCR444: ycbcr444_to_420 = true; break; case INTEL_OUTPUT_FORMAT_RGB: rgb_to_ycbcr = true; ycbcr444_to_420 = true; break; default: MISSING_CASE(crtc_state->output_format); break; } } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { switch (crtc_state->output_format) { case INTEL_OUTPUT_FORMAT_YCBCR444: break; case INTEL_OUTPUT_FORMAT_RGB: rgb_to_ycbcr = true; break; default: MISSING_CASE(crtc_state->output_format); break; } } tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) drm_dbg_kms(&i915->drm, "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); tmp = rgb_to_ycbcr ? 
DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) drm_dbg_kms(&i915->drm, "Failed to %s protocol converter RGB->YCbCr conversion mode\n", str_enable_disable(tmp)); } bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { u8 dprx = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &dprx) != 1) return false; return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; } static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); /* * Clear the cached register set to avoid using stale values * for the sinks that do not support DSC. */ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); /* Clear fec_capable to avoid using stale values */ intel_dp->fec_capable = 0; /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || intel_dp->edp_dpcd[0] >= DP_EDP_14) { if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, intel_dp->dsc_dpcd, sizeof(intel_dp->dsc_dpcd)) < 0) drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", DP_DSC_SUPPORT); drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", (int)sizeof(intel_dp->dsc_dpcd), intel_dp->dsc_dpcd); /* FEC is supported only on DP 1.4 */ if (!intel_dp_is_edp(intel_dp) && drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, &intel_dp->fec_capable) < 0) drm_err(&i915->drm, "Failed to read FEC DPCD register\n"); drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", intel_dp->fec_capable); } } static void intel_edp_mso_mode_fixup(struct intel_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); int n = intel_dp->mso_link_count; int overlap = intel_dp->mso_pixel_overlap; if (!mode || !n) return; mode->hdisplay = (mode->hdisplay - overlap) * n; mode->hsync_start = (mode->hsync_start - overlap) * n; mode->hsync_end = (mode->hsync_end - overlap) * n; mode->htotal = (mode->htotal - overlap) * n; mode->clock *= n; drm_mode_set_name(mode); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); } void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_connector *connector = intel_dp->attached_connector; if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) { /* * This is a big fat ugly hack. * * Some machines in UEFI boot mode provide us a VBT that has 18 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons * unknown we fail to light up. Yet the same BIOS boots up with * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as * max, not what it tells us to use. * * Note: This will still be broken if the eDP panel is not lit * up by the BIOS, and thus we can't get the mode at module * load. 
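		 *
		 * In short: if the BIOS actually lit the panel with a higher bpp
		 * than the VBT claims to support, trust the value the BIOS used
		 * and raise the cached VBT limit to match.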
*/ drm_dbg_kms(&dev_priv->drm, "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", pipe_bpp, connector->panel.vbt.edp.bpp); connector->panel.vbt.edp.bpp = pipe_bpp; } } static void intel_edp_mso_init(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; struct drm_display_info *info = &connector->base.display_info; u8 mso; if (intel_dp->edp_dpcd[0] < DP_EDP_14) return; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { drm_err(&i915->drm, "Failed to read MSO cap\n"); return; } /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); mso = 0; } if (mso) { drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n", mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso, info->mso_pixel_overlap); if (!HAS_MSO(i915)) { drm_err(&i915->drm, "No source MSO support, disabling\n"); mso = 0; } } intel_dp->mso_link_count = mso; intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; } static bool intel_edp_init_dpcd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); /* this function is meant to be called only once */ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) return false; drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); /* * Read the eDP display control registers. * * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it * set, but require eDP 1.4+ detection (e.g. for supported link rates * method). The display control registers should read zero if they're * not supported anyway. */ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == sizeof(intel_dp->edp_dpcd)) { drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", (int)sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; } /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] */ intel_psr_init_dpcd(intel_dp); /* Clear the default sink rates */ intel_dp->num_sink_rates = 0; /* Read the eDP 1.4+ supported link rates. */ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; int i; drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates)); for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { int val = le16_to_cpu(sink_rates[i]); if (val == 0) break; /* Value read multiplied by 200kHz gives the per-lane * link rate in kHz. The source rates are, however, * stored in terms of LS_Clk kHz. The full conversion * back to symbols is * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) */ intel_dp->sink_rates[i] = (val * 200) / 10; } intel_dp->num_sink_rates = i; } /* * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
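	 *
	 * Worked example (illustrative): an eDP 1.4 panel advertising HBR3
	 * reports 40500 (8.1 GHz / 200 kHz) in DP_SUPPORTED_LINK_RATES, which
	 * the loop above stores as 40500 * 200 / 10 = 810000 kHz of link
	 * symbol clock in sink_rates[].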
*/ if (intel_dp->num_sink_rates) intel_dp->use_rate_select = true; else intel_dp_set_sink_rates(intel_dp); intel_dp_set_max_sink_lane_count(intel_dp); /* Read the eDP DSC DPCD registers */ if (HAS_DSC(dev_priv)) intel_dp_get_dsc_sink_cap(intel_dp); /* * If needed, program our source OUI so we can make various Intel-specific AUX services * available (such as HDR backlight controls) */ intel_edp_init_source_oui(intel_dp, true); return true; } static bool intel_dp_has_sink_count(struct intel_dp *intel_dp) { if (!intel_dp->attached_connector) return false; return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, intel_dp->dpcd, &intel_dp->desc); } static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { int ret; if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) return false; /* * Don't clobber cached eDP rates. Also skip re-reading * the OUI/ID since we know it won't change. */ if (!intel_dp_is_edp(intel_dp)) { drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); intel_dp_set_sink_rates(intel_dp); intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); } if (intel_dp_has_sink_count(intel_dp)) { ret = drm_dp_read_sink_count(&intel_dp->aux); if (ret < 0) return false; /* * Sink count can change between short pulse hpd hence * a member variable in intel_dp will track any changes * between short pulse interrupts. */ intel_dp->sink_count = ret; /* * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that * a dongle is present but no display. Unless we require to know * if a dongle is present or not, we don't need to update * downstream port information. So, an early return here saves * time from performing other operations which are not required. */ if (!intel_dp->sink_count) return false; } return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, intel_dp->downstream_ports) == 0; } static bool intel_dp_can_mst(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); return i915->params.enable_dp_mst && intel_dp_mst_source_support(intel_dp) && drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); } static void intel_dp_configure_mst(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", encoder->base.base.id, encoder->base.name, str_yes_no(intel_dp_mst_source_support(intel_dp)), str_yes_no(sink_can_mst), str_yes_no(i915->params.enable_dp_mst)); if (!intel_dp_mst_source_support(intel_dp)) return; intel_dp->is_mst = sink_can_mst && i915->params.enable_dp_mst; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } static bool intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) { return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; } static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) { int retry; for (retry = 0; retry < 3; retry++) { if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3) == 3) return true; } return false; } bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { /* * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication * of Color Encoding Format and Content Color Gamut], in order to * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 
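	 *
	 * The checks below therefore request a VSC SDP both for YCbCr 4:2:0
	 * output and for the sYCC/opYCC and BT.2020 colorimetry modes.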
*/ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) return true; switch (conn_state->colorspace) { case DRM_MODE_COLORIMETRY_SYCC_601: case DRM_MODE_COLORIMETRY_OPYCC_601: case DRM_MODE_COLORIMETRY_BT2020_YCC: case DRM_MODE_COLORIMETRY_BT2020_RGB: case DRM_MODE_COLORIMETRY_BT2020_CYCC: return true; default: break; } return false; } static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp, size_t size) { size_t length = sizeof(struct dp_sdp); if (size < length) return -ENOSPC; memset(sdp, 0, size); /* * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 * VSC SDP Header Bytes */ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ /* * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as * per DP 1.4a spec. */ if (vsc->revision != 0x5) goto out; /* VSC SDP Payload for DB16 through DB18 */ /* Pixel Encoding and Colorimetry Formats */ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ switch (vsc->bpc) { case 6: /* 6bpc: 0x0 */ break; case 8: sdp->db[17] = 0x1; /* DB17[3:0] */ break; case 10: sdp->db[17] = 0x2; break; case 12: sdp->db[17] = 0x3; break; case 16: sdp->db[17] = 0x4; break; default: MISSING_CASE(vsc->bpc); break; } /* Dynamic Range and Component Bit Depth */ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) sdp->db[17] |= 0x80; /* DB17[7] */ /* Content Type */ sdp->db[18] = vsc->content_type & 0x7; out: return length; } static ssize_t intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, const struct hdmi_drm_infoframe *drm_infoframe, struct dp_sdp *sdp, size_t size) { size_t length = sizeof(struct dp_sdp); const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; ssize_t len; if (size < length) return -ENOSPC; memset(sdp, 0, size); len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); if (len < 0) { drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); return -ENOSPC; } if (len != infoframe_size) { drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); return -ENOSPC; } /* * Set up the infoframe sdp packet for HDR static metadata. 
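	 * The payload is the CTA-861-G Dynamic Range and Mastering InfoFrame
	 * packed into buf[] above, wrapped in a DP SDP header.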
* Prepare VSC Header for SU as per DP 1.4a spec, * Table 2-100 and Table 2-101 */ /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ sdp->sdp_header.HB0 = 0; /* * Packet Type 80h + Non-audio INFOFRAME Type value * HDMI_INFOFRAME_TYPE_DRM: 0x87 * - 80h + Non-audio INFOFRAME Type value * - InfoFrame Type: 0x07 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] */ sdp->sdp_header.HB1 = drm_infoframe->type; /* * Least Significant Eight Bits of (Data Byte Count – 1) * infoframe_size - 1 */ sdp->sdp_header.HB2 = 0x1D; /* INFOFRAME SDP Version Number */ sdp->sdp_header.HB3 = (0x13 << 2); /* CTA Header Byte 2 (INFOFRAME Version Number) */ sdp->db[0] = drm_infoframe->version; /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ sdp->db[1] = drm_infoframe->length; /* * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after * HDMI_INFOFRAME_HEADER_SIZE */ BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], HDMI_DRM_INFOFRAME_SIZE); /* * Size of DP infoframe sdp packet for HDR static metadata consists of * - DP SDP Header(struct dp_sdp_header): 4 bytes * - Two Data Blocks: 2 bytes * CTA Header Byte2 (INFOFRAME Version Number) * CTA Header Byte3 (Length of INFOFRAME) * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes * * Prior to GEN11's GMP register size is identical to DP HDR static metadata * infoframe size. But GEN11+ has larger than that size, write_infoframe * will pad rest of the size. */ return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; } static void intel_write_dp_sdp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct dp_sdp sdp = {}; ssize_t len; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; switch (type) { case DP_SDP_VSC: len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, sizeof(sdp)); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, &crtc_state->infoframes.drm.drm, &sdp, sizeof(sdp)); break; default: MISSING_CASE(type); return; } if (drm_WARN_ON(&dev_priv->drm, len < 0)) return; dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); } void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_dp_vsc_sdp *vsc) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct dp_sdp sdp = {}; ssize_t len; len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); if (drm_WARN_ON(&dev_priv->drm, len < 0)) return; dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, &sdp, len); } void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; /* TODO: Add DSC case (DIP_ENABLE_PPS) */ /* When PSR is enabled, this routine doesn't disable VSC DIP */ if (!crtc_state->has_psr) val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 
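	/*
	 * Writing the masked value first turns the selected DIPs off; the ones
	 * still wanted are re-enabled below when their SDP payloads are written
	 * out again via write_infoframe().
	 */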
intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); if (!enable) return; /* When PSR is enabled, VSC SDP is handled by PSR routine */ if (!crtc_state->has_psr) intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); } static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, const void *buffer, size_t size) { const struct dp_sdp *sdp = buffer; if (size < sizeof(struct dp_sdp)) return -EINVAL; memset(vsc, 0, sizeof(*vsc)); if (sdp->sdp_header.HB0 != 0) return -EINVAL; if (sdp->sdp_header.HB1 != DP_SDP_VSC) return -EINVAL; vsc->sdp_type = sdp->sdp_header.HB1; vsc->revision = sdp->sdp_header.HB2; vsc->length = sdp->sdp_header.HB3; if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { /* * - HB2 = 0x2, HB3 = 0x8 * VSC SDP supporting 3D stereo + PSR * - HB2 = 0x4, HB3 = 0xe * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of * first scan line of the SU region (applies to eDP v1.4b * and higher). */ return 0; } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { /* * - HB2 = 0x5, HB3 = 0x13 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry * Format. */ vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; vsc->colorimetry = sdp->db[16] & 0xf; vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; switch (sdp->db[17] & 0x7) { case 0x0: vsc->bpc = 6; break; case 0x1: vsc->bpc = 8; break; case 0x2: vsc->bpc = 10; break; case 0x3: vsc->bpc = 12; break; case 0x4: vsc->bpc = 16; break; default: MISSING_CASE(sdp->db[17] & 0x7); return -EINVAL; } vsc->content_type = sdp->db[18] & 0x7; } else { return -EINVAL; } return 0; } static int intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, const void *buffer, size_t size) { int ret; const struct dp_sdp *sdp = buffer; if (size < sizeof(struct dp_sdp)) return -EINVAL; if (sdp->sdp_header.HB0 != 0) return -EINVAL; if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) return -EINVAL; /* * Least Significant Eight Bits of (Data Byte Count – 1) * 1Dh (i.e., Data Byte Count = 30 bytes). */ if (sdp->sdp_header.HB2 != 0x1D) return -EINVAL; /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
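	 * A conforming header therefore carries HB3 = 0x13 << 2: version 0x13
	 * in the upper six bits with the two low bits clear.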
*/ if ((sdp->sdp_header.HB3 & 0x3) != 0) return -EINVAL; /* INFOFRAME SDP Version Number */ if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) return -EINVAL; /* CTA Header Byte 2 (INFOFRAME Version Number) */ if (sdp->db[0] != 1) return -EINVAL; /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) return -EINVAL; ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], HDMI_DRM_INFOFRAME_SIZE); return ret; } static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_dp_vsc_sdp *vsc) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); unsigned int type = DP_SDP_VSC; struct dp_sdp sdp = {}; int ret; /* When PSR is enabled, VSC SDP is handled by PSR routine */ if (crtc_state->has_psr) return; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); if (ret) drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); } static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct hdmi_drm_infoframe *drm_infoframe) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; struct dp_sdp sdp = {}; int ret; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(type)) == 0) return; dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, sizeof(sdp)); if (ret) drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP HDR Metadata Infoframe SDP\n"); } void intel_read_dp_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, unsigned int type) { switch (type) { case DP_SDP_VSC: intel_read_dp_vsc_sdp(encoder, crtc_state, &crtc_state->infoframes.vsc); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, &crtc_state->infoframes.drm.drm); break; default: MISSING_CASE(type); break; } } static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int status = 0; int test_link_rate; u8 test_lane_count, test_link_bw; /* (DP CTS 1.2) * 4.3.1.11 */ /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, &test_lane_count); if (status <= 0) { drm_dbg_kms(&i915->drm, "Lane count read failed\n"); return DP_TEST_NAK; } test_lane_count &= DP_MAX_LANE_COUNT_MASK; status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, &test_link_bw); if (status <= 0) { drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); return DP_TEST_NAK; } test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); /* Validate the requested link rate and lane count */ if (!intel_dp_link_params_valid(intel_dp, test_link_rate, test_lane_count)) return DP_TEST_NAK; intel_dp->compliance.test_lane_count = test_lane_count; intel_dp->compliance.test_link_rate = test_link_rate; return DP_TEST_ACK; } static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 test_pattern; u8 test_misc; __be16 h_width, v_height; int status = 0; /* Read the 
TEST_PATTERN (DP CTS 3.1.5) */ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, &test_pattern); if (status <= 0) { drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); return DP_TEST_NAK; } if (test_pattern != DP_COLOR_RAMP) return DP_TEST_NAK; status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, &h_width, 2); if (status <= 0) { drm_dbg_kms(&i915->drm, "H Width read failed\n"); return DP_TEST_NAK; } status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, &v_height, 2); if (status <= 0) { drm_dbg_kms(&i915->drm, "V Height read failed\n"); return DP_TEST_NAK; } status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, &test_misc); if (status <= 0) { drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); return DP_TEST_NAK; } if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) return DP_TEST_NAK; if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) return DP_TEST_NAK; switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { case DP_TEST_BIT_DEPTH_6: intel_dp->compliance.test_data.bpc = 6; break; case DP_TEST_BIT_DEPTH_8: intel_dp->compliance.test_data.bpc = 8; break; default: return DP_TEST_NAK; } intel_dp->compliance.test_data.video_pattern = test_pattern; intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); /* Set test active flag here so userspace doesn't interrupt things */ intel_dp->compliance.test_active = true; return DP_TEST_ACK; } static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 test_result = DP_TEST_ACK; struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_connector *connector = &intel_connector->base; if (intel_connector->detect_edid == NULL || connector->edid_corrupt || intel_dp->aux.i2c_defer_count > 6) { /* Check EDID read for NACKs, DEFERs and corruption * (DP CTS 1.2 Core r1.1) * 4.2.2.4 : Failed EDID read, I2C_NAK * 4.2.2.5 : Failed EDID read, I2C_DEFER * 4.2.2.6 : EDID corruption detected * Use failsafe mode for all cases */ if (intel_dp->aux.i2c_nack_count > 0 || intel_dp->aux.i2c_defer_count > 0) drm_dbg_kms(&i915->drm, "EDID read had %d NACKs, %d DEFERs\n", intel_dp->aux.i2c_nack_count, intel_dp->aux.i2c_defer_count); intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; } else { /* FIXME: Get rid of drm_edid_raw() */ const struct edid *block = drm_edid_raw(intel_connector->detect_edid); /* We have to write the checksum of the last block read */ block += block->extensions; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, block->checksum) <= 0) drm_dbg_kms(&i915->drm, "Failed to write EDID checksum\n"); test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; } /* Set test active flag here so userspace doesn't interrupt things */ intel_dp->compliance.test_active = true; return test_result; } static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 pattern_val; switch (data->phy_pattern) { case DP_PHY_TEST_PATTERN_NONE: drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); break; case DP_PHY_TEST_PATTERN_D10_2: 
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x250. As the
		 * current DPR-100 firmware cannot set it, hardcode it for the
		 * compliance test for now.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x24A. As the
		 * current DPR-100 firmware cannot set it, hardcode it for the
		 * compliance test for now.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}

static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
case DP_TEST_LINK_VIDEO_PATTERN: drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); response = intel_dp_autotest_video_pattern(intel_dp); break; case DP_TEST_LINK_EDID_READ: drm_dbg_kms(&i915->drm, "EDID test requested\n"); response = intel_dp_autotest_edid(intel_dp); break; case DP_TEST_LINK_PHY_TEST_PATTERN: drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); response = intel_dp_autotest_phy_pattern(intel_dp); break; default: drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", request); break; } if (response & DP_TEST_ACK) intel_dp->compliance.test_type = request; update_status: status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); if (status <= 0) drm_dbg_kms(&i915->drm, "Could not write test response to sink\n"); } static bool intel_dp_link_ok(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); bool uhbr = intel_dp->link_rate >= 1000000; bool ok; if (uhbr) ok = drm_dp_128b132b_lane_channel_eq_done(link_status, intel_dp->lane_count); else ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); if (ok) return true; intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s link not ok, retraining\n", encoder->base.base.id, encoder->base.name, uhbr ? "128b/132b" : "8b/10b"); return false; } static void intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) { bool handled = false; drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled); if (esi[1] & DP_CP_IRQ) { intel_hdcp_handle_cp_irq(intel_dp->attached_connector); ack[1] |= DP_CP_IRQ; } } static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 link_status[DP_LINK_STATUS_SIZE] = {}; const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, esi_link_status_size) != esi_link_status_size) { drm_err(&i915->drm, "[ENCODER:%d:%s] Failed to read link status\n", encoder->base.base.id, encoder->base.name); return false; } return intel_dp_link_ok(intel_dp, link_status); } /** * intel_dp_check_mst_status - service any pending MST interrupts, check link status * @intel_dp: Intel DP struct * * Read any pending MST interrupts, call MST core to handle these and ack the * interrupts. Check if the main and AUX link state is ok. * * Returns: * - %true if pending interrupts were serviced (or no interrupts were * pending) w/o detecting an error condition. * - %false if an error condition - like AUX failure or a loss of link - is * detected, which needs servicing from the hotplug work. 
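 *
 * The handler loops until a pass leaves no ESI bits to ack, writing back each
 * batch of serviced events so the sink can flag any follow-up interrupts.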
 */
static bool intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		u8 esi[4] = {};
		u8 ack[4] = {};

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);

		if (intel_dp->active_mst_links > 0 && link_ok &&
		    esi[3] & LINK_STATUS_CHANGED) {
			if (!intel_dp_mst_link_status(intel_dp))
				link_ok = false;
			ack[3] |= LINK_STATUS_CHANGED;
		}

		intel_dp_mst_hpd_irq(intel_dp, esi, ack);

		if (!memchr_inv(ack, 0, sizeof(ack)))
			break;

		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");

		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
	}

	return link_ok;
}

static void intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1,
				      &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1,
				       buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux,
						      &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

static bool intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While the PSR source HW is enabled it controls the main link on its
	 * own, enabling and disabling frame transmission as needed. A retrain
	 * attempted here could therefore find the link down, or could mix
	 * training patterns with frame data, and would fail either way.
	 * When exiting PSR the HW retrains the link anyway, which fixes up
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME it would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
*/ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, intel_dp->lane_count)) return false; /* Retrain if link not ok */ return !intel_dp_link_ok(intel_dp, link_status); } static bool intel_dp_has_connector(struct intel_dp *intel_dp, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder; enum pipe pipe; if (!conn_state->best_encoder) return false; /* SST */ encoder = &dp_to_dig_port(intel_dp)->base; if (conn_state->best_encoder == &encoder->base) return true; /* MST */ for_each_pipe(i915, pipe) { encoder = &intel_dp->mst_encoders[pipe]->base; if (conn_state->best_encoder == &encoder->base) return true; } return false; } int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; int ret = 0; *pipe_mask = 0; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state = connector->base.state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (!intel_dp_has_connector(intel_dp, conn_state)) continue; crtc = to_intel_crtc(conn_state->crtc); if (!crtc) continue; ret = drm_modeset_lock(&crtc->base.mutex, ctx); if (ret) break; crtc_state = to_intel_crtc_state(crtc->base.state); drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); if (!crtc_state->hw.active) continue; if (conn_state->commit && !try_wait_for_completion(&conn_state->commit->hw_done)) continue; *pipe_mask |= BIT(crtc->pipe); } drm_connector_list_iter_end(&conn_iter); return ret; } static bool intel_dp_is_connected(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; return connector->base.status == connector_status_connected || intel_dp->is_mst; } int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc; u8 pipe_mask; int ret; if (!intel_dp_is_connected(intel_dp)) return 0; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, ctx); if (ret) return ret; if (!intel_dp_needs_link_retrain(intel_dp)) return 0; ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); if (ret) return ret; if (pipe_mask == 0) return 0; if (!intel_dp_needs_link_retrain(intel_dp)) return 0; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", encoder->base.base.id, encoder->base.name); for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); /* Suppress underruns caused by re-training */ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); if (crtc_state->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, intel_crtc_pch_transcoder(crtc), false); } for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); /* retrain on the MST master transcoder */ if (DISPLAY_VER(dev_priv) >= 12 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && !intel_dp_mst_is_master_trans(crtc_state)) continue; intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp, 
crtc_state); intel_dp_stop_link_train(intel_dp, crtc_state); break; } for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); /* Keep underrun reporting disabled until things are stable */ intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, intel_crtc_pch_transcoder(crtc), true); } return 0; } static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; int ret = 0; *pipe_mask = 0; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state = connector->base.state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (!intel_dp_has_connector(intel_dp, conn_state)) continue; crtc = to_intel_crtc(conn_state->crtc); if (!crtc) continue; ret = drm_modeset_lock(&crtc->base.mutex, ctx); if (ret) break; crtc_state = to_intel_crtc_state(crtc->base.state); drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); if (!crtc_state->hw.active) continue; if (conn_state->commit && !try_wait_for_completion(&conn_state->commit->hw_done)) continue; *pipe_mask |= BIT(crtc->pipe); } drm_connector_list_iter_end(&conn_iter); return ret; } static int intel_dp_do_phy_test(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc; u8 pipe_mask; int ret; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, ctx); if (ret) return ret; ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); if (ret) return ret; if (pipe_mask == 0) return 0; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", encoder->base.base.id, encoder->base.name); for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); /* test on the MST master transcoder */ if (DISPLAY_VER(dev_priv) >= 12 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && !intel_dp_mst_is_master_trans(crtc_state)) continue; intel_dp_process_phy_request(intel_dp, crtc_state); break; } return 0; } void intel_dp_phy_test(struct intel_encoder *encoder) { struct drm_modeset_acquire_ctx ctx; int ret; drm_modeset_acquire_init(&ctx, 0); for (;;) { ret = intel_dp_do_phy_test(encoder, &ctx); if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); continue; } break; } drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); drm_WARN(encoder->base.dev, ret, "Acquiring modeset locks failed with %i\n", ret); } static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 val; if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) return; drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); if (val & DP_AUTOMATED_TEST_REQUEST) intel_dp_handle_test_request(intel_dp); if (val & DP_CP_IRQ) intel_hdcp_handle_cp_irq(intel_dp->attached_connector); if (val & DP_SINK_SPECIFIC_IRQ) drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); } static 
void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) { u8 val; if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) return; if (val & HDMI_LINK_STATUS_CHANGED) intel_dp_handle_hdmi_link_status_change(intel_dp); } /* * According to DP spec * 5.1.2: * 1. Read DPCD * 2. Configure link according to Receiver Capabilities * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt * * intel_dp_short_pulse - handles short pulse interrupts * when full detection is not required. * Returns %true if short pulse is handled and full detection * is NOT required and %false otherwise. */ static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 old_sink_count = intel_dp->sink_count; bool ret; /* * Clearing compliance test variables to allow capturing * of values for next automated test request. */ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); /* * Now read the DPCD to see if it's actually running * If the current value of sink count doesn't match with * the value that was stored earlier or dpcd read failed * we need to do full detection */ ret = intel_dp_get_dpcd(intel_dp); if ((old_sink_count != intel_dp->sink_count) || !ret) { /* No need to proceed if we are going to do full detect */ return false; } intel_dp_check_device_service_irq(intel_dp); intel_dp_check_link_service_irq(intel_dp); /* Handle CEC interrupts, if any */ drm_dp_cec_irq(&intel_dp->aux); /* defer to the hotplug work for link retraining if needed */ if (intel_dp_needs_link_retrain(intel_dp)) return false; intel_psr_short_pulse(intel_dp); switch (intel_dp->compliance.test_type) { case DP_TEST_LINK_TRAINING: drm_dbg_kms(&dev_priv->drm, "Link Training Compliance Test requested\n"); /* Send a Hotplug Uevent to userspace to start modeset */ drm_kms_helper_hotplug_event(&dev_priv->drm); break; case DP_TEST_LINK_PHY_TEST_PATTERN: drm_dbg_kms(&dev_priv->drm, "PHY test pattern Compliance Test requested\n"); /* * Schedule long hpd to do the test * * FIXME get rid of the ad-hoc phy test modeset code * and properly incorporate it into the normal modeset. */ return false; } return true; } /* XXX this is probably wrong for multiple downstream ports */ static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u8 *dpcd = intel_dp->dpcd; u8 type; if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) return connector_status_connected; lspcon_resume(dig_port); if (!intel_dp_get_dpcd(intel_dp)) return connector_status_disconnected; /* if there's no downstream port, we're done */ if (!drm_dp_is_branch(dpcd)) return connector_status_connected; /* If we're HPD-aware, SINK_COUNT changes dynamically */ if (intel_dp_has_sink_count(intel_dp) && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { return intel_dp->sink_count ? 
connector_status_connected : connector_status_disconnected; } if (intel_dp_can_mst(intel_dp)) return connector_status_connected; /* If no HPD, poke DDC gently */ if (drm_probe_ddc(&intel_dp->aux.ddc)) return connector_status_connected; /* Well we tried, say unknown for unreliable port types */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) return connector_status_unknown; } else { type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK; if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || type == DP_DWN_STRM_PORT_TYPE_OTHER) return connector_status_unknown; } /* Anything else is out of spec, warn and ignore */ drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); return connector_status_disconnected; } static enum drm_connector_status edp_detect(struct intel_dp *intel_dp) { return connector_status_connected; } /* * intel_digital_port_connected - is the specified port connected? * @encoder: intel_encoder * * In cases where there's a connector physically connected but it can't be used * by our hardware we also return false, since the rest of the driver should * pretty much treat the port as disconnected. This is relevant for type-C * (starting on ICL) where there's ownership involved. * * Return %true if port is connected, %false otherwise. */ bool intel_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_connected = false; intel_wakeref_t wakeref; with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) is_connected = dig_port->connected(encoder); return is_connected; } static const struct drm_edid * intel_dp_get_edid(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; const struct drm_edid *fixed_edid = connector->panel.fixed_edid; /* Use panel fixed edid if we have one */ if (fixed_edid) { /* invalid edid */ if (IS_ERR(fixed_edid)) return NULL; return drm_edid_dup(fixed_edid); } return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc); } static void intel_dp_update_dfp(struct intel_dp *intel_dp, const struct drm_edid *drm_edid) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; const struct edid *edid; /* FIXME: Get rid of drm_edid_raw() */ edid = drm_edid_raw(drm_edid); intel_dp->dfp.max_bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports, edid); intel_dp->dfp.max_dotclock = drm_dp_downstream_max_dotclock(intel_dp->dpcd, intel_dp->downstream_ports); intel_dp->dfp.min_tmds_clock = drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, intel_dp->downstream_ports, edid); intel_dp->dfp.max_tmds_clock = drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, intel_dp->downstream_ports, edid); intel_dp->dfp.pcon_max_frl_bw = drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, intel_dp->downstream_ports); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", connector->base.base.id, connector->base.name, intel_dp->dfp.max_bpc, intel_dp->dfp.max_dotclock, intel_dp->dfp.min_tmds_clock, intel_dp->dfp.max_tmds_clock, intel_dp->dfp.pcon_max_frl_bw); intel_dp_get_pcon_dsc_cap(intel_dp); } static bool intel_dp_can_ycbcr420(struct intel_dp *intel_dp) { if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) && 
(!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough)) return true; if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) && dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) return true; if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) && dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) return true; return false; } static void intel_dp_update_420(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; intel_dp->dfp.ycbcr420_passthrough = drm_dp_downstream_420_passthrough(intel_dp->dpcd, intel_dp->downstream_ports); /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ intel_dp->dfp.ycbcr_444_to_420 = dp_to_dig_port(intel_dp)->lspcon.active || drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, intel_dp->downstream_ports); intel_dp->dfp.rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, intel_dp->downstream_ports, DP_DS_HDMI_BT709_RGB_YCBCR_CONV); connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", connector->base.base.id, connector->base.name, str_yes_no(intel_dp->dfp.rgb_to_ycbcr), str_yes_no(connector->base.ycbcr_420_allowed), str_yes_no(intel_dp->dfp.ycbcr_444_to_420)); } static void intel_dp_set_edid(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; const struct drm_edid *drm_edid; const struct edid *edid; bool vrr_capable; intel_dp_unset_edid(intel_dp); drm_edid = intel_dp_get_edid(intel_dp); connector->detect_edid = drm_edid; /* Below we depend on display info having been updated */ drm_edid_connector_update(&connector->base, drm_edid); vrr_capable = intel_vrr_is_capable(connector); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n", connector->base.base.id, connector->base.name, str_yes_no(vrr_capable)); drm_connector_set_vrr_capable_property(&connector->base, vrr_capable); intel_dp_update_dfp(intel_dp, drm_edid); intel_dp_update_420(intel_dp); /* FIXME: Get rid of drm_edid_raw() */ edid = drm_edid_raw(drm_edid); drm_dp_cec_set_edid(&intel_dp->aux, edid); } static void intel_dp_unset_edid(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; drm_dp_cec_unset_edid(&intel_dp->aux); drm_edid_free(connector->detect_edid); connector->detect_edid = NULL; intel_dp->dfp.max_bpc = 0; intel_dp->dfp.max_dotclock = 0; intel_dp->dfp.min_tmds_clock = 0; intel_dp->dfp.max_tmds_clock = 0; intel_dp->dfp.pcon_max_frl_bw = 0; intel_dp->dfp.ycbcr_444_to_420 = false; connector->base.ycbcr_420_allowed = false; drm_connector_set_vrr_capable_property(&connector->base, false); } static int intel_dp_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); if (!INTEL_DISPLAY_ENABLED(dev_priv)) return 
connector_status_disconnected; /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); else if (intel_digital_port_connected(encoder)) status = intel_dp_detect_dpcd(intel_dp); else status = connector_status_disconnected; if (status == connector_status_disconnected) { memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); if (intel_dp->is_mst) { drm_dbg_kms(&dev_priv->drm, "MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } goto out; } /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ if (HAS_DSC(dev_priv)) intel_dp_get_dsc_sink_cap(intel_dp); intel_dp_configure_mst(intel_dp); /* * TODO: Reset link params when switching to MST mode, until MST * supports link training fallback params. */ if (intel_dp->reset_link_params || intel_dp->is_mst) { intel_dp_reset_max_link_params(intel_dp); intel_dp->reset_link_params = false; } intel_dp_print_rates(intel_dp); if (intel_dp->is_mst) { /* * If we are in MST mode then this connector * won't appear connected or have anything * with EDID on it */ status = connector_status_disconnected; goto out; } /* * Some external monitors do not signal loss of link synchronization * with an IRQ_HPD, so force a link status check. */ if (!intel_dp_is_edp(intel_dp)) { int ret; ret = intel_dp_retrain_link(encoder, ctx); if (ret) return ret; } /* * Clearing NACK and defer counts to get their exact values * while reading EDID which are required by Compliance tests * 4.2.2.4 and 4.2.2.5 */ intel_dp->aux.i2c_nack_count = 0; intel_dp->aux.i2c_defer_count = 0; intel_dp_set_edid(intel_dp); if (intel_dp_is_edp(intel_dp) || to_intel_connector(connector)->detect_edid) status = connector_status_connected; intel_dp_check_device_service_irq(intel_dp); out: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); /* * Make sure the refs for power wells enabled during detect are * dropped to avoid a new detect cycle triggered by HPD polling. 
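	 * intel_display_power_flush_work() below flushes the deferred power
	 * domain puts so the references taken during detect are actually
	 * released here rather than later.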
*/ intel_display_power_flush_work(dev_priv); if (!intel_dp_is_edp(intel_dp)) drm_dp_set_subconnector_property(connector, status, intel_dp->dpcd, intel_dp->downstream_ports); return status; } static void intel_dp_force(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); enum intel_display_power_domain aux_domain = intel_aux_power_domain(dig_port); intel_wakeref_t wakeref; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_dp_unset_edid(intel_dp); if (connector->status != connector_status_connected) return; wakeref = intel_display_power_get(dev_priv, aux_domain); intel_dp_set_edid(intel_dp); intel_display_power_put(dev_priv, aux_domain, wakeref); } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); int num_modes; /* drm_edid_connector_update() done in ->detect() or ->force() */ num_modes = drm_edid_connector_add_modes(connector); /* Also add fixed mode, which may or may not be present in EDID */ if (intel_dp_is_edp(intel_attached_dp(intel_connector))) num_modes += intel_panel_get_modes(intel_connector); if (num_modes) return num_modes; if (!intel_connector->detect_edid) { struct intel_dp *intel_dp = intel_attached_dp(intel_connector); struct drm_display_mode *mode; mode = drm_dp_downstream_mode(connector->dev, intel_dp->dpcd, intel_dp->downstream_ports); if (mode) { drm_mode_probed_add(connector, mode); num_modes++; } } return num_modes; } static int intel_dp_connector_register(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_lspcon *lspcon = &dig_port->lspcon; int ret; ret = intel_connector_register(connector); if (ret) return ret; drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", intel_dp->aux.name, connector->kdev->kobj.name); intel_dp->aux.dev = connector->kdev; ret = drm_dp_aux_register(&intel_dp->aux); if (!ret) drm_dp_cec_register_connector(&intel_dp->aux, connector); if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return ret; /* * ToDo: Clean this up to handle lspcon init and resume more * efficiently and streamlined. */ if (lspcon_init(dig_port)) { lspcon_detect_hdr_capability(lspcon); if (lspcon->hdr_supported) drm_connector_attach_hdr_output_metadata_property(connector); } return ret; } static void intel_dp_connector_unregister(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); drm_dp_cec_unregister_connector(&intel_dp->aux); drm_dp_aux_unregister(&intel_dp->aux); intel_connector_unregister(connector); } void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); struct intel_dp *intel_dp = &dig_port->dp; intel_dp_mst_encoder_cleanup(dig_port); intel_pps_vdd_off_sync(intel_dp); /* * Ensure power off delay is respected on module remove, so that we can * reduce delays at driver probe. See pps_init_timestamps(). 
*/ intel_pps_wait_power_cycle(intel_dp); intel_dp_aux_fini(intel_dp); } void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_pps_vdd_off_sync(intel_dp); } void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_pps_wait_power_cycle(intel_dp); } static int intel_modeset_tile_group(struct intel_atomic_state *state, int tile_group_id) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter conn_iter; struct drm_connector *connector; int ret = 0; drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (!connector->has_tile || connector->tile_group->id != tile_group_id) continue; conn_state = drm_atomic_get_connector_state(&state->base, connector); if (IS_ERR(conn_state)) { ret = PTR_ERR(conn_state); break; } crtc = to_intel_crtc(conn_state->crtc); if (!crtc) continue; crtc_state = intel_atomic_get_new_crtc_state(state, crtc); crtc_state->uapi.mode_changed = true; ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) break; } drm_connector_list_iter_end(&conn_iter); return ret; } static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; if (transcoders == 0) return 0; for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state; int ret; crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); if (!crtc_state->hw.enable) continue; if (!(transcoders & BIT(crtc_state->cpu_transcoder))) continue; crtc_state->uapi.mode_changed = true; ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); if (ret) return ret; ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) return ret; transcoders &= ~BIT(crtc_state->cpu_transcoder); } drm_WARN_ON(&dev_priv->drm, transcoders != 0); return 0; } static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, struct drm_connector *connector) { const struct drm_connector_state *old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector); const struct intel_crtc_state *old_crtc_state; struct intel_crtc *crtc; u8 transcoders; crtc = to_intel_crtc(old_conn_state->crtc); if (!crtc) return 0; old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); if (!old_crtc_state->hw.active) return 0; transcoders = old_crtc_state->sync_mode_slaves_mask; if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) transcoders |= BIT(old_crtc_state->master_transcoder); return intel_modeset_affected_transcoders(state, transcoders); } static int intel_dp_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *_state) { struct drm_i915_private *dev_priv = to_i915(conn->dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn); struct intel_connector *intel_conn = to_intel_connector(conn); struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder); int ret; ret = intel_digital_connector_atomic_check(conn, &state->base); if (ret) return ret; if (intel_dp_mst_source_support(intel_dp)) { ret = 
drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr); if (ret) return ret; } /* * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. */ if (DISPLAY_VER(dev_priv) < 9) return 0; if (!intel_connector_needs_modeset(state, conn)) return 0; if (conn->has_tile) { ret = intel_modeset_tile_group(state, conn->tile_group->id); if (ret) return ret; } return intel_modeset_synced_crtcs(state, conn); } static void intel_dp_oob_hotplug_event(struct drm_connector *connector) { struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(connector->dev); spin_lock_irq(&i915->irq_lock); i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin); spin_unlock_irq(&i915->irq_lock); queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0); } static const struct drm_connector_funcs intel_dp_connector_funcs = { .force = intel_dp_force, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_dp_connector_register, .early_unregister = intel_dp_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, .oob_hotplug_event = intel_dp_oob_hotplug_event, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .detect_ctx = intel_dp_detect, .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, .atomic_check = intel_dp_connector_atomic_check, }; enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which * would require vdd on to handle it, and thus we * would end up in an endless cycle of * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." */ drm_dbg_kms(&i915->drm, "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", long_hpd ? "long" : "short", dig_port->base.base.base.id, dig_port->base.base.name); return IRQ_HANDLED; } drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", dig_port->base.base.base.id, dig_port->base.base.name, long_hpd ? "long" : "short"); if (long_hpd) { intel_dp->reset_link_params = true; return IRQ_NONE; } if (intel_dp->is_mst) { if (!intel_dp_check_mst_status(intel_dp)) return IRQ_NONE; } else if (!intel_dp_short_pulse(intel_dp)) { return IRQ_NONE; } return IRQ_HANDLED; } static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, const struct intel_bios_encoder_data *devdata, enum port port) { /* * eDP not supported on g4x. so bail out early just * for a bit extra safety in case the VBT is bonkers. 
*/ if (DISPLAY_VER(dev_priv) < 5) return false; if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) return true; return devdata && intel_bios_encoder_supports_edp(devdata); } bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) { const struct intel_bios_encoder_data *devdata = intel_bios_encoder_data_lookup(i915, port); return _intel_dp_is_port_edp(i915, devdata, port); } static bool has_gamut_metadata_dip(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; if (intel_bios_encoder_is_lspcon(encoder->devdata)) return false; if (DISPLAY_VER(i915) >= 11) return true; if (port == PORT_A) return false; if (IS_HASWELL(i915) || IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) return true; return false; } static void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); enum port port = dp_to_dig_port(intel_dp)->base.port; if (!intel_dp_is_edp(intel_dp)) drm_connector_attach_dp_subconnector_property(connector); if (!IS_G4X(dev_priv) && port != PORT_A) intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); if (HAS_GMCH(dev_priv)) drm_connector_attach_max_bpc_property(connector, 6, 10); else if (DISPLAY_VER(dev_priv) >= 5) drm_connector_attach_max_bpc_property(connector, 6, 12); /* Register HDMI colorspace for case of lspcon */ if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { drm_connector_attach_content_type_property(connector); intel_attach_hdmi_colorspace_property(connector); } else { intel_attach_dp_colorspace_property(connector); } if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) drm_connector_attach_hdr_output_metadata_property(connector); if (HAS_VRR(dev_priv)) drm_connector_attach_vrr_capable_property(connector); } static void intel_edp_add_properties(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); intel_attach_scaling_mode_property(&connector->base); drm_connector_set_panel_orientation_with_quirk(&connector->base, i915->display.vbt.orientation, fixed_mode->hdisplay, fixed_mode->vdisplay); } static void intel_edp_backlight_setup(struct intel_dp *intel_dp, struct intel_connector *connector) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum pipe pipe = INVALID_PIPE; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { /* * Figure out the current pipe for the initial backlight setup. * If the current pipe isn't valid, try the PPS pipe, and if that * fails just assume pipe A. */ pipe = vlv_active_pipe(intel_dp); if (pipe != PIPE_A && pipe != PIPE_B) pipe = intel_dp->pps.pps_pipe; if (pipe != PIPE_A && pipe != PIPE_B) pipe = PIPE_A; } intel_backlight_setup(connector, pipe); } static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct drm_connector *connector = &intel_connector->base; struct drm_display_mode *fixed_mode; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool has_dpcd; const struct drm_edid *drm_edid; if (!intel_dp_is_edp(intel_dp)) return true; /* * On IBX/CPT we may get here with LVDS already registered. 
Since the * driver uses the only internal power sequencer available for both * eDP and LVDS bail out early in this case to prevent interfering * with an already powered-on LVDS power sequencer. */ if (intel_get_lvds_encoder(dev_priv)) { drm_WARN_ON(&dev_priv->drm, !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); drm_info(&dev_priv->drm, "LVDS was detected, not registering eDP\n"); return false; } intel_bios_init_panel_early(dev_priv, &intel_connector->panel, encoder->devdata); if (!intel_pps_init(intel_dp)) { drm_info(&dev_priv->drm, "[ENCODER:%d:%s] unusable PPS, disabling eDP\n", encoder->base.base.id, encoder->base.name); /* * The BIOS may have still enabled VDD on the PPS even * though it's unusable. Make sure we turn it back off * and to release the power domain references/etc. */ goto out_vdd_off; } /* * Enable HPD sense for live status check. * intel_hpd_irq_setup() will turn it off again * if it's no longer needed later. * * The DPCD probe below will make sure VDD is on. */ intel_hpd_enable_detection(encoder); /* Cache DPCD and EDID for edp. */ has_dpcd = intel_edp_init_dpcd(intel_dp); if (!has_dpcd) { /* if this fails, presume the device is a ghost */ drm_info(&dev_priv->drm, "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n", encoder->base.base.id, encoder->base.name); goto out_vdd_off; } /* * VBT and straps are liars. Also check HPD as that seems * to be the most reliable piece of information available. * * ... expect on devices that forgot to hook HPD up for eDP * (eg. Acer Chromebook C710), so we'll check it only if multiple * ports are attempting to use the same AUX CH, according to VBT. */ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) && !intel_digital_port_connected(encoder)) { /* * If this fails, presume the DPCD answer came * from some other port using the same AUX CH. * * FIXME maybe cleaner to check this before the * DPCD read? Would need sort out the VDD handling... */ drm_info(&dev_priv->drm, "[ENCODER:%d:%s] HPD is down, disabling eDP\n", encoder->base.base.id, encoder->base.name); goto out_vdd_off; } mutex_lock(&dev_priv->drm.mode_config.mutex); drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc); if (!drm_edid) { /* Fallback to EDID from ACPI OpRegion, if any */ drm_edid = intel_opregion_get_edid(intel_connector); if (drm_edid) drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] Using OpRegion EDID\n", connector->base.id, connector->name); } if (drm_edid) { if (drm_edid_connector_update(connector, drm_edid) || !drm_edid_connector_add_modes(connector)) { drm_edid_connector_update(connector, NULL); drm_edid_free(drm_edid); drm_edid = ERR_PTR(-EINVAL); } } else { drm_edid = ERR_PTR(-ENOENT); } intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, IS_ERR(drm_edid) ? 
NULL : drm_edid); intel_panel_add_edid_fixed_modes(intel_connector, true); /* MSO requires information from the EDID */ intel_edp_mso_init(intel_dp); /* multiply the mode clock and horizontal timings for MSO */ list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head) intel_edp_mso_mode_fixup(intel_connector, fixed_mode); /* fallback to VBT if available for eDP */ if (!intel_panel_preferred_fixed_mode(intel_connector)) intel_panel_add_vbt_lfp_fixed_mode(intel_connector); mutex_unlock(&dev_priv->drm.mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { drm_info(&dev_priv->drm, "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n", encoder->base.base.id, encoder->base.name); goto out_vdd_off; } intel_panel_init(intel_connector, drm_edid); intel_edp_backlight_setup(intel_dp, intel_connector); intel_edp_add_properties(intel_dp); intel_pps_init_late(intel_dp); return true; out_vdd_off: intel_pps_vdd_off_sync(intel_dp); return false; } static void intel_dp_modeset_retry_work_fn(struct work_struct *work) { struct intel_connector *intel_connector; struct drm_connector *connector; intel_connector = container_of(work, typeof(*intel_connector), modeset_retry_work); connector = &intel_connector->base; drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); /* Grab the locks before changing connector property*/ mutex_lock(&connector->dev->mode_config.mutex); /* Set connector link status to BAD and send a Uevent to notify * userspace to do a modeset. */ drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_connector_hotplug_event(connector); } bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct intel_dp *intel_dp = &dig_port->dp; struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); int type; /* Initialize the work for modeset in case of link train failure */ INIT_WORK(&intel_connector->modeset_retry_work, intel_dp_modeset_retry_work_fn); if (drm_WARN(dev, dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", dig_port->max_lanes, intel_encoder->base.base.id, intel_encoder->base.name)) return false; intel_dp->reset_link_params = true; intel_dp->pps.pps_pipe = INVALID_PIPE; intel_dp->pps.active_pipe = INVALID_PIPE; /* Preserve the current hw state. */ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); intel_dp->attached_connector = intel_connector; if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { /* * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. 
*/ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; /* eDP only on port B and/or C on vlv/chv */ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port != PORT_B && port != PORT_C)) return false; } else { type = DRM_MODE_CONNECTOR_DisplayPort; } intel_dp_set_default_sink_rates(intel_dp); intel_dp_set_default_max_sink_lane_count(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); drm_dbg_kms(&dev_priv->drm, "Adding %s connector on [ENCODER:%d:%s]\n", type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", intel_encoder->base.base.id, intel_encoder->base.name); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) connector->interlace_allowed = true; intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_dp_aux_init(intel_dp); intel_connector_attach_encoder(intel_connector, intel_encoder); if (HAS_DDI(dev_priv)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; if (!intel_edp_init_connector(intel_dp, intel_connector)) { intel_dp_aux_fini(intel_dp); goto fail; } intel_dp_set_source_rates(intel_dp); intel_dp_set_common_rates(intel_dp); intel_dp_reset_max_link_params(intel_dp); /* init MST on ports that can support it */ intel_dp_mst_encoder_init(dig_port, intel_connector->base.base.id); intel_dp_add_properties(intel_dp, connector); if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { int ret = intel_dp_hdcp_init(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, "HDCP init failed, skipping.\n"); } /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ if (IS_G45(dev_priv)) { u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); intel_de_write(dev_priv, PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; intel_psr_init(intel_dp); return true; fail: intel_display_power_flush_work(dev_priv); drm_connector_cleanup(connector); return false; } void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; if (!HAS_DISPLAY(dev_priv)) return; for_each_intel_encoder(&dev_priv->drm, encoder) { struct intel_dp *intel_dp; if (encoder->type != INTEL_OUTPUT_DDI) continue; intel_dp = enc_to_intel_dp(encoder); if (!intel_dp_mst_source_support(intel_dp)) continue; if (intel_dp->is_mst) drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); } } void intel_dp_mst_resume(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; if (!HAS_DISPLAY(dev_priv)) return; for_each_intel_encoder(&dev_priv->drm, encoder) { struct intel_dp *intel_dp; int ret; if (encoder->type != INTEL_OUTPUT_DDI) continue; intel_dp = enc_to_intel_dp(encoder); if (!intel_dp_mst_source_support(intel_dp)) continue; ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, true); if (ret) { intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, false); } } }
linux-master
drivers/gpu/drm/i915/display/intel_dp.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include "hsw_ips.h" #include "i915_debugfs.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_crtc_state_dump.h" #include "intel_display_debugfs.h" #include "intel_display_power.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_drrs.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_panel.h" #include "intel_psr.h" #include "intel_psr_regs.h" #include "intel_wm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { return to_i915(node->minor->dev); } static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); seq_printf(m, "FB tracking busy bits: 0x%08x\n", dev_priv->display.fb_tracking.busy_bits); seq_printf(m, "FB tracking flip bits: 0x%08x\n", dev_priv->display.fb_tracking.flip_bits); return 0; } static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; bool sr_enabled = false; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); if (DISPLAY_VER(dev_priv) >= 9) /* no global SR status; inspect per-plane WM */; else if (HAS_PCH_SPLIT(dev_priv)) sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE; else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || IS_I945G(dev_priv) || IS_I945GM(dev_priv)) sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev_priv)) sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev_priv)) sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN; else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled)); return 0; } static int i915_opregion(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_opregion *opregion = &i915->display.opregion; if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); return 0; } static int i915_vbt(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_opregion *opregion = &i915->display.opregion; if (opregion->vbt) seq_write(m, opregion->vbt, opregion->vbt_size); return 0; } static int i915_gem_framebuffer_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_framebuffer *fbdev_fb = NULL; struct drm_framebuffer *drm_fb; #ifdef CONFIG_DRM_FBDEV_EMULATION fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev); if (fbdev_fb) { seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fbdev_fb->base.width, fbdev_fb->base.height, fbdev_fb->base.format->depth, fbdev_fb->base.format->cpp[0] * 8, fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); seq_putc(m, '\n'); } #endif 
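/* Below, walk the remaining user-created framebuffers under fb_lock; the fbdev framebuffer (if any) was already described above and is skipped in the loop. */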
mutex_lock(&dev_priv->drm.mode_config.fb_lock); drm_for_each_fb(drm_fb, &dev_priv->drm) { struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); if (fb == fbdev_fb) continue; seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fb->base.width, fb->base.height, fb->base.format->depth, fb->base.format->cpp[0] * 8, fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); seq_putc(m, '\n'); } mutex_unlock(&dev_priv->drm.mode_config.fb_lock); return 0; } static int i915_power_domain_info(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); intel_display_power_debug(i915, m); return 0; } static void intel_seq_print_mode(struct seq_file *m, int tabs, const struct drm_display_mode *mode) { int i; for (i = 0; i < tabs; i++) seq_putc(m, '\t'); seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } static void intel_encoder_info(struct seq_file *m, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_connector_list_iter conn_iter; struct drm_connector *connector; seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", encoder->base.base.id, encoder->base.name); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { const struct drm_connector_state *conn_state = connector->state; if (conn_state->best_encoder != &encoder->base) continue; seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); } drm_connector_list_iter_end(&conn_iter); } static void intel_panel_info(struct seq_file *m, struct intel_connector *connector) { const struct drm_display_mode *fixed_mode; if (list_empty(&connector->panel.fixed_modes)) return; seq_puts(m, "\tfixed modes:\n"); list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) intel_seq_print_mode(m, 2, fixed_mode); } static void intel_hdcp_info(struct seq_file *m, struct intel_connector *intel_connector) { bool hdcp_cap, hdcp2_cap; if (!intel_connector->hdcp.shim) { seq_puts(m, "No Connector Support"); goto out; } hdcp_cap = intel_hdcp_capable(intel_connector); hdcp2_cap = intel_hdcp2_capable(intel_connector); if (hdcp_cap) seq_puts(m, "HDCP1.4 "); if (hdcp2_cap) seq_puts(m, "HDCP2.2 "); if (!hdcp_cap && !hdcp2_cap) seq_puts(m, "None"); out: seq_puts(m, "\n"); } static void intel_dp_info(struct seq_file *m, struct intel_connector *connector) { struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); const struct edid *edid = drm_edid_raw(connector->detect_edid); seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", str_yes_no(connector->base.display_info.has_audio)); drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, edid, &intel_dp->aux); } static void intel_dp_mst_info(struct seq_file *m, struct intel_connector *connector) { bool has_audio = connector->base.display_info.has_audio; seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio)); } static void intel_hdmi_info(struct seq_file *m, struct intel_connector *connector) { bool has_audio = connector->base.display_info.has_audio; seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio)); } static void intel_connector_info(struct seq_file *m, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); const struct 
drm_connector_state *conn_state = connector->state; struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); const struct drm_display_mode *mode; seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", connector->base.id, connector->name, drm_get_connector_status_name(connector->status)); if (connector->status == connector_status_disconnected) return; seq_printf(m, "\tphysical dimensions: %dx%dmm\n", connector->display_info.width_mm, connector->display_info.height_mm); seq_printf(m, "\tsubpixel order: %s\n", drm_get_subpixel_order_name(connector->display_info.subpixel_order)); seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); if (!encoder) return; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: if (encoder->type == INTEL_OUTPUT_DP_MST) intel_dp_mst_info(m, intel_connector); else intel_dp_info(m, intel_connector); break; case DRM_MODE_CONNECTOR_HDMIA: if (encoder->type == INTEL_OUTPUT_HDMI || encoder->type == INTEL_OUTPUT_DDI) intel_hdmi_info(m, intel_connector); break; default: break; } seq_puts(m, "\tHDCP version: "); intel_hdcp_info(m, intel_connector); seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc); intel_panel_info(m, intel_connector); seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) intel_seq_print_mode(m, 2, mode); } static const char *plane_type(enum drm_plane_type type) { switch (type) { case DRM_PLANE_TYPE_OVERLAY: return "OVL"; case DRM_PLANE_TYPE_PRIMARY: return "PRI"; case DRM_PLANE_TYPE_CURSOR: return "CUR"; /* * Deliberately omitting default: to generate compiler warnings * when a new drm_plane_type gets added. */ } return "unknown"; } static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) { /* * According to doc only one DRM_MODE_ROTATE_ is allowed but this * will print them all to visualize if the values are misused */ snprintf(buf, bufsize, "%s%s%s%s%s%s(0x%08x)", (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", rotation); } static const char *plane_visibility(const struct intel_plane_state *plane_state) { if (plane_state->uapi.visible) return "visible"; if (plane_state->planar_slave) return "planar-slave"; return "hidden"; } static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); const struct drm_framebuffer *fb = plane_state->uapi.fb; struct drm_rect src, dst; char rot_str[48]; src = drm_plane_state_src(&plane_state->uapi); dst = drm_plane_state_dest(&plane_state->uapi); plane_rotation(rot_str, sizeof(rot_str), plane_state->uapi.rotation); seq_puts(m, "\t\tuapi: [FB:"); if (fb) seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id, &fb->format->format, fb->modifier, fb->width, fb->height); else seq_puts(m, "0] n/a,0x0,0x0,"); seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", plane_visibility(plane_state), DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str); if (plane_state->planar_linked_plane) seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name, plane_state->planar_slave ? 
"slave" : "master"); } static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); const struct drm_framebuffer *fb = plane_state->hw.fb; char rot_str[48]; if (!fb) return; plane_rotation(rot_str, sizeof(rot_str), plane_state->hw.rotation); seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", fb->base.id, &fb->format->format, fb->modifier, fb->width, fb->height, str_yes_no(plane_state->uapi.visible), DRM_RECT_FP_ARG(&plane_state->uapi.src), DRM_RECT_ARG(&plane_state->uapi.dst), rot_str); } static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_plane *plane; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", plane->base.base.id, plane->base.name, plane_type(plane->base.type)); intel_plane_uapi_info(m, plane); intel_plane_hw_info(m, plane); } } static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); int num_scalers = crtc->num_scalers; int i; /* Not all platformas have a scaler */ if (num_scalers) { seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d", num_scalers, crtc_state->scaler_state.scaler_users, crtc_state->scaler_state.scaler_id, crtc_state->hw.scaling_filter); for (i = 0; i < num_scalers; i++) { const struct intel_scaler *sc = &crtc_state->scaler_state.scalers[i]; seq_printf(m, ", scalers[%d]: use=%s, mode=%x", i, str_yes_no(sc->in_use), sc->mode); } seq_puts(m, "\n"); } else { seq_puts(m, "\tNo scalers available on this platform\n"); } } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) static void crtc_updates_info(struct seq_file *m, struct intel_crtc *crtc, const char *hdr) { u64 count; int row; count = 0; for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) count += crtc->debug.vbl.times[row]; seq_printf(m, "%sUpdates: %llu\n", hdr, count); if (!count) return; for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { char columns[80] = " |"; unsigned int x; if (row & 1) { const char *units; if (row > 10) { x = 1000000; units = "ms"; } else { x = 1000; units = "us"; } snprintf(columns, sizeof(columns), "%4ld%s |", DIV_ROUND_CLOSEST(BIT(row + 9), x), units); } if (crtc->debug.vbl.times[row]) { x = ilog2(crtc->debug.vbl.times[row]); memset(columns + 8, '*', x); columns[8 + x] = '\0'; } seq_printf(m, "%s%s\n", hdr, columns); } seq_printf(m, "%sMin update: %lluns\n", hdr, crtc->debug.vbl.min); seq_printf(m, "%sMax update: %lluns\n", hdr, crtc->debug.vbl.max); seq_printf(m, "%sAverage update: %lluns\n", hdr, div64_u64(crtc->debug.vbl.sum, count)); seq_printf(m, "%sOverruns > %uus: %u\n", hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); } static int crtc_updates_show(struct seq_file *m, void *data) { crtc_updates_info(m, m->private, ""); return 0; } static int crtc_updates_open(struct inode *inode, struct file *file) { return single_open(file, crtc_updates_show, inode->i_private); } static ssize_t crtc_updates_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct intel_crtc *crtc = m->private; /* May race with an update. Meh. 
*/ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl)); return len; } static const struct file_operations crtc_updates_fops = { .owner = THIS_MODULE, .open = crtc_updates_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = crtc_updates_write }; static void crtc_updates_add(struct intel_crtc *crtc) { debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry, crtc, &crtc_updates_fops); } #else static void crtc_updates_info(struct seq_file *m, struct intel_crtc *crtc, const char *hdr) { } static void crtc_updates_add(struct intel_crtc *crtc) { } #endif static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_encoder *encoder; seq_printf(m, "[CRTC:%d:%s]:\n", crtc->base.base.id, crtc->base.name); seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", str_yes_no(crtc_state->uapi.enable), str_yes_no(crtc_state->uapi.active), DRM_MODE_ARG(&crtc_state->uapi.mode)); seq_printf(m, "\thw: enable=%s, active=%s\n", str_yes_no(crtc_state->hw.enable), str_yes_no(crtc_state->hw.active)); seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n", DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n", DRM_MODE_ARG(&crtc_state->hw.pipe_mode)); seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n", DRM_RECT_ARG(&crtc_state->pipe_src), str_yes_no(crtc_state->dither), crtc_state->pipe_bpp); intel_scaler_info(m, crtc); if (crtc_state->bigjoiner_pipes) seq_printf(m, "\tLinked to 0x%x pipes as a %s\n", crtc_state->bigjoiner_pipes, intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master"); for_each_intel_encoder_mask(&dev_priv->drm, encoder, crtc_state->uapi.encoder_mask) intel_encoder_info(m, crtc, encoder); intel_plane_info(m, crtc); seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", str_yes_no(!crtc->cpu_fifo_underrun_disabled), str_yes_no(!crtc->pch_fifo_underrun_disabled)); crtc_updates_info(m, crtc, "\t"); } static int i915_display_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; intel_wakeref_t wakeref; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); drm_modeset_lock_all(&dev_priv->drm); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); for_each_intel_crtc(&dev_priv->drm, crtc) intel_crtc_info(m, crtc); seq_printf(m, "\n"); seq_printf(m, "Connector info\n"); seq_printf(m, "--------------\n"); drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) intel_connector_info(m, connector); drm_connector_list_iter_end(&conn_iter); drm_modeset_unlock_all(&dev_priv->drm); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); int i; drm_modeset_lock_all(&dev_priv->drm); seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", dev_priv->display.dpll.ref_clks.nssc, dev_priv->display.dpll.ref_clks.ssc); for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, pll->info->id); seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", 
pll->state.pipe_mask, pll->active_mask, str_yes_no(pll->on)); seq_printf(m, " tracked hardware state:\n"); seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); seq_printf(m, " dpll_md: 0x%08x\n", pll->state.hw_state.dpll_md); seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0); seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", pll->state.hw_state.mg_refclkin_ctl); seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", pll->state.hw_state.mg_clktop2_coreclkctl1); seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", pll->state.hw_state.mg_clktop2_hsclkctl); seq_printf(m, " mg_pll_div0: 0x%08x\n", pll->state.hw_state.mg_pll_div0); seq_printf(m, " mg_pll_div1: 0x%08x\n", pll->state.hw_state.mg_pll_div1); seq_printf(m, " mg_pll_lf: 0x%08x\n", pll->state.hw_state.mg_pll_lf); seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", pll->state.hw_state.mg_pll_frac_lock); seq_printf(m, " mg_pll_ssc: 0x%08x\n", pll->state.hw_state.mg_pll_ssc); seq_printf(m, " mg_pll_bias: 0x%08x\n", pll->state.hw_state.mg_pll_bias); seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", pll->state.hw_state.mg_pll_tdc_coldst_bias); } drm_modeset_unlock_all(&dev_priv->drm); return 0; } static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct skl_ddb_entry *entry; struct intel_crtc *crtc; if (DISPLAY_VER(dev_priv) < 9) return -ENODEV; drm_modeset_lock_all(&dev_priv->drm); seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum pipe pipe = crtc->pipe; enum plane_id plane_id; seq_printf(m, "Pipe %c\n", pipe_name(pipe)); for_each_plane_id_on_crtc(crtc, plane_id) { entry = &crtc_state->wm.skl.plane_ddb[plane_id]; seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1, entry->start, entry->end, skl_ddb_entry_size(entry)); } entry = &crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, entry->end, skl_ddb_entry_size(entry)); } drm_modeset_unlock_all(&dev_priv->drm); return 0; } static bool intel_lpsp_power_well_enabled(struct drm_i915_private *i915, enum i915_power_well_id power_well_id) { intel_wakeref_t wakeref; bool is_enabled; wakeref = intel_runtime_pm_get(&i915->runtime_pm); is_enabled = intel_display_power_well_is_enabled(i915, power_well_id); intel_runtime_pm_put(&i915->runtime_pm, wakeref); return is_enabled; } static int i915_lpsp_status(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); bool lpsp_enabled = false; if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) { lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2); } else if (IS_DISPLAY_VER(i915, 11, 12)) { lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL); } else { seq_puts(m, "LPSP: not supported\n"); return 0; } seq_printf(m, "LPSP: %s\n", str_enabled_disabled(lpsp_enabled)); return 0; } static int i915_dp_mst_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct 
intel_encoder *intel_encoder; struct intel_digital_port *dig_port; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; intel_encoder = intel_attached_encoder(to_intel_connector(connector)); if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) continue; dig_port = enc_to_dig_port(intel_encoder); if (!intel_dp_mst_source_support(&dig_port->dp)) continue; seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", dig_port->base.base.base.id, dig_port->base.base.name); drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr); } drm_connector_list_iter_end(&conn_iter); return 0; } static ssize_t i915_displayport_test_active_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { char *input_buffer; int status = 0; struct drm_device *dev; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; int val = 0; dev = ((struct seq_file *)file->private_data)->private; if (len == 0) return 0; input_buffer = memdup_user_nul(ubuf, len); if (IS_ERR(input_buffer)) return PTR_ERR(input_buffer); drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_encoder *encoder; if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; encoder = to_intel_encoder(connector->encoder); if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) continue; if (encoder && connector->status == connector_status_connected) { intel_dp = enc_to_intel_dp(encoder); status = kstrtoint(input_buffer, 10, &val); if (status < 0) break; drm_dbg(dev, "Got %d for test active\n", val); /* To prevent erroneous activation of the compliance * testing code, only accept an actual value of 1 here */ if (val == 1) intel_dp->compliance.test_active = true; else intel_dp->compliance.test_active = false; } } drm_connector_list_iter_end(&conn_iter); kfree(input_buffer); if (status < 0) return status; *offp += len; return len; } static int i915_displayport_test_active_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_encoder *encoder; if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; encoder = to_intel_encoder(connector->encoder); if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) continue; if (encoder && connector->status == connector_status_connected) { intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_active) seq_puts(m, "1"); else seq_puts(m, "0"); } else seq_puts(m, "0"); } drm_connector_list_iter_end(&conn_iter); return 0; } static int i915_displayport_test_active_open(struct inode *inode, struct file *file) { return single_open(file, i915_displayport_test_active_show, inode->i_private); } static const struct file_operations i915_displayport_test_active_fops = { .owner = THIS_MODULE, .open = i915_displayport_test_active_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_displayport_test_active_write }; static int i915_displayport_test_data_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv 
= m->private; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_encoder *encoder; if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; encoder = to_intel_encoder(connector->encoder); if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) continue; if (encoder && connector->status == connector_status_connected) { intel_dp = enc_to_intel_dp(encoder); if (intel_dp->compliance.test_type == DP_TEST_LINK_EDID_READ) seq_printf(m, "%lx", intel_dp->compliance.test_data.edid); else if (intel_dp->compliance.test_type == DP_TEST_LINK_VIDEO_PATTERN) { seq_printf(m, "hdisplay: %d\n", intel_dp->compliance.test_data.hdisplay); seq_printf(m, "vdisplay: %d\n", intel_dp->compliance.test_data.vdisplay); seq_printf(m, "bpc: %u\n", intel_dp->compliance.test_data.bpc); } else if (intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { seq_printf(m, "pattern: %d\n", intel_dp->compliance.test_data.phytest.phy_pattern); seq_printf(m, "Number of lanes: %d\n", intel_dp->compliance.test_data.phytest.num_lanes); seq_printf(m, "Link Rate: %d\n", intel_dp->compliance.test_data.phytest.link_rate); seq_printf(m, "level: %02x\n", intel_dp->train_set[0]); } } else seq_puts(m, "0"); } drm_connector_list_iter_end(&conn_iter); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); static int i915_displayport_test_type_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_encoder *encoder; if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; encoder = to_intel_encoder(connector->encoder); if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) continue; if (encoder && connector->status == connector_status_connected) { intel_dp = enc_to_intel_dp(encoder); seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); } else seq_puts(m, "0"); } drm_connector_list_iter_end(&conn_iter); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); static ssize_t i915_fifo_underrun_reset_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct drm_i915_private *dev_priv = filp->private_data; struct intel_crtc *crtc; int ret; bool reset; ret = kstrtobool_from_user(ubuf, cnt, &reset); if (ret) return ret; if (!reset) return cnt; for_each_intel_crtc(&dev_priv->drm, crtc) { struct drm_crtc_commit *commit; struct intel_crtc_state *crtc_state; ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); if (ret) return ret; crtc_state = to_intel_crtc_state(crtc->base.state); commit = crtc_state->uapi.commit; if (commit) { ret = wait_for_completion_interruptible(&commit->hw_done); if (!ret) ret = wait_for_completion_interruptible(&commit->flip_done); } if (!ret && crtc_state->hw.active) { drm_dbg_kms(&dev_priv->drm, "Re-arming FIFO underruns on pipe %c\n", pipe_name(crtc->pipe)); intel_crtc_arm_fifo_underrun(crtc, crtc_state); } drm_modeset_unlock(&crtc->base.mutex); if (ret) return ret; } intel_fbc_reset_underrun(dev_priv); return cnt; } static const struct file_operations i915_fifo_underrun_reset_ops = { .owner = THIS_MODULE, .open = simple_open, .write = i915_fifo_underrun_reset_write, .llseek = default_llseek, }; static 
const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, {"i915_vbt", i915_vbt, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, {"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_ddb_info", i915_ddb_info, 0}, {"i915_lpsp_status", i915_lpsp_status, 0}, }; static const struct { const char *name; const struct file_operations *fops; } intel_display_debugfs_files[] = { {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, }; void intel_display_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; int i; for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { debugfs_create_file(intel_display_debugfs_files[i].name, S_IRUGO | S_IWUSR, minor->debugfs_root, to_i915(minor->dev), intel_display_debugfs_files[i].fops); } drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); intel_cdclk_debugfs_register(i915); intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); intel_psr_debugfs_register(i915); intel_wm_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "Panel power up delay: %d\n", intel_dp->pps.panel_power_up_delay); seq_printf(m, "Panel power down delay: %d\n", intel_dp->pps.panel_power_down_delay); seq_printf(m, "Backlight on delay: %d\n", intel_dp->pps.backlight_on_delay); seq_printf(m, "Backlight off delay: %d\n", intel_dp->pps.backlight_off_delay); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_panel); static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); int ret; ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); if (ret) return ret; if (!connector->encoder || connector->status != connector_status_connected) { ret = -ENODEV; goto out; } seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); intel_hdcp_info(m, intel_connector); out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return ret; } DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); static int i915_lpsp_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_encoder *encoder; bool lpsp_capable = false; encoder = intel_attached_encoder(to_intel_connector(connector)); if (!encoder) return -ENODEV; if (connector->status != connector_status_connected) return -ENODEV; if (DISPLAY_VER(i915) >= 13) lpsp_capable = encoder->port <= PORT_B; else if (DISPLAY_VER(i915) >= 12) /* * Actually TGL can drive LPSP on port till DDI_C * but there is no physical connected DDI_C on TGL sku's, * even driver is not 
initilizing DDI_C port for gen12. */ lpsp_capable = encoder->port <= PORT_B; else if (DISPLAY_VER(i915) == 11) lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI || connector->connector_type == DRM_MODE_CONNECTOR_eDP); else if (IS_DISPLAY_VER(i915, 9, 10)) lpsp_capable = (encoder->port == PORT_A && (connector->connector_type == DRM_MODE_CONNECTOR_DSI || connector->connector_type == DRM_MODE_CONNECTOR_eDP || connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP; seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable"); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability); static int i915_dsc_fec_support_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_device *dev = connector->dev; struct drm_crtc *crtc; struct intel_dp *intel_dp; struct drm_modeset_acquire_ctx ctx; struct intel_crtc_state *crtc_state = NULL; int ret = 0; bool try_again = false; drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); do { try_again = false; ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); if (ret) { if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { try_again = true; continue; } break; } crtc = connector->state->crtc; if (connector->status != connector_status_connected || !crtc) { ret = -ENODEV; break; } ret = drm_modeset_lock(&crtc->mutex, &ctx); if (ret == -EDEADLK) { ret = drm_modeset_backoff(&ctx); if (!ret) { try_again = true; continue; } break; } else if (ret) { break; } intel_dp = intel_attached_dp(to_intel_connector(connector)); crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", str_yes_no(crtc_state->dsc.compression_enable)); seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); seq_printf(m, "DSC_Output_Format_Sink_Support: RGB: %s YCBCR420: %s YCBCR444: %s\n", str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, DP_DSC_RGB)), str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, DP_DSC_YCbCr420_Native)), str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, DP_DSC_YCbCr444))); seq_printf(m, "Force_DSC_Enable: %s\n", str_yes_no(intel_dp->force_dsc_en)); if (!intel_dp_is_edp(intel_dp)) seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(drm_dp_sink_supports_fec(intel_dp->fec_capable))); } while (try_again); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); return ret; } static ssize_t i915_dsc_fec_support_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { bool dsc_enable = false; int ret; struct drm_connector *connector = ((struct seq_file *)file->private_data)->private; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (len == 0) return 0; drm_dbg(&i915->drm, "Copied %zu bytes from user to force DSC\n", len); ret = kstrtobool_from_user(ubuf, len, &dsc_enable); if (ret < 0) return ret; drm_dbg(&i915->drm, "Got %s for DSC Enable\n", (dsc_enable) ? 
"true" : "false"); intel_dp->force_dsc_en = dsc_enable; *offp += len; return len; } static int i915_dsc_fec_support_open(struct inode *inode, struct file *file) { return single_open(file, i915_dsc_fec_support_show, inode->i_private); } static const struct file_operations i915_dsc_fec_support_fops = { .owner = THIS_MODULE, .open = i915_dsc_fec_support_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_dsc_fec_support_write }; static int i915_dsc_bpc_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_device *dev = connector->dev; struct drm_crtc *crtc; struct intel_crtc_state *crtc_state; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); int ret; if (!encoder) return -ENODEV; ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); if (ret) return ret; crtc = connector->state->crtc; if (connector->status != connector_status_connected || !crtc) { ret = -ENODEV; goto out; } crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component); out: drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } static ssize_t i915_dsc_bpc_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct drm_connector *connector = ((struct seq_file *)file->private_data)->private; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int dsc_bpc = 0; int ret; ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc); if (ret < 0) return ret; intel_dp->force_dsc_bpc = dsc_bpc; *offp += len; return len; } static int i915_dsc_bpc_open(struct inode *inode, struct file *file) { return single_open(file, i915_dsc_bpc_show, inode->i_private); } static const struct file_operations i915_dsc_bpc_fops = { .owner = THIS_MODULE, .open = i915_dsc_bpc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_dsc_bpc_write }; static int i915_dsc_output_format_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_device *dev = connector->dev; struct drm_crtc *crtc; struct intel_crtc_state *crtc_state; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); int ret; if (!encoder) return -ENODEV; ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); if (ret) return ret; crtc = connector->state->crtc; if (connector->status != connector_status_connected || !crtc) { ret = -ENODEV; goto out; } crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Output_Format: %s\n", intel_output_format_name(crtc_state->output_format)); out: drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } static ssize_t i915_dsc_output_format_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct drm_connector *connector = ((struct seq_file *)file->private_data)->private; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int dsc_output_format = 0; int ret; ret = kstrtoint_from_user(ubuf, len, 0, &dsc_output_format); if (ret < 0) return ret; intel_dp->force_dsc_output_format = dsc_output_format; *offp += len; return len; } static int i915_dsc_output_format_open(struct inode *inode, struct file *file) { return single_open(file, i915_dsc_output_format_show, inode->i_private); } static 
const struct file_operations i915_dsc_output_format_fops = { .owner = THIS_MODULE, .open = i915_dsc_output_format_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = i915_dsc_output_format_write }; /* * Returns the Current CRTC's bpc. * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc */ static int i915_current_bpc_show(struct seq_file *m, void *data) { struct intel_crtc *crtc = m->private; struct intel_crtc_state *crtc_state; int ret; ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); if (ret) return ret; crtc_state = to_intel_crtc_state(crtc->base.state); seq_printf(m, "Current: %u\n", crtc_state->pipe_bpp / 3); drm_modeset_unlock(&crtc->base.mutex); return ret; } DEFINE_SHOW_ATTRIBUTE(i915_current_bpc); /* Pipe may differ from crtc index if pipes are fused off */ static int intel_crtc_pipe_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; seq_printf(m, "%c\n", pipe_name(crtc->pipe)); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe); /** * intel_connector_debugfs_add - add i915 specific connector debugfs files * @intel_connector: pointer to a registered drm_connector * * Cleanup will be done by drm_connector_unregister() through a call to * drm_debugfs_connector_remove(). */ void intel_connector_debugfs_add(struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct dentry *root = connector->debugfs_entry; struct drm_i915_private *dev_priv = to_i915(connector->dev); /* The connector must have been registered beforehands. */ if (!root) return; intel_drrs_connector_debugfs_add(intel_connector); intel_psr_connector_debugfs_add(intel_connector); if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) debugfs_create_file("i915_panel_timings", S_IRUGO, root, connector, &i915_panel_fops); if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, connector, &i915_hdcp_sink_capability_fops); } if (DISPLAY_VER(dev_priv) >= 11 && ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && !to_intel_connector(connector)->mst_port) || connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { debugfs_create_file("i915_dsc_fec_support", 0644, root, connector, &i915_dsc_fec_support_fops); debugfs_create_file("i915_dsc_bpc", 0644, root, connector, &i915_dsc_bpc_fops); debugfs_create_file("i915_dsc_output_format", 0644, root, connector, &i915_dsc_output_format_fops); } if (connector->connector_type == DRM_MODE_CONNECTOR_DSI || connector->connector_type == DRM_MODE_CONNECTOR_eDP || connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) debugfs_create_file("i915_lpsp_capability", 0444, root, connector, &i915_lpsp_capability_fops); } /** * intel_crtc_debugfs_add - add i915 specific crtc debugfs files * @crtc: pointer to a drm_crtc * * Failure to add debugfs entries should generally be ignored. 
*/ void intel_crtc_debugfs_add(struct intel_crtc *crtc) { struct dentry *root = crtc->base.debugfs_entry; if (!root) return; crtc_updates_add(crtc); intel_drrs_crtc_debugfs_add(crtc); intel_fbc_crtc_debugfs_add(crtc); hsw_ips_crtc_debugfs_add(crtc); debugfs_create_file("i915_current_bpc", 0444, root, crtc, &i915_current_bpc_fops); debugfs_create_file("i915_pipe", 0444, root, crtc, &intel_crtc_pipe_fops); }
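/*
 * Example usage of the DSC debugfs knobs registered above, in the same
 * spirit as the "Example usage" note on i915_current_bpc. The connector
 * directory name ("DP-1" here) is only an assumption and varies by system
 * and connector:
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   echo 10 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_bpc
 *
 * As the write handlers above only store the forced values (force_dsc_en,
 * force_dsc_bpc, force_dsc_output_format), writing them does not by itself
 * trigger a modeset; the values are consulted the next time the CRTC state
 * is recomputed.
 */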
linux-master
drivers/gpu/drm/i915/display/intel_display_debugfs.c
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> #include <linux/string_helpers.h> #include "i915_reg.h" #include "intel_crtc.h" #include "intel_cx0_phy.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_panel.h" #include "intel_pps.h" #include "intel_snps_phy.h" #include "vlv_sideband.h" struct intel_dpll_funcs { int (*crtc_compute_clock)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*crtc_get_shared_dpll)(struct intel_atomic_state *state, struct intel_crtc *crtc); }; struct intel_limit { struct { int min, max; } dot, vco, n, m, m1, m2, p, p1; struct { int dot_limit; int p2_slow, p2_fast; } p2; }; static const struct intel_limit intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 2, .max = 33 }, .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 2 }, }; static const struct intel_limit intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 2, .max = 33 }, .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 4 }, }; static const struct intel_limit intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 1, .max = 6 }, .p2 = { .dot_limit = 165000, .p2_slow = 14, .p2_fast = 7 }, }; static const struct intel_limit intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 8, .max = 18 }, .m2 = { .min = 3, .max = 7 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, }; static const struct intel_limit intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 8, .max = 18 }, .m2 = { .min = 3, .max = 7 }, .p = { .min = 7, .max = 98 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 7 }, }; static const struct intel_limit intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 10, .max = 30 }, .p1 = { .min = 1, .max = 3}, .p2 = { .dot_limit = 270000, .p2_slow = 10, .p2_fast = 10 }, }; static const struct intel_limit intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 16, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8}, .p2 = { .dot_limit = 165000, .p2_slow = 10, .p2_fast = 5 }, }; static const struct intel_limit 
intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 0, .p2_slow = 14, .p2_fast = 14 }, }; static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 0, .p2_slow = 7, .p2_fast = 7 }, }; static const struct intel_limit pnv_limits_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, /* Pineview only has one combined m divider, which we treat as m2. */ .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, }; static const struct intel_limit pnv_limits_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 7, .max = 112 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 14 }, }; /* Ironlake / Sandybridge * * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ static const struct intel_limit ilk_limits_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, .m = { .min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 10, .p2_fast = 5 }, }; static const struct intel_limit ilk_limits_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 118 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, }; static const struct intel_limit ilk_limits_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 56 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, }; /* LVDS 100mhz refclk limits. 
*/ static const struct intel_limit ilk_limits_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, }; static const struct intel_limit ilk_limits_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, }; static const struct intel_limit intel_limits_vlv = { /* * These are based on the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast * clock and actual rate limits are more relaxed, so checking * them would make no difference. */ .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 4000000, .max = 6000000 }, .n = { .min = 1, .max = 7 }, .m1 = { .min = 2, .max = 3 }, .m2 = { .min = 11, .max = 156 }, .p1 = { .min = 2, .max = 3 }, .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; static const struct intel_limit intel_limits_chv = { /* * These are based on the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast * clock and actual rate limits are more relaxed, so checking * them would make no difference. */ .dot = { .min = 25000, .max = 540000 }, .vco = { .min = 4800000, .max = 6480000 }, .n = { .min = 1, .max = 1 }, .m1 = { .min = 2, .max = 2 }, .m2 = { .min = 24 << 22, .max = 175 << 22 }, .p1 = { .min = 2, .max = 4 }, .p2 = { .p2_slow = 1, .p2_fast = 14 }, }; static const struct intel_limit intel_limits_bxt = { .dot = { .min = 25000, .max = 594000 }, .vco = { .min = 4800000, .max = 6700000 }, .n = { .min = 1, .max = 1 }, .m1 = { .min = 2, .max = 2 }, /* FIXME: find real m2 limits */ .m2 = { .min = 2 << 22, .max = 255 << 22 }, .p1 = { .min = 2, .max = 4 }, .p2 = { .p2_slow = 1, .p2_fast = 20 }, }; /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. * The helpers' return value is the rate of the clock that is fed to the * display engine's pipe which can be the above fast dot clock rate or a * divided-down version of it. 
*/ /* m1 is reserved as 0 in Pineview, n is a ring counter */ int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; if (WARN_ON(clock->n == 0 || clock->p == 0)) return 0; clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot; } static u32 i9xx_dpll_compute_m(const struct dpll *dpll) { return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } int i9xx_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) return 0; clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot; } int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2 * 5; if (WARN_ON(clock->n == 0 || clock->p == 0)) return 0; clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot; } int chv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2 * 5; if (WARN_ON(clock->n == 0 || clock->p == 0)) return 0; clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m), clock->n << 22); clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); return clock->dot; } /* * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. */ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, const struct intel_limit *limit, const struct dpll *clock) { if (clock->n < limit->n.min || limit->n.max < clock->n) return false; if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) return false; if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) return false; if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) return false; if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv)) if (clock->m1 <= clock->m2) return false; if (!IS_LP(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) return false; if (clock->m < limit->m.min || limit->m.max < clock->m) return false; } if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) return false; /* XXX: We may need to be checking "Dot clock" depending on the multiplier, * connector, etc., rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) return false; return true; } static int i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. * We haven't figured out how to reliably set up different * single/dual channel state, if we even can. */ if (intel_is_dual_link_lvds(dev_priv)) return limit->p2.p2_fast; else return limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) return limit->p2.p2_slow; else return limit->p2.p2_fast; } } /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. * * Target and reference clocks are specified in kHz. * * If match_clock is provided, then best_clock P divider must match the P * divider from @match_clock used for LVDS downclocking. 
*/ static bool i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, int target, int refclk, const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { if (clock.m2 >= clock.m1) break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; i9xx_calc_dpll_params(refclk, &clock); if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; if (match_clock && clock.p != match_clock->p) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return (err != target); } /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. * * Target and reference clocks are specified in kHz. * * If match_clock is provided, then best_clock P divider must match the P * divider from @match_clock used for LVDS downclocking. */ static bool pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, int target, int refclk, const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; pnv_calc_dpll_params(refclk, &clock); if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; if (match_clock && clock.p != match_clock->p) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return (err != target); } /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. * * Target and reference clocks are specified in kHz. * * If match_clock is provided, then best_clock P divider must match the P * divider from @match_clock used for LVDS downclocking. 
*/ static bool g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, int target, int refclk, const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int max_n; bool found = false; /* approximately equals target * 0.00585 */ int err_most = (target >> 8) + (target >> 9); memset(best_clock, 0, sizeof(*best_clock)); clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); max_n = limit->n.max; /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { /* based on hardware requirement, prefere larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = limit->m2.max; clock.m2 >= limit->m2.min; clock.m2--) { for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { int this_err; i9xx_calc_dpll_params(refclk, &clock); if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; this_err = abs(clock.dot - target); if (this_err < err_most) { *best_clock = clock; err_most = this_err; max_n = clock.n; found = true; } } } } } return found; } /* * Check if the calculated PLL configuration is more optimal compared to the * best configuration and error found so far. Return the calculated error. */ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, const struct dpll *calculated_clock, const struct dpll *best_clock, unsigned int best_error_ppm, unsigned int *error_ppm) { /* * For CHV ignore the error and consider only the P value. * Prefer a bigger P value based on HW requirements. */ if (IS_CHERRYVIEW(to_i915(dev))) { *error_ppm = 0; return calculated_clock->p > best_clock->p; } if (drm_WARN_ON_ONCE(dev, !target_freq)) return false; *error_ppm = div_u64(1000000ULL * abs(target_freq - calculated_clock->dot), target_freq); /* * Prefer a better P value over a better (smaller) error if the error * is small. Ensure this preference for future configurations too by * setting the error to 0. */ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { *error_ppm = 0; return true; } return *error_ppm + 10 < best_error_ppm; } /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. */ static bool vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, int target, int refclk, const struct dpll *match_clock, struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ int max_n = min(limit->n.max, refclk / 19200); bool found = false; memset(best_clock, 0, sizeof(*best_clock)); /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; clock.p2 -= clock.p2 > 10 ? 
2 : 1) { clock.p = clock.p1 * clock.p2 * 5; /* based on hardware requirement, prefer bigger m1,m2 values */ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { unsigned int ppm; clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, refclk * clock.m1); vlv_calc_dpll_params(refclk, &clock); if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, bestppm, &ppm)) continue; *best_clock = clock; bestppm = ppm; found = true; } } } } return found; } /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. */ static bool chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, int target, int refclk, const struct dpll *match_clock, struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; struct dpll clock; u64 m2; int found = false; memset(best_clock, 0, sizeof(*best_clock)); best_error_ppm = 1000000; /* * Based on hardware doc, the n always set to 1, and m1 always * set to 2. If requires to support 200Mhz refclk, we need to * revisit this because n may not 1 anymore. */ clock.n = 1; clock.m1 = 2; for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; clock.p2 -= clock.p2 > 10 ? 2 : 1) { unsigned int error_ppm; clock.p = clock.p1 * clock.p2 * 5; m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, refclk * clock.m1); if (m2 > INT_MAX/clock.m1) continue; clock.m2 = m2; chv_calc_dpll_params(refclk, &clock); if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) continue; if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, best_error_ppm, &error_ppm)) continue; *best_clock = clock; best_error_ppm = error_ppm; found = true; } } return found; } bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock) { const struct intel_limit *limit = &intel_limits_bxt; int refclk = 100000; return chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, best_clock); } u32 i9xx_dpll_compute_fp(const struct dpll *dpll) { return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; } static u32 pnv_dpll_compute_fp(const struct dpll *dpll) { return (1 << dpll->n) << 16 | dpll->m2; } static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 fp, fp2; if (IS_PINEVIEW(dev_priv)) { fp = pnv_dpll_compute_fp(clock); fp2 = pnv_dpll_compute_fp(reduced_clock); } else { fp = i9xx_dpll_compute_fp(clock); fp2 = i9xx_dpll_compute_fp(reduced_clock); } crtc_state->dpll_hw_state.fp0 = fp; crtc_state->dpll_hw_state.fp1 = fp2; } static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = DPLL_VGA_MODE_DIS; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { dpll |= (crtc_state->pixel_multiplier - 1) << 
SDVO_MULTIPLIER_SHIFT_HIRES; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ if (IS_G4X(dev_priv)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; } else if (IS_PINEVIEW(dev_priv)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; WARN_ON(reduced_clock->p1 != clock->p1); } else { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; WARN_ON(reduced_clock->p1 != clock->p1); } switch (clock->p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; case 7: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; break; case 10: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; break; case 14: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } WARN_ON(reduced_clock->p2 != clock->p2); if (DISPLAY_VER(dev_priv) >= 4) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll = dpll; if (DISPLAY_VER(dev_priv) >= 4) { u32 dpll_md = (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; crtc_state->dpll_hw_state.dpll_md = dpll_md; } } static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = DPLL_VGA_MODE_DIS; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) dpll |= PLL_P1_DIVIDE_BY_TWO; else dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; if (clock->p2 == 4) dpll |= PLL_P2_DIVIDE_BY_4; } WARN_ON(reduced_clock->p1 != clock->p1); WARN_ON(reduced_clock->p2 != clock->p2); /* * Bspec: * "[Almador Errata}: For the correct operation of the muxed DVO pins * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock * Enable) must be set to “1” in both the DPLL A Control Register * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." * * For simplicity We simply keep both bits always enabled in * both DPLLS. The spec says we should disable the DVO 2X clock * when not needed, but this seems to work fine in practice. 
*/ if (IS_I830(dev_priv) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll = dpll; } static int hsw_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); int ret; if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; ret = intel_compute_shared_dplls(state, crtc, encoder); if (ret) return ret; /* FIXME this is a mess */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; /* CRT dotclock is determined via other means */ if (!crtc_state->has_pch_encoder) crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; return intel_reserve_shared_dplls(state, crtc, encoder); } static int dg2_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); int ret; ret = intel_mpllb_calc_state(crtc_state, encoder); if (ret) return ret; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int mtl_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); enum phy phy = intel_port_to_phy(i915, encoder->port); int ret; ret = intel_cx0pll_calc_state(crtc_state, encoder); if (ret) return ret; /* TODO: Do the readback via intel_compute_shared_dplls() */ if (intel_is_c10phy(i915, phy)) crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); else crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { return dpll->m < factor * dpll->n; } static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 fp, fp2; int factor; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->display.vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev_priv))) factor = 25; } else if (crtc_state->sdvo_tv_clock) 
{ factor = 20; } fp = i9xx_dpll_compute_fp(clock); if (ilk_needs_fb_cb_tune(clock, factor)) fp |= FP_CB_TUNE; fp2 = i9xx_dpll_compute_fp(reduced_clock); if (ilk_needs_fb_cb_tune(reduced_clock, factor)) fp2 |= FP_CB_TUNE; crtc_state->dpll_hw_state.fp0 = fp; crtc_state->dpll_hw_state.fp1 = fp2; } static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; ilk_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; dpll |= (crtc_state->pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) dpll |= DPLL_SDVO_HIGH_SPEED; if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; /* * The high speed IO clock is only really required for * SDVO/HDMI/DP, but we also enable it for CRT to make it * possible to share the DPLL between CRT and HDMI. Enabling * the clock needlessly does no real harm, except use up a * bit of power potentially. * * We'll limit this to IVB with 3 pipes, since it has only two * DPLLs and so DPLL sharing is the only way to get three pipes * driving PCH ports at the same time. On SNB we could do this, * and potentially avoid enabling the second DPLL, but it's not * clear if it''s a win or loss power wise. No point in doing * this on ILK at all since it has a fixed DPLL<->pipe mapping. */ if (INTEL_NUM_PIPES(dev_priv) == 3 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; switch (clock->p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; case 7: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; break; case 10: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; break; case 14: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } WARN_ON(reduced_clock->p2 != clock->p2); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll = dpll; } static int ilk_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; int ret; /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ if (!crtc_state->has_pch_encoder) return 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", dev_priv->display.vbt.lvds_ssc_freq); refclk = dev_priv->display.vbt.lvds_ssc_freq; } if (intel_is_dual_link_lvds(dev_priv)) { if (refclk == 100000) limit = &ilk_limits_dual_lvds_100m; else limit = &ilk_limits_dual_lvds; } else { if (refclk == 100000) limit = &ilk_limits_single_lvds_100m; else limit = &ilk_limits_single_lvds; } } else { limit = &ilk_limits_dac; } if (!crtc_state->clock_set && !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); ret = intel_compute_shared_dplls(state, crtc, NULL); if (ret) return ret; crtc_state->port_clock = crtc_state->dpll.dot; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return ret; } static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ if (!crtc_state->has_pch_encoder) return 0; return intel_reserve_shared_dplls(state, crtc, NULL); } void vlv_compute_dpll(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (crtc->pipe != PIPE_A) crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; crtc_state->dpll_hw_state.dpll_md = (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } void chv_compute_dpll(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (crtc->pipe != PIPE_A) crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll_md = (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } static int chv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_chv; int refclk = 100000; if (!crtc_state->clock_set && !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; chv_compute_dpll(crtc_state); /* FIXME this is a mess */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; crtc_state->port_clock = crtc_state->dpll.dot; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int vlv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_vlv; int refclk = 100000; if (!crtc_state->clock_set && !vlv_find_best_dpll(limit, crtc_state, 
crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { return -EINVAL; } vlv_compute_dpll(crtc_state); /* FIXME this is a mess */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; crtc_state->port_clock = crtc_state->dpll.dot; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int g4x_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); } if (intel_is_dual_link_lvds(dev_priv)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else { /* The option is for other outputs */ limit = &intel_limits_i9xx_sdvo; } if (!crtc_state->clock_set && !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); crtc_state->port_clock = crtc_state->dpll.dot; /* FIXME this is a mess */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int pnv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); } limit = &pnv_limits_lvds; } else { limit = &pnv_limits_sdvo; } if (!crtc_state->clock_set && !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); crtc_state->port_clock = crtc_state->dpll.dot; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); } limit = &intel_limits_i9xx_lvds; } else { limit = &intel_limits_i9xx_sdvo; } if (!crtc_state->clock_set && !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); crtc_state->port_clock = crtc_state->dpll.dot; /* FIXME this is a mess */ if 
(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 48000; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); } limit = &intel_limits_i8xx_lvds; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { limit = &intel_limits_i8xx_dvo; } else { limit = &intel_limits_i8xx_dac; } if (!crtc_state->clock_set && !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) return -EINVAL; i8xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); crtc_state->port_clock = crtc_state->dpll.dot; crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); return 0; } static const struct intel_dpll_funcs mtl_dpll_funcs = { .crtc_compute_clock = mtl_crtc_compute_clock, }; static const struct intel_dpll_funcs dg2_dpll_funcs = { .crtc_compute_clock = dg2_crtc_compute_clock, }; static const struct intel_dpll_funcs hsw_dpll_funcs = { .crtc_compute_clock = hsw_crtc_compute_clock, .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll, }; static const struct intel_dpll_funcs ilk_dpll_funcs = { .crtc_compute_clock = ilk_crtc_compute_clock, .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll, }; static const struct intel_dpll_funcs chv_dpll_funcs = { .crtc_compute_clock = chv_crtc_compute_clock, }; static const struct intel_dpll_funcs vlv_dpll_funcs = { .crtc_compute_clock = vlv_crtc_compute_clock, }; static const struct intel_dpll_funcs g4x_dpll_funcs = { .crtc_compute_clock = g4x_crtc_compute_clock, }; static const struct intel_dpll_funcs pnv_dpll_funcs = { .crtc_compute_clock = pnv_crtc_compute_clock, }; static const struct intel_dpll_funcs i9xx_dpll_funcs = { .crtc_compute_clock = i9xx_crtc_compute_clock, }; static const struct intel_dpll_funcs i8xx_dpll_funcs = { .crtc_compute_clock = i8xx_crtc_compute_clock, }; int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if (!crtc_state->hw.enable) return 0; ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc); if (ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n", crtc->base.base.id, crtc->base.name); return ret; } return 0; } int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll); if (!crtc_state->hw.enable || crtc_state->shared_dpll) return 0; if (!i915->display.funcs.dpll->crtc_get_shared_dpll) return 0; ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc); if 
(ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n", crtc->base.base.id, crtc->base.name); return ret; } return 0; } void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 14) dev_priv->display.funcs.dpll = &mtl_dpll_funcs; else if (IS_DG2(dev_priv)) dev_priv->display.funcs.dpll = &dg2_dpll_funcs; else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) dev_priv->display.funcs.dpll = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->display.funcs.dpll = &ilk_dpll_funcs; else if (IS_CHERRYVIEW(dev_priv)) dev_priv->display.funcs.dpll = &chv_dpll_funcs; else if (IS_VALLEYVIEW(dev_priv)) dev_priv->display.funcs.dpll = &vlv_dpll_funcs; else if (IS_G4X(dev_priv)) dev_priv->display.funcs.dpll = &g4x_dpll_funcs; else if (IS_PINEVIEW(dev_priv)) dev_priv->display.funcs.dpll = &pnv_dpll_funcs; else if (DISPLAY_VER(dev_priv) != 2) dev_priv->display.funcs.dpll = &i9xx_dpll_funcs; else dev_priv->display.funcs.dpll = &i8xx_dpll_funcs; } static bool i9xx_has_pps(struct drm_i915_private *dev_priv) { if (IS_I830(dev_priv)) return false; return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll = crtc_state->dpll_hw_state.dpll; enum pipe pipe = crtc->pipe; int i; assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ if (i9xx_has_pps(dev_priv)) assert_pps_unlocked(dev_priv, pipe); intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0); intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); intel_de_write(dev_priv, DPLL(pipe), dpll); /* Wait for the clocks to stabilize. */ intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); if (DISPLAY_VER(dev_priv) >= 4) { intel_de_write(dev_priv, DPLL_MD(pipe), crtc_state->dpll_hw_state.dpll_md); } else { /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. */ intel_de_write(dev_priv, DPLL(pipe), dpll); } /* We do this three times for luck */ for (i = 0; i < 3; i++) { intel_de_write(dev_priv, DPLL(pipe), dpll); intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* wait for warmup */ } } static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 reg_val; /* * PLLB opamp always calibrates to max value of 0x3f, force enable it * and set it to a reasonable value instead. 
*/ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); reg_val &= 0xffffff00; reg_val |= 0x00000030; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); reg_val &= 0x00ffffff; reg_val |= 0x8c000000; vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); reg_val &= 0xffffff00; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); reg_val &= 0x00ffffff; reg_val |= 0xb0000000; vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); } static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; u32 coreclk, reg_val; vlv_dpio_get(dev_priv); bestn = crtc_state->dpll.n; bestm1 = crtc_state->dpll.m1; bestm2 = crtc_state->dpll.m2; bestp1 = crtc_state->dpll.p1; bestp2 = crtc_state->dpll.p2; /* See eDP HDMI DPIO driver vbios notes doc */ /* PLL B needs special handling */ if (pipe == PIPE_B) vlv_pllb_recal_opamp(dev_priv, pipe); /* Set up Tx target for periodic Rcomp update */ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); /* Disable target IRef on PLL */ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); reg_val &= 0x00ffffff; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); /* Disable fast lock */ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); /* Set idtafcrecal before PLL is enabled */ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); mdiv |= ((bestn << DPIO_N_SHIFT)); mdiv |= (1 << DPIO_K_SHIFT); /* * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, * but we don't support that). * Note: don't use the DAC post divider as it seems unstable. 
*/ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); mdiv |= DPIO_ENABLE_CALIBRATION; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); /* Set HBR and RBR LPF coefficients */ if (crtc_state->port_clock == 162000 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x00d0000f); if (intel_crtc_has_dp_encoder(crtc_state)) { /* Use SSC source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 0x0df40000); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 0x0df70000); } else { /* HDMI or VGA */ /* Use bend source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 0x0df70000); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 0x0df40000); } coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; if (intel_crtc_has_dp_encoder(crtc_state)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); vlv_dpio_put(dev_priv); } static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); } void vlv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ assert_pps_unlocked(dev_priv, pipe); /* Enable Refclk */ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { vlv_prepare_pll(crtc_state); _vlv_enable_pll(crtc_state); } intel_de_write(dev_priv, DPLL_MD(pipe), crtc_state->dpll_hw_state.dpll_md); intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 loopfilter, tribuf_calcntr; u32 bestm2, bestp1, bestp2, bestm2_frac; u32 dpio_val; int vco; bestm2_frac = crtc_state->dpll.m2 & 0x3fffff; bestm2 = crtc_state->dpll.m2 >> 22; bestp1 = crtc_state->dpll.p1; bestp2 = crtc_state->dpll.p2; vco = crtc_state->dpll.vco; dpio_val = 0; loopfilter = 0; vlv_dpio_get(dev_priv); /* p1 and p2 divider */ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 5 << DPIO_CHV_S1_DIV_SHIFT | bestp1 << DPIO_CHV_P1_DIV_SHIFT | bestp2 << DPIO_CHV_P2_DIV_SHIFT | 1 << DPIO_CHV_K_DIV_SHIFT); /* Feedback post-divider - m2 */ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); /* Feedback refclk divider - n and m1 */ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), DPIO_CHV_M1_DIV_BY_2 | 1 << DPIO_CHV_N_DIV_SHIFT); /* M2 
fraction division */ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); /* M2 fraction division enable */ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); if (bestm2_frac) dpio_val |= DPIO_CHV_FRAC_DIV_EN; vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); /* Program digital lock detect threshold */ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); if (!bestm2_frac) dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); /* Loop filter */ if (vco == 5400000) { loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); tribuf_calcntr = 0x9; } else if (vco <= 6200000) { loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); tribuf_calcntr = 0x9; } else if (vco <= 6480000) { loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); tribuf_calcntr = 0x8; } else { /* Not supported. Apply the same limits as in the max case */ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); tribuf_calcntr = 0; } vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); /* AFC Recal */ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | DPIO_AFC_RECAL); vlv_dpio_put(dev_priv); } static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 tmp; vlv_dpio_get(dev_priv); /* Enable back the 10bit clock to display controller */ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); tmp |= DPIO_DCLKP_EN; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); vlv_dpio_put(dev_priv); /* * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
*/ udelay(1); /* Enable PLL */ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); /* Check PLL is locked */ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); } void chv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ assert_pps_unlocked(dev_priv, pipe); /* Enable Refclk and SSC */ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { chv_prepare_pll(crtc_state); _chv_enable_pll(crtc_state); } if (pipe != PIPE_A) { /* * WaPixelRepeatModeFixForC0:chv * * DPLLCMD is AWOL. Use chicken bits to propagate * the value from DPLLBMD to either pipe B or C. */ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); intel_de_write(dev_priv, DPLL_MD(PIPE_B), crtc_state->dpll_hw_state.dpll_md); intel_de_write(dev_priv, CBR4_VLV, 0); dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md; /* * DPLLB VGA mode also seems to cause problems. * We should always have it disabled. */ drm_WARN_ON(&dev_priv->drm, (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); } else { intel_de_write(dev_priv, DPLL_MD(pipe), crtc_state->dpll_hw_state.dpll_md); intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } } /** * vlv_force_pll_on - forcibly enable just the PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * @dpll: PLL configuration * * Enable the PLL for @pipe using the supplied @dpll config. To be used * in cases where we need the PLL enabled even when @pipe is not going to * be enabled. 
*/ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_crtc_state *crtc_state; crtc_state = intel_crtc_state_alloc(crtc); if (!crtc_state) return -ENOMEM; crtc_state->cpu_transcoder = (enum transcoder)pipe; crtc_state->pixel_multiplier = 1; crtc_state->dpll = *dpll; crtc_state->output_types = BIT(INTEL_OUTPUT_EDP); if (IS_CHERRYVIEW(dev_priv)) { chv_compute_dpll(crtc_state); chv_enable_pll(crtc_state); } else { vlv_compute_dpll(crtc_state); vlv_enable_pll(crtc_state); } kfree(crtc_state); return 0; } void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 val; /* Make sure the pipe isn't still relying on us */ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; intel_de_write(dev_priv, DPLL(pipe), val); intel_de_posting_read(dev_priv, DPLL(pipe)); } void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 val; /* Make sure the pipe isn't still relying on us */ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; intel_de_write(dev_priv, DPLL(pipe), val); intel_de_posting_read(dev_priv, DPLL(pipe)); vlv_dpio_get(dev_priv); /* Disable 10bit clock to display controller */ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); val &= ~DPIO_DCLKP_EN; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); vlv_dpio_put(dev_priv); } void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* Don't disable pipe or pipe PLLs if needed */ if (IS_I830(dev_priv)) return; /* Make sure the pipe isn't still relying on us */ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); intel_de_posting_read(dev_priv, DPLL(pipe)); } /** * vlv_force_pll_off - forcibly disable just the PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to disable * * Disable the PLL for @pipe. To be used in cases where we need * the PLL enabled even when @pipe is not going to be enabled. */ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) { if (IS_CHERRYVIEW(dev_priv)) chv_disable_pll(dev_priv, pipe); else vlv_disable_pll(dev_priv, pipe); } /* Only for pre-ILK configs */ static void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { bool cur_state; cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; I915_STATE_WARN(dev_priv, cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) { assert_pll(i915, pipe, true); } void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) { assert_pll(i915, pipe, false); }
linux-master
drivers/gpu/drm/i915/display/intel_dpll.c
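/*
 * Illustrative sketch (not part of intel_dpll.c): chv_prepare_pll() above
 * derives the PLL loop-filter coefficients and the tri-buffer calibration
 * count from the VCO frequency before shifting them into CHV_PLL_DW6/DW8
 * over DPIO. The helper below restates that VCO -> coefficient mapping as a
 * standalone table lookup; the struct and function names are invented for
 * the example, while the kHz thresholds and coefficient values mirror the
 * driver code.
 */
#include <stdint.h>

struct chv_loopfilter_example {
	uint32_t prop_coeff;		/* shifted by DPIO_CHV_PROP_COEFF_SHIFT */
	uint32_t int_coeff;		/* shifted by DPIO_CHV_INT_COEFF_SHIFT */
	uint32_t gain_ctrl;		/* shifted by DPIO_CHV_GAIN_CTRL_SHIFT */
	uint32_t tribuf_calcntr;	/* shifted by DPIO_CHV_TDC_TARGET_CNT_SHIFT */
};

static struct chv_loopfilter_example chv_pick_loopfilter(uint32_t vco_khz)
{
	if (vco_khz == 5400000)
		return (struct chv_loopfilter_example){ 0x3, 0x8, 0x1, 0x9 };
	if (vco_khz <= 6200000)
		return (struct chv_loopfilter_example){ 0x5, 0xB, 0x3, 0x9 };
	if (vco_khz <= 6480000)
		return (struct chv_loopfilter_example){ 0x4, 0x9, 0x3, 0x8 };
	/* Unsupported VCO: fall back to the max-case coefficients and clear
	 * the tri-buffer calibration count, as the driver does. */
	return (struct chv_loopfilter_example){ 0x4, 0x9, 0x3, 0x0 };
}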
/* * Copyright © 2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * */ #include <drm/display/drm_dp_dual_mode_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_lspcon.h" #include "intel_hdmi.h" /* LSPCON OUI Vendor ID(signatures) */ #define LSPCON_VENDOR_PARADE_OUI 0x001CF8 #define LSPCON_VENDOR_MCA_OUI 0x0060AD #define DPCD_MCA_LSPCON_HDR_STATUS 0x70003 #define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511 /* AUX addresses to write MCA AVI IF */ #define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0 #define LSPCON_MCA_AVI_IF_CTRL 0x5DF #define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0) #define LSPCON_MCA_AVI_IF_HANDLED (1 << 1) /* AUX addresses to write Parade AVI IF */ #define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516 #define LSPCON_PARADE_AVI_IF_CTRL 0x51E #define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7) #define LSPCON_PARADE_AVI_IF_DATA_SIZE 32 static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) { struct intel_digital_port *dig_port = container_of(lspcon, struct intel_digital_port, lspcon); return &dig_port->dp; } static const char *lspcon_mode_name(enum drm_lspcon_mode mode) { switch (mode) { case DRM_LSPCON_MODE_PCON: return "PCON"; case DRM_LSPCON_MODE_LS: return "LS"; case DRM_LSPCON_MODE_INVALID: return "INVALID"; default: MISSING_CASE(mode); return "INVALID"; } } static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) { struct intel_dp *dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(dp); struct drm_dp_dpcd_ident *ident; u32 vendor_oui; if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { drm_err(&i915->drm, "Can't read description\n"); return false; } ident = &dp->desc.ident; vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) | ident->oui[2]; switch (vendor_oui) { case LSPCON_VENDOR_MCA_OUI: lspcon->vendor = LSPCON_VENDOR_MCA; drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n"); break; case LSPCON_VENDOR_PARADE_OUI: lspcon->vendor = LSPCON_VENDOR_PARADE; drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n"); break; default: drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n"); return false; } return true; } static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) { if (lspcon->vendor == LSPCON_VENDOR_MCA) return DPCD_MCA_LSPCON_HDR_STATUS; else return DPCD_PARADE_LSPCON_HDR_STATUS; } void 
lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 hdr_caps; int ret; ret = drm_dp_dpcd_read(&intel_dp->aux, get_hdr_status_reg(lspcon), &hdr_caps, 1); if (ret < 0) { drm_dbg_kms(&i915->drm, "HDR capability detection failed\n"); lspcon->hdr_supported = false; } else if (hdr_caps & 0x1) { drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n"); lspcon->hdr_supported = true; } } static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &intel_dp->aux.ddc; if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode)) { drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n"); return DRM_LSPCON_MODE_INVALID; } return current_mode; } static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; current_mode = lspcon_get_current_mode(lspcon); if (current_mode == mode) goto out; drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) drm_err(&i915->drm, "LSPCON mode hasn't settled\n"); out: drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n", lspcon_mode_name(current_mode)); return current_mode; } static int lspcon_change_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); int err; enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &intel_dp->aux.ddc; err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode); if (err) { drm_err(&i915->drm, "Error reading LSPCON mode\n"); return err; } if (current_mode == mode) { drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n"); return 0; } err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode); if (err < 0) { drm_err(&i915->drm, "LSPCON mode change failed\n"); return err; } lspcon->mode = mode; drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n"); return 0; } static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 rev; if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, &rev) != 1) { drm_dbg_kms(&i915->drm, "Native AUX CH down\n"); return false; } drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n", rev >> 4, rev & 0xf); return true; } static bool lspcon_probe(struct intel_lspcon *lspcon) { int retry; enum drm_dp_dual_mode_type adaptor_type; struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct i2c_adapter *adapter = &intel_dp->aux.ddc; enum drm_lspcon_mode expected_mode; expected_mode = lspcon_wake_native_aux_ch(lspcon) ? 
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS; /* Lets probe the adaptor and check its type */ for (retry = 0; retry < 6; retry++) { if (retry) usleep_range(500, 1000); adaptor_type = drm_dp_dual_mode_detect(intel_dp->aux.drm_dev, adapter); if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON) break; } if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n", drm_dp_get_dual_mode_type_name(adaptor_type)); return false; } /* Yay ... got a LSPCON device */ drm_dbg_kms(&i915->drm, "LSPCON detected\n"); lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); /* * In the SW state machine, lets Put LSPCON in PCON mode only. * In this way, it will work with both HDMI 1.4 sinks as well as HDMI * 2.0 sinks. */ if (lspcon->mode != DRM_LSPCON_MODE_PCON) { if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { drm_err(&i915->drm, "LSPCON mode change to PCON failed\n"); return false; } } return true; } static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); unsigned long start = jiffies; while (1) { if (intel_digital_port_connected(&dig_port->base)) { drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n", jiffies_to_msecs(jiffies - start)); return; } if (time_after(jiffies, start + msecs_to_jiffies(1000))) break; usleep_range(10000, 15000); } drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n"); } static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) { u8 avi_if_ctrl; u8 retry; ssize_t ret; /* Check if LSPCON FW is ready for data */ for (retry = 0; retry < 5; retry++) { if (retry) usleep_range(200, 300); ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL, &avi_if_ctrl, 1); if (ret < 0) { drm_err(aux->drm_dev, "Failed to read AVI IF control\n"); return false; } if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0) return true; } drm_err(aux->drm_dev, "Parade FW not ready to accept AVI IF\n"); return false; } static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, u8 *avi_buf) { u8 avi_if_ctrl; u8 block_count = 0; u8 *data; u16 reg; ssize_t ret; while (block_count < 4) { if (!lspcon_parade_fw_ready(aux)) { drm_dbg_kms(aux->drm_dev, "LSPCON FW not ready, block %d\n", block_count); return false; } reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET; data = avi_buf + block_count * 8; ret = drm_dp_dpcd_write(aux, reg, data, 8); if (ret < 0) { drm_err(aux->drm_dev, "Failed to write AVI IF block %d\n", block_count); return false; } /* * Once a block of data is written, we have to inform the FW * about this by writing into avi infoframe control register: * - set the kickoff bit[7] to 1 * - write the block no. 
to bits[1:0] */ reg = LSPCON_PARADE_AVI_IF_CTRL; avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count; ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1); if (ret < 0) { drm_err(aux->drm_dev, "Failed to update (0x%x), block %d\n", reg, block_count); return false; } block_count++; } drm_dbg_kms(aux->drm_dev, "Wrote AVI IF blocks successfully\n"); return true; } static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, const u8 *frame, ssize_t len) { u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; /* * Parade's frames contains 32 bytes of data, divided * into 4 frames: * Token byte (first byte of first frame, must be non-zero) * HB0 to HB2 from AVI IF (3 bytes header) * PB0 to PB27 from AVI IF (28 bytes data) * So it should look like this * first block: | <token> <HB0-HB2> <DB0-DB3> | * next 3 blocks: |<DB4-DB11>|<DB12-DB19>|<DB20-DB28>| */ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) { drm_err(aux->drm_dev, "Invalid length of infoframes\n"); return false; } memcpy(&avi_if[1], frame, len); if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) { drm_dbg_kms(aux->drm_dev, "Failed to write infoframe blocks\n"); return false; } return true; } static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, const u8 *buffer, ssize_t len) { int ret; u32 val = 0; u32 retry; u16 reg; const u8 *data = buffer; reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET; while (val < len) { /* DPCD write for AVI IF can fail on a slow FW day, so retry */ for (retry = 0; retry < 5; retry++) { ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1); if (ret == 1) { break; } else if (retry < 4) { mdelay(50); continue; } else { drm_err(aux->drm_dev, "DPCD write failed at:0x%x\n", reg); return false; } } val++; reg++; data++; } val = 0; reg = LSPCON_MCA_AVI_IF_CTRL; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } /* Indicate LSPCON chip about infoframe, clear bit 1 and set bit 0 */ val &= ~LSPCON_MCA_AVI_IF_HANDLED; val |= LSPCON_MCA_AVI_IF_KICKOFF; ret = drm_dp_dpcd_write(aux, reg, &val, 1); if (ret < 0) { drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } val = 0; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } if (val == LSPCON_MCA_AVI_IF_HANDLED) drm_dbg_kms(aux->drm_dev, "AVI IF handled by FW\n"); return true; } void lspcon_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { bool ret = true; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); switch (type) { case HDMI_INFOFRAME_TYPE_AVI: if (lspcon->vendor == LSPCON_VENDOR_MCA) ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, frame, len); else ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, frame, len); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n"); /* It uses the legacy hsw implementation for the same */ hsw_write_infoframe(encoder, crtc_state, type, frame, len); break; default: return; } if (!ret) { drm_err(&i915->drm, "Failed to write infoframes\n"); return; } } void lspcon_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, void *frame, ssize_t len) { /* FIXME implement for AVI Infoframe as well */ if (type == 
HDMI_PACKET_TYPE_GAMUT_METADATA) hsw_read_infoframe(encoder, crtc_state, type, frame, len); } void lspcon_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { ssize_t ret; union hdmi_infoframe frame; u8 buf[VIDEO_DIP_DATA_SIZE]; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!lspcon->active) { drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n"); return; } /* FIXME precompute infoframes */ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, conn_state->connector, adjusted_mode); if (ret < 0) { drm_err(&i915->drm, "couldn't fill AVI infoframe\n"); return; } /* * Currently there is no interface defined to * check user preference between RGB/YCBCR444 * or YCBCR420. So the only possible case for * YCBCR444 usage is driving YCBCR420 output * with LSPCON, when pipe is configured for * YCBCR444 output and LSPCON takes care of * downsampling it. */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) frame.avi.colorspace = HDMI_COLORSPACE_YUV420; else frame.avi.colorspace = HDMI_COLORSPACE_RGB; /* Set the Colorspace as per the HDMI spec */ drm_hdmi_avi_infoframe_colorimetry(&frame.avi, conn_state); /* nonsense combination */ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { drm_hdmi_avi_infoframe_quant_range(&frame.avi, conn_state->connector, adjusted_mode, crtc_state->limited_color_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); } else { frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; } drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state); ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { drm_err(&i915->drm, "Failed to pack AVI IF\n"); return; } dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, buf, ret); } static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux) { int ret; u32 val = 0; u16 reg = LSPCON_MCA_AVI_IF_CTRL; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } return val & LSPCON_MCA_AVI_IF_KICKOFF; } static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) { int ret; u32 val = 0; u16 reg = LSPCON_PARADE_AVI_IF_CTRL; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } return val & LSPCON_PARADE_AVI_IF_KICKOFF; } u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); bool infoframes_enabled; u32 val = 0; u32 mask, tmp; if (lspcon->vendor == LSPCON_VENDOR_MCA) infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux); else infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux); if (infoframes_enabled) val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); if (lspcon->hdr_supported) { tmp = 
intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); mask = VIDEO_DIP_ENABLE_GMP_HSW; if (tmp & mask) val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } return val; } void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) { lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); } bool lspcon_init(struct intel_digital_port *dig_port) { struct intel_dp *intel_dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector *connector = &intel_dp->attached_connector->base; lspcon->active = false; lspcon->mode = DRM_LSPCON_MODE_INVALID; if (!lspcon_probe(lspcon)) { drm_err(&i915->drm, "Failed to probe lspcon\n"); return false; } if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) { drm_err(&i915->drm, "LSPCON DPCD read failed\n"); return false; } if (!lspcon_detect_vendor(lspcon)) { drm_err(&i915->drm, "LSPCON vendor detection failed\n"); return false; } connector->ycbcr_420_allowed = true; lspcon->active = true; drm_dbg_kms(&i915->drm, "Success: LSPCON init\n"); return true; } u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); return dig_port->infoframes_enabled(encoder, pipe_config); } void lspcon_resume(struct intel_digital_port *dig_port) { struct intel_lspcon *lspcon = &dig_port->lspcon; struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return; if (!lspcon->active) { if (!lspcon_init(dig_port)) { drm_err(&i915->drm, "LSPCON init failed on port %c\n", port_name(dig_port->base.port)); return; } } if (lspcon_wake_native_aux_ch(lspcon)) { expected_mode = DRM_LSPCON_MODE_PCON; lspcon_resume_in_pcon_wa(lspcon); } else { expected_mode = DRM_LSPCON_MODE_LS; } if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) return; if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) drm_err(&i915->drm, "LSPCON resume failed\n"); else drm_dbg_kms(&i915->drm, "LSPCON resume success\n"); }
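/*
 * Illustrative sketch (not part of intel_lspcon.c): once the AVI infoframe
 * bytes have been written to the MCA LSPCON, _lspcon_write_avi_infoframe_mca()
 * above pokes the control register at LSPCON_MCA_AVI_IF_CTRL: clear the
 * "handled" bit (bit 1), set the "kickoff" bit (bit 0), write the value back,
 * then read the register again to see whether the firmware reports the frame
 * as handled. The helpers below restate just that bit manipulation on an
 * already-read control byte; the names are invented for the example, the bit
 * values match the defines at the top of the file.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MCA_AVI_IF_KICKOFF	(1 << 0)	/* LSPCON_MCA_AVI_IF_KICKOFF */
#define EXAMPLE_MCA_AVI_IF_HANDLED	(1 << 1)	/* LSPCON_MCA_AVI_IF_HANDLED */

/* Value to write back in order to kick off processing of a new frame. */
static uint8_t example_mca_kickoff_value(uint8_t ctrl)
{
	ctrl &= ~EXAMPLE_MCA_AVI_IF_HANDLED;
	ctrl |= EXAMPLE_MCA_AVI_IF_KICKOFF;
	return ctrl;
}

/* After the kickoff, the firmware signals completion via the handled bit. */
static bool example_mca_frame_handled(uint8_t ctrl)
{
	return ctrl == EXAMPLE_MCA_AVI_IF_HANDLED;
}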
linux-master
drivers/gpu/drm/i915/display/intel_lspcon.c
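/*
 * Illustrative sketch (not part of intel_lspcon.c): for the Parade LSPCON,
 * _lspcon_write_avi_infoframe_parade() above packs the AVI infoframe into a
 * 32-byte buffer - a non-zero token byte followed by the 3 header bytes and
 * up to 28 payload bytes - which is then pushed to the firmware in four
 * 8-byte blocks, each followed by a kickoff write. The helper below shows
 * only the packing step; the function name and the fixed token value 1 are
 * invented for the example, the layout mirrors the driver code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EXAMPLE_PARADE_AVI_IF_SIZE	32	/* LSPCON_PARADE_AVI_IF_DATA_SIZE */
#define EXAMPLE_PARADE_BLOCK_SIZE	8	/* written as 4 blocks of 8 bytes */

static bool example_pack_parade_avi_if(uint8_t out[EXAMPLE_PARADE_AVI_IF_SIZE],
				       const uint8_t *frame, size_t len)
{
	if (len > EXAMPLE_PARADE_AVI_IF_SIZE - 1)
		return false;	/* frame must fit after the token byte */

	memset(out, 0, EXAMPLE_PARADE_AVI_IF_SIZE);
	out[0] = 1;			/* token byte, must be non-zero */
	memcpy(&out[1], frame, len);	/* HB0-HB2, then PB0-PB27 */
	return true;
}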
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> #include "i915_drv.h" #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_display_types.h" #include "intel_load_detect.h" /* VESA 640x480x72Hz mode to set on the pipe */ static const struct drm_display_mode load_detect_mode = { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; static int intel_modeset_disable_planes(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_plane *plane; struct drm_plane_state *plane_state; int ret, i; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) return ret; for_each_new_plane_in_state(state, plane, plane_state, i) { if (plane_state->crtc != crtc) continue; ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); if (ret) return ret; drm_atomic_set_fb_for_plane(plane_state, NULL); } return 0; } struct drm_atomic_state * intel_load_detect_get_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_crtc *possible_crtc; struct intel_crtc *crtc = NULL; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_config *config = &dev->mode_config; struct drm_atomic_state *state = NULL, *restore_state = NULL; struct drm_connector_state *connector_state; struct intel_crtc_state *crtc_state; int ret; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.base.id, encoder->base.name); drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); /* * Algorithm gets a little messy: * * - if the connector already has an assigned crtc, use it (but make * sure it's on first) * * - try to find the first unused crtc that can drive this connector, * and use that if we find one */ /* See if we already have a CRTC for this connector */ if (connector->state->crtc) { crtc = to_intel_crtc(connector->state->crtc); ret = drm_modeset_lock(&crtc->base.mutex, ctx); if (ret) goto fail; /* Make sure the crtc and connector are running */ goto found; } /* Find an unused one (if possible) */ for_each_intel_crtc(dev, possible_crtc) { if (!(encoder->base.possible_crtcs & drm_crtc_mask(&possible_crtc->base))) continue; ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx); if (ret) goto fail; if (possible_crtc->base.state->enable) { drm_modeset_unlock(&possible_crtc->base.mutex); continue; } crtc = possible_crtc; break; } /* * If we didn't find an unused CRTC, don't use any. 
*/ if (!crtc) { drm_dbg_kms(&dev_priv->drm, "no pipe available for load-detect\n"); ret = -ENODEV; goto fail; } found: state = drm_atomic_state_alloc(dev); restore_state = drm_atomic_state_alloc(dev); if (!state || !restore_state) { ret = -ENOMEM; goto fail; } state->acquire_ctx = ctx; to_intel_atomic_state(state)->internal = true; restore_state->acquire_ctx = ctx; to_intel_atomic_state(restore_state)->internal = true; connector_state = drm_atomic_get_connector_state(state, connector); if (IS_ERR(connector_state)) { ret = PTR_ERR(connector_state); goto fail; } ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base); if (ret) goto fail; crtc_state = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto fail; } crtc_state->uapi.active = true; ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &load_detect_mode); if (ret) goto fail; ret = intel_modeset_disable_planes(state, &crtc->base); if (ret) goto fail; ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); if (!ret) ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base)); if (!ret) ret = drm_atomic_add_affected_planes(restore_state, &crtc->base); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to create a copy of old state to restore: %i\n", ret); goto fail; } ret = drm_atomic_commit(state); if (ret) { drm_dbg_kms(&dev_priv->drm, "failed to set mode on load-detect pipe\n"); goto fail; } drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ intel_crtc_wait_for_next_vblank(crtc); return restore_state; fail: if (state) { drm_atomic_state_put(state); state = NULL; } if (restore_state) { drm_atomic_state_put(restore_state); restore_state = NULL; } if (ret == -EDEADLK) return ERR_PTR(ret); return NULL; } void intel_load_detect_release_pipe(struct drm_connector *connector, struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *intel_encoder = intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); struct drm_encoder *encoder = &intel_encoder->base; int ret; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); if (IS_ERR_OR_NULL(state)) return; ret = drm_atomic_helper_commit_duplicated_state(state, ctx); if (ret) drm_dbg_kms(&i915->drm, "Couldn't release load detect pipe: %i\n", ret); drm_atomic_state_put(state); }
linux-master
drivers/gpu/drm/i915/display/intel_load_detect.c
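/*
 * Illustrative sketch (not part of intel_load_detect.c): a caller of the two
 * helpers above typically runs inside a drm_modeset_acquire_ctx retry loop,
 * since intel_load_detect_get_pipe() returns ERR_PTR(-EDEADLK) when the
 * caller has to back off and retry, NULL on failure, and otherwise the saved
 * state to hand back to intel_load_detect_release_pipe(). The example_ names
 * and the probe_with_pipe_active() placeholder are invented for this sketch;
 * the locking calls are the standard drm_modeset_* API and real callers may
 * arrange the flow differently.
 */
static void probe_with_pipe_active(struct drm_connector *connector);	/* placeholder */

static int example_detect_with_load_detect(struct drm_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *restore;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* intel_load_detect_get_pipe() expects connection_mutex to be held. */
	ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
	if (ret)
		goto backoff;

	restore = intel_load_detect_get_pipe(connector, &ctx);
	if (IS_ERR(restore)) {
		ret = PTR_ERR(restore);		/* -EDEADLK from the helper */
		goto backoff;
	}

	if (restore) {
		/* The pipe is now driving the VESA 640x480 load-detect mode. */
		probe_with_pipe_active(connector);
		intel_load_detect_release_pipe(connector, restore, &ctx);
	}
	ret = 0;

backoff:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}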