// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/dmi.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_quirks.h"

static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
{
	i915->display.quirks.mask |= BIT(quirk);
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
	drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
	drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
	drm_info(&i915->drm, "applying backlight present quirk\n");
}

/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
 * which is 300 ms greater than eDP spec T12 min.
 */
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
	drm_info(&i915->drm, "Applying T12 delay quirk\n");
}

/*
 * GeminiLake NUC HDMI outputs require additional off time
 * this allows the onboard retimer to correctly sync to signal
 */
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
	drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}

static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
{
	intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
	drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_i915_private *i915);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_i915_private *i915);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static int intel_dmi_no_pps_backlight(const struct dmi_system_id *id)
{
	DRM_INFO("No pps backlight support on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "Thundersoft TST178 tablet",
				/* DMI strings are too generic, also match on BIOS date */
				.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
					    DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
					    DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
					    DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_no_pps_backlight,
				.ident = "Google Lillipup sku524294",
				.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
					    DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
					    DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524294"),
				},
			},
			{
				.callback = intel_dmi_no_pps_backlight,
				.ident = "Google Lillipup sku524295",
				.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
					    DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
					    DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524295"),
				},
			},
			{ }
		},
		.hook = quirk_no_pps_backlight_power_hook,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX*/
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	/* ECS Liva Q2 */
	{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
	/* HP Notebook - 14-r206nv */
	{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};

void intel_init_quirks(struct drm_i915_private *i915)
{
	struct pci_dev *d = to_pci_dev(i915->drm.dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(i915);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(i915);
	}
}

bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
{
	return i915->display.quirks.mask & BIT(quirk);
}
repo_name: linux-master
file_path: drivers/gpu/drm/i915/display/intel_quirks.c
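The quirk machinery in intel_quirks.c is driven entirely by the two static tables and the matching loop in intel_init_quirks(). As a reading aid, here is a minimal, self-contained user-space sketch (not part of the kernel file) of the same PCI device/subsystem matching pattern, including the PCI_ANY_ID wildcard handling; struct quirk, apply_quirks(), main() and the IDs passed in the second call are names and values invented for the illustration, while the one table entry is copied from the table above.

/* Illustration only -- not part of the kernel sources in this dump. */
#include <stdio.h>

#define PCI_ANY_ID (~0)

struct quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(void);
};

static void quirk_backlight_present(void)
{
	printf("applying backlight present quirk\n");
}

static const struct quirk quirks[] = {
	/* Acer C720 Chromebook entry, copied from the table above */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
};

static void apply_quirks(int device, int subsys_vendor, int subsys_device)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		/* A quirk fires when the device ID matches and each subsystem
		 * field either matches or is the PCI_ANY_ID wildcard. */
		if (device == q->device &&
		    (subsys_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (subsys_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook();
	}
}

int main(void)
{
	apply_quirks(0x0a06, 0x1025, 0x0a11);	/* matches -> quirk hook runs */
	apply_quirks(0x1234, 0xabcd, 0x5678);	/* made-up IDs -> no quirk */
	return 0;
}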
/* * Copyright © 2009 * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Daniel Vetter <[email protected]> * * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c */ #include <drm/drm_fourcc.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_frontbuffer.h" #include "intel_overlay.h" #include "intel_pci_config.h" /* Limits for overlay size. According to intel doc, the real limits are: * Y width: 4095, UV width (planar): 2047, Y height: 2047, * UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use * the mininum of both. 
*/ #define IMAGE_MAX_WIDTH 2048 #define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */ /* on 830 and 845 these large limits result in the card hanging */ #define IMAGE_MAX_WIDTH_LEGACY 1024 #define IMAGE_MAX_HEIGHT_LEGACY 1088 /* overlay register definitions */ /* OCMD register */ #define OCMD_TILED_SURFACE (0x1<<19) #define OCMD_MIRROR_MASK (0x3<<17) #define OCMD_MIRROR_MODE (0x3<<17) #define OCMD_MIRROR_HORIZONTAL (0x1<<17) #define OCMD_MIRROR_VERTICAL (0x2<<17) #define OCMD_MIRROR_BOTH (0x3<<17) #define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */ #define OCMD_UV_SWAP (0x1<<14) /* YVYU */ #define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */ #define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */ #define OCMD_SOURCE_FORMAT_MASK (0xf<<10) #define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */ #define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */ #define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */ #define OCMD_YUV_422_PACKED (0x8<<10) #define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */ #define OCMD_YUV_420_PLANAR (0xc<<10) #define OCMD_YUV_422_PLANAR (0xd<<10) #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) #define OCMD_BUF_TYPE_MASK (0x1<<5) #define OCMD_BUF_TYPE_FRAME (0x0<<5) #define OCMD_BUF_TYPE_FIELD (0x1<<5) #define OCMD_TEST_MODE (0x1<<4) #define OCMD_BUFFER_SELECT (0x3<<2) #define OCMD_BUFFER0 (0x0<<2) #define OCMD_BUFFER1 (0x1<<2) #define OCMD_FIELD_SELECT (0x1<<2) #define OCMD_FIELD0 (0x0<<1) #define OCMD_FIELD1 (0x1<<1) #define OCMD_ENABLE (0x1<<0) /* OCONFIG register */ #define OCONF_PIPE_MASK (0x1<<18) #define OCONF_PIPE_A (0x0<<18) #define OCONF_PIPE_B (0x1<<18) #define OCONF_GAMMA2_ENABLE (0x1<<16) #define OCONF_CSC_MODE_BT601 (0x0<<5) #define OCONF_CSC_MODE_BT709 (0x1<<5) #define OCONF_CSC_BYPASS (0x1<<4) #define OCONF_CC_OUT_8BIT (0x1<<3) #define OCONF_TEST_MODE (0x1<<2) #define OCONF_THREE_LINE_BUFFER (0x1<<0) #define OCONF_TWO_LINE_BUFFER (0x0<<0) /* DCLRKM (dst-key) register */ #define DST_KEY_ENABLE (0x1<<31) #define CLK_RGB24_MASK 0x0 #define CLK_RGB16_MASK 0x070307 #define CLK_RGB15_MASK 0x070707 #define RGB30_TO_COLORKEY(c) \ ((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2)) #define RGB16_TO_COLORKEY(c) \ ((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3)) #define RGB15_TO_COLORKEY(c) \ ((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3)) #define RGB8I_TO_COLORKEY(c) \ ((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0)) /* overlay flip addr flag */ #define OFC_UPDATE 0x1 /* polyphase filter coefficients */ #define N_HORIZ_Y_TAPS 5 #define N_VERT_Y_TAPS 3 #define N_HORIZ_UV_TAPS 3 #define N_VERT_UV_TAPS 3 #define N_PHASES 17 #define MAX_TAPS 5 /* memory bufferd overlay registers */ struct overlay_registers { u32 OBUF_0Y; u32 OBUF_1Y; u32 OBUF_0U; u32 OBUF_0V; u32 OBUF_1U; u32 OBUF_1V; u32 OSTRIDE; u32 YRGB_VPH; u32 UV_VPH; u32 HORZ_PH; u32 INIT_PHS; u32 DWINPOS; u32 DWINSZ; u32 SWIDTH; u32 SWIDTHSW; u32 SHEIGHT; u32 YRGBSCALE; u32 UVSCALE; u32 OCLRC0; u32 OCLRC1; u32 DCLRKV; u32 DCLRKM; u32 SCLRKVH; u32 SCLRKVL; u32 SCLRKEN; u32 OCONFIG; u32 OCMD; u32 RESERVED1; /* 0x6C */ u32 OSTART_0Y; u32 OSTART_1Y; u32 OSTART_0U; u32 OSTART_0V; u32 OSTART_1U; u32 OSTART_1V; u32 OTILEOFF_0Y; u32 OTILEOFF_1Y; u32 OTILEOFF_0U; u32 OTILEOFF_0V; u32 OTILEOFF_1U; u32 OTILEOFF_1V; u32 FASTHSCALE; /* 0xA0 */ u32 UVSCALEV; /* 0xA4 */ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 
0x1FC */ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; }; struct intel_overlay { struct drm_i915_private *i915; struct intel_context *context; struct intel_crtc *crtc; struct i915_vma *vma; struct i915_vma *old_vma; struct intel_frontbuffer *frontbuffer; bool active; bool pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ u32 color_key:24; u32 color_key_enabled:1; u32 brightness, contrast, saturation; u32 old_xscale, old_yscale; /* register access */ struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; u32 flip_addr; /* flip handling */ struct i915_active last_flip; void (*flip_complete)(struct intel_overlay *ovl); }; static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, bool enable) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u8 val; /* WA_OVERLAY_CLKGATE:alm */ if (enable) intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0); else intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), OVRUNIT_CLOCK_GATE_DISABLE); /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val); if (enable) val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE; else val |= I830_L2_CACHE_CLOCK_GATE_DISABLE; pci_bus_write_config_byte(pdev->bus, PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); } static struct i915_request * alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *)) { struct i915_request *rq; int err; overlay->flip_complete = fn; rq = i915_request_create(overlay->context); if (IS_ERR(rq)) return rq; err = i915_active_add_request(&overlay->last_flip, rq); if (err) { i915_request_add(rq); return ERR_PTR(err); } return rq; } /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; struct i915_request *rq; u32 *cs; drm_WARN_ON(&dev_priv->drm, overlay->active); rq = alloc_request(overlay, NULL); if (IS_ERR(rq)) return PTR_ERR(rq); cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { i915_request_add(rq); return PTR_ERR(cs); } overlay->active = true; if (IS_I830(dev_priv)) i830_overlay_clock_gating(dev_priv, false); *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON; *cs++ = overlay->flip_addr | OFC_UPDATE; *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; *cs++ = MI_NOOP; intel_ring_advance(rq, cs); i915_request_add(rq); return i915_active_wait(&overlay->last_flip); } static void intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) { enum pipe pipe = overlay->crtc->pipe; struct intel_frontbuffer *frontbuffer = NULL; drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); if (vma) frontbuffer = intel_frontbuffer_get(vma->obj); intel_frontbuffer_track(overlay->frontbuffer, frontbuffer, INTEL_FRONTBUFFER_OVERLAY(pipe)); if (overlay->frontbuffer) intel_frontbuffer_put(overlay->frontbuffer); overlay->frontbuffer = frontbuffer; intel_frontbuffer_flip_prepare(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(pipe)); overlay->old_vma = overlay->vma; if (vma) overlay->vma = i915_vma_get(vma); else overlay->vma = NULL; } /* overlay needs to be enabled in OCMD reg */ static int 
intel_overlay_continue(struct intel_overlay *overlay, struct i915_vma *vma, bool load_polyphase_filter) { struct drm_i915_private *dev_priv = overlay->i915; struct i915_request *rq; u32 flip_addr = overlay->flip_addr; u32 tmp, *cs; drm_WARN_ON(&dev_priv->drm, !overlay->active); if (load_polyphase_filter) flip_addr |= OFC_UPDATE; /* check for underruns */ tmp = intel_de_read(dev_priv, DOVSTA); if (tmp & (1 << 17)) drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp); rq = alloc_request(overlay, NULL); if (IS_ERR(rq)) return PTR_ERR(rq); cs = intel_ring_begin(rq, 2); if (IS_ERR(cs)) { i915_request_add(rq); return PTR_ERR(cs); } *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; *cs++ = flip_addr; intel_ring_advance(rq, cs); intel_overlay_flip_prepare(overlay, vma); i915_request_add(rq); return 0; } static void intel_overlay_release_old_vma(struct intel_overlay *overlay) { struct i915_vma *vma; vma = fetch_and_zero(&overlay->old_vma); if (drm_WARN_ON(&overlay->i915->drm, !vma)) return; intel_frontbuffer_flip_complete(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); i915_vma_unpin(vma); i915_vma_put(vma); } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { intel_overlay_release_old_vma(overlay); } static void intel_overlay_off_tail(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; intel_overlay_release_old_vma(overlay); overlay->crtc->overlay = NULL; overlay->crtc = NULL; overlay->active = false; if (IS_I830(dev_priv)) i830_overlay_clock_gating(dev_priv, true); } static void intel_overlay_last_flip_retire(struct i915_active *active) { struct intel_overlay *overlay = container_of(active, typeof(*overlay), last_flip); if (overlay->flip_complete) overlay->flip_complete(overlay); } /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { struct i915_request *rq; u32 *cs, flip_addr = overlay->flip_addr; drm_WARN_ON(&overlay->i915->drm, !overlay->active); /* According to intel docs the overlay hw may hang (when switching * off) without loading the filter coeffs. It is however unclear whether * this applies to the disabling of the overlay or to the switching off * of the hw. Do it in both cases */ flip_addr |= OFC_UPDATE; rq = alloc_request(overlay, intel_overlay_off_tail); if (IS_ERR(rq)) return PTR_ERR(rq); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) { i915_request_add(rq); return PTR_ERR(cs); } /* wait for overlay to go idle */ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; *cs++ = flip_addr; *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; /* turn overlay off */ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF; *cs++ = flip_addr; *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; intel_ring_advance(rq, cs); intel_overlay_flip_prepare(overlay, NULL); i915_request_add(rq); return i915_active_wait(&overlay->last_flip); } /* recover from an interruption due to a signal * We have to be careful not to repeat work forever an make forward progess. */ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { return i915_active_wait(&overlay->last_flip); } /* Wait for pending overlay flip and release old frame. 
* Needs to be called before the overlay register are changed * via intel_overlay_(un)map_regs */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; struct i915_request *rq; u32 *cs; /* * Only wait if there is actually an old frame to release to * guarantee forward progress. */ if (!overlay->old_vma) return 0; if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) { intel_overlay_release_old_vid_tail(overlay); return 0; } rq = alloc_request(overlay, intel_overlay_release_old_vid_tail); if (IS_ERR(rq)) return PTR_ERR(rq); cs = intel_ring_begin(rq, 2); if (IS_ERR(cs)) { i915_request_add(rq); return PTR_ERR(cs); } *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; *cs++ = MI_NOOP; intel_ring_advance(rq, cs); i915_request_add(rq); return i915_active_wait(&overlay->last_flip); } void intel_overlay_reset(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay = dev_priv->display.overlay; if (!overlay) return; overlay->old_xscale = 0; overlay->old_yscale = 0; overlay->crtc = NULL; overlay->active = false; } static int packed_depth_bytes(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: return 4; case I915_OVERLAY_YUV411: /* return 6; not implemented */ default: return -EINVAL; } } static int packed_width_bytes(u32 format, short width) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: return width << 1; default: return -EINVAL; } } static int uv_hsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: case I915_OVERLAY_YUV420: return 2; case I915_OVERLAY_YUV411: case I915_OVERLAY_YUV410: return 4; default: return -EINVAL; } } static int uv_vsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV420: case I915_OVERLAY_YUV410: return 2; case I915_OVERLAY_YUV422: case I915_OVERLAY_YUV411: return 1; default: return -EINVAL; } } static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) { u32 sw; if (DISPLAY_VER(dev_priv) == 2) sw = ALIGN((offset & 31) + width, 32); else sw = ALIGN((offset & 63) + width, 64); if (sw == 0) return 0; return (sw - 32) >> 3; } static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = { [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, }, [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, }, [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, }, [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, }, [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, }, [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, }, [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, }, [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, }, [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, }, [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, }, [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, }, [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, }, [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, }, [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, }, [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, }, [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, }, [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, }, }; static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = { [ 0] = { 0x3000, 0x1800, 0x1800, }, [ 1] = { 0xb000, 0x18d0, 0x2e60, }, [ 2] = { 0xb000, 0x1990, 0x2ce0, }, [ 3] = { 0xb020, 0x1a68, 0x2b40, }, [ 4] = { 0xb040, 0x1b20, 0x29e0, }, [ 5] = { 0xb060, 0x1bd8, 0x2880, }, [ 6] = { 0xb080, 0x1c88, 0x3e60, }, [ 7] = { 0xb0a0, 0x1d28, 0x3c00, 
}, [ 8] = { 0xb0c0, 0x1db8, 0x39e0, }, [ 9] = { 0xb0e0, 0x1e40, 0x37e0, }, [10] = { 0xb100, 0x1eb8, 0x3620, }, [11] = { 0xb100, 0x1f18, 0x34a0, }, [12] = { 0xb100, 0x1f68, 0x3360, }, [13] = { 0xb0e0, 0x1fa8, 0x3240, }, [14] = { 0xb0c0, 0x1fe0, 0x3140, }, [15] = { 0xb060, 0x1ff0, 0x30a0, }, [16] = { 0x3000, 0x0800, 0x3000, }, }; static void update_polyphase_filter(struct overlay_registers __iomem *regs) { memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs)); } static bool update_scaling_factors(struct intel_overlay *overlay, struct overlay_registers __iomem *regs, struct drm_intel_overlay_put_image *params) { /* fixed point with a 12 bit shift */ u32 xscale, yscale, xscale_UV, yscale_UV; #define FP_SHIFT 12 #define FRACT_MASK 0xfff bool scale_changed = false; int uv_hscale = uv_hsubsampling(params->flags); int uv_vscale = uv_vsubsampling(params->flags); if (params->dst_width > 1) xscale = ((params->src_scan_width - 1) << FP_SHIFT) / params->dst_width; else xscale = 1 << FP_SHIFT; if (params->dst_height > 1) yscale = ((params->src_scan_height - 1) << FP_SHIFT) / params->dst_height; else yscale = 1 << FP_SHIFT; /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ xscale_UV = xscale/uv_hscale; yscale_UV = yscale/uv_vscale; /* make the Y scale to UV scale ratio an exact multiply */ xscale = xscale_UV * uv_hscale; yscale = yscale_UV * uv_vscale; /*} else { xscale_UV = 0; yscale_UV = 0; }*/ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) scale_changed = true; overlay->old_xscale = xscale; overlay->old_yscale = yscale; iowrite32(((yscale & FRACT_MASK) << 20) | ((xscale >> FP_SHIFT) << 16) | ((xscale & FRACT_MASK) << 3), &regs->YRGBSCALE); iowrite32(((yscale_UV & FRACT_MASK) << 20) | ((xscale_UV >> FP_SHIFT) << 16) | ((xscale_UV & FRACT_MASK) << 3), &regs->UVSCALE); iowrite32((((yscale >> FP_SHIFT) << 16) | ((yscale_UV >> FP_SHIFT) << 0)), &regs->UVSCALEV); if (scale_changed) update_polyphase_filter(regs); return scale_changed; } static void update_colorkey(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { const struct intel_plane_state *state = to_intel_plane_state(overlay->crtc->base.primary->state); u32 key = overlay->color_key; u32 format = 0; u32 flags = 0; if (overlay->color_key_enabled) flags |= DST_KEY_ENABLE; if (state->uapi.visible) format = state->hw.fb->format->format; switch (format) { case DRM_FORMAT_C8: key = RGB8I_TO_COLORKEY(key); flags |= CLK_RGB24_MASK; break; case DRM_FORMAT_XRGB1555: key = RGB15_TO_COLORKEY(key); flags |= CLK_RGB15_MASK; break; case DRM_FORMAT_RGB565: key = RGB16_TO_COLORKEY(key); flags |= CLK_RGB16_MASK; break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: key = RGB30_TO_COLORKEY(key); flags |= CLK_RGB24_MASK; break; default: flags |= CLK_RGB24_MASK; break; } iowrite32(key, &regs->DCLRKV); iowrite32(flags, &regs->DCLRKM); } static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params) { u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; if (params->flags & I915_OVERLAY_YUV_PLANAR) { switch (params->flags & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: cmd |= OCMD_YUV_422_PLANAR; break; case I915_OVERLAY_YUV420: cmd |= OCMD_YUV_420_PLANAR; break; case I915_OVERLAY_YUV411: case I915_OVERLAY_YUV410: cmd |= OCMD_YUV_410_PLANAR; break; } } else { /* YUV packed */ switch (params->flags & I915_OVERLAY_DEPTH_MASK) { case I915_OVERLAY_YUV422: cmd |= OCMD_YUV_422_PACKED; break; case 
I915_OVERLAY_YUV411: cmd |= OCMD_YUV_411_PACKED; break; } switch (params->flags & I915_OVERLAY_SWAP_MASK) { case I915_OVERLAY_NO_SWAP: break; case I915_OVERLAY_UV_SWAP: cmd |= OCMD_UV_SWAP; break; case I915_OVERLAY_Y_SWAP: cmd |= OCMD_Y_SWAP; break; case I915_OVERLAY_Y_AND_UV_SWAP: cmd |= OCMD_Y_AND_UV_SWAP; break; } } return cmd; } static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo) { struct i915_gem_ww_ctx ww; struct i915_vma *vma; int ret; i915_gem_ww_ctx_init(&ww, true); retry: ret = i915_gem_object_lock(new_bo, &ww); if (!ret) { vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, NULL, PIN_MAPPABLE); ret = PTR_ERR_OR_ZERO(vma); } if (ret == -EDEADLK) { ret = i915_gem_ww_ctx_backoff(&ww); if (!ret) goto retry; } i915_gem_ww_ctx_fini(&ww); if (ret) return ERR_PTR(ret); return vma; } static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct drm_i915_gem_object *new_bo, struct drm_intel_overlay_put_image *params) { struct overlay_registers __iomem *regs = overlay->regs; struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; bool scale_changed = false; struct i915_vma *vma; int ret, tmp_width; drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) return ret; atomic_inc(&dev_priv->gpu_error.pending_fb_pin); vma = intel_overlay_pin_fb(new_bo); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out_pin_section; } i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB); if (!overlay->active) { const struct intel_crtc_state *crtc_state = overlay->crtc->config; u32 oconfig = 0; if (crtc_state->gamma_enable && crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) oconfig |= OCONF_CC_OUT_8BIT; if (crtc_state->gamma_enable) oconfig |= OCONF_GAMMA2_ENABLE; if (DISPLAY_VER(dev_priv) == 4) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; iowrite32(oconfig, &regs->OCONFIG); ret = intel_overlay_on(overlay); if (ret != 0) goto out_unpin; } iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS); iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ); if (params->flags & I915_OVERLAY_YUV_PACKED) tmp_width = packed_width_bytes(params->flags, params->src_width); else tmp_width = params->src_width; swidth = params->src_width; swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_height; iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y); ostride = params->stride_Y; if (params->flags & I915_OVERLAY_YUV_PLANAR) { int uv_hscale = uv_hsubsampling(params->flags); int uv_vscale = uv_vsubsampling(params->flags); u32 tmp_U, tmp_V; swidth |= (params->src_width / uv_hscale) << 16; sheight |= (params->src_height / uv_vscale) << 16; tmp_U = calc_swidthsw(dev_priv, params->offset_U, params->src_width / uv_hscale); tmp_V = calc_swidthsw(dev_priv, params->offset_V, params->src_width / uv_hscale); swidthsw |= max(tmp_U, tmp_V) << 16; iowrite32(i915_ggtt_offset(vma) + params->offset_U, &regs->OBUF_0U); iowrite32(i915_ggtt_offset(vma) + params->offset_V, &regs->OBUF_0V); ostride |= params->stride_UV << 16; } iowrite32(swidth, &regs->SWIDTH); iowrite32(swidthsw, &regs->SWIDTHSW); iowrite32(sheight, &regs->SHEIGHT); iowrite32(ostride, &regs->OSTRIDE); scale_changed = update_scaling_factors(overlay, regs, params); update_colorkey(overlay, regs); iowrite32(overlay_cmd_reg(params), &regs->OCMD); ret = intel_overlay_continue(overlay, vma, scale_changed); if (ret) goto out_unpin; return 0; out_unpin: i915_vma_unpin(vma); out_pin_section: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); return ret; } int intel_overlay_switch_off(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; int ret; drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) return ret; if (!overlay->active) return 0; ret = intel_overlay_release_old_vid(overlay); if (ret != 0) return ret; iowrite32(0, &overlay->regs->OCMD); return intel_overlay_off(overlay); } static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, struct intel_crtc *crtc) { if (!crtc->active) return -EINVAL; /* can't use the overlay with double wide pipe */ if (crtc->config->double_wide) return -EINVAL; return 0; } static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ if (DISPLAY_VER(dev_priv) >= 4) { u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS); /* on i965 use the PGM reg to read out the autoscaler values */ ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp); } else { u32 tmp; if (intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_VERT_AUTO_SCALE) tmp = intel_de_read(dev_priv, PFIT_AUTO_RATIOS); else tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS); ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp); } overlay->pfit_vscale_ratio = ratio; } static int check_overlay_dst(struct intel_overlay *overlay, struct drm_intel_overlay_put_image *rec) { const struct intel_crtc_state *crtc_state = overlay->crtc->config; struct drm_rect req, clipped; drm_rect_init(&req, rec->dst_x, rec->dst_y, rec->dst_width, rec->dst_height); clipped = req; drm_rect_intersect(&clipped, 
&crtc_state->pipe_src); if (!drm_rect_visible(&clipped) || !drm_rect_equals(&clipped, &req)) return -EINVAL; return 0; } static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec) { u32 tmp; /* downscaling limit is 8.0 */ tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16; if (tmp > 7) return -EINVAL; tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16; if (tmp > 7) return -EINVAL; return 0; } static int check_overlay_src(struct drm_i915_private *dev_priv, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); u32 stride_mask; int depth; u32 tmp; /* check src dimensions */ if (IS_I845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; } else { if (rec->src_height > IMAGE_MAX_HEIGHT || rec->src_width > IMAGE_MAX_WIDTH) return -EINVAL; } /* better safe than sorry, use 4 as the maximal subsampling ratio */ if (rec->src_height < N_VERT_Y_TAPS*4 || rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: /* not implemented */ return -EINVAL; case I915_OVERLAY_YUV_PACKED: if (uv_vscale != 1) return -EINVAL; depth = packed_depth_bytes(rec->flags); if (depth < 0) return depth; /* ignore UV planes */ rec->stride_UV = 0; rec->offset_U = 0; rec->offset_V = 0; /* check pixel alignment */ if (rec->offset_Y % depth) return -EINVAL; break; case I915_OVERLAY_YUV_PLANAR: if (uv_vscale < 0 || uv_hscale < 0) return -EINVAL; /* no offset restrictions for planar formats */ break; default: return -EINVAL; } if (rec->src_width % uv_hscale) return -EINVAL; /* stride checking */ if (IS_I830(dev_priv) || IS_I845G(dev_priv)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 
4096 : 8192; if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) return -EINVAL; /* check buffer dimensions */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: case I915_OVERLAY_YUV_PACKED: /* always 4 Y values per depth pixels */ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) return -EINVAL; tmp = rec->stride_Y*rec->src_height; if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; break; case I915_OVERLAY_YUV_PLANAR: if (rec->src_width > rec->stride_Y) return -EINVAL; if (rec->src_width/uv_hscale > rec->stride_UV) return -EINVAL; tmp = rec->stride_Y * rec->src_height; if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; tmp = rec->stride_UV * (rec->src_height / uv_vscale); if (rec->offset_U + tmp > new_bo->base.size || rec->offset_V + tmp > new_bo->base.size) return -EINVAL; break; } return 0; } int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_put_image *params = data; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; struct drm_i915_gem_object *new_bo; int ret; overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; } if (!(params->flags & I915_OVERLAY_ENABLE)) { drm_modeset_lock_all(dev); ret = intel_overlay_switch_off(overlay); drm_modeset_unlock_all(dev); return ret; } drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id); if (!drmmode_crtc) return -ENOENT; crtc = to_intel_crtc(drmmode_crtc); new_bo = i915_gem_object_lookup(file_priv, params->bo_handle); if (!new_bo) return -ENOENT; drm_modeset_lock_all(dev); if (i915_gem_object_is_tiled(new_bo)) { drm_dbg_kms(&dev_priv->drm, "buffer used for overlay image can not be tiled\n"); ret = -EINVAL; goto out_unlock; } ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) goto out_unlock; if (overlay->crtc != crtc) { ret = intel_overlay_switch_off(overlay); if (ret != 0) goto out_unlock; ret = check_overlay_possible_on_crtc(overlay, crtc); if (ret != 0) goto out_unlock; overlay->crtc = crtc; crtc->overlay = overlay; /* line too wide, i.e. one-line-mode */ if (drm_rect_width(&crtc->config->pipe_src) > 1024 && crtc->config->gmch_pfit.control & PFIT_ENABLE) { overlay->pfit_active = true; update_pfit_vscale_ratio(overlay); } else overlay->pfit_active = false; } ret = check_overlay_dst(overlay, params); if (ret != 0) goto out_unlock; if (overlay->pfit_active) { params->dst_y = (((u32)params->dst_y << 12) / overlay->pfit_vscale_ratio); /* shifting right rounds downwards, so add 1 */ params->dst_height = (((u32)params->dst_height << 12) / overlay->pfit_vscale_ratio) + 1; } if (params->src_scan_height > params->src_height || params->src_scan_width > params->src_width) { ret = -EINVAL; goto out_unlock; } ret = check_overlay_src(dev_priv, params, new_bo); if (ret != 0) goto out_unlock; /* Check scaling after src size to prevent a divide-by-zero. 
*/ ret = check_overlay_scaling(params); if (ret != 0) goto out_unlock; ret = intel_overlay_do_put_image(overlay, new_bo, params); if (ret != 0) goto out_unlock; drm_modeset_unlock_all(dev); i915_gem_object_put(new_bo); return 0; out_unlock: drm_modeset_unlock_all(dev); i915_gem_object_put(new_bo); return ret; } static void update_reg_attrs(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff), &regs->OCLRC0); iowrite32(overlay->saturation, &regs->OCLRC1); } static bool check_gamma_bounds(u32 gamma1, u32 gamma2) { int i; if (gamma1 & 0xff000000 || gamma2 & 0xff000000) return false; for (i = 0; i < 3; i++) { if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) return false; } return true; } static bool check_gamma5_errata(u32 gamma5) { int i; for (i = 0; i < 3; i++) { if (((gamma5 >> i*8) & 0xff) == 0x80) return false; } return true; } static int check_gamma(struct drm_intel_overlay_attrs *attrs) { if (!check_gamma_bounds(0, attrs->gamma0) || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) return -EINVAL; if (!check_gamma5_errata(attrs->gamma5)) return -EINVAL; return 0; } int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; int ret; overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; } drm_modeset_lock_all(dev); ret = -EINVAL; if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { attrs->color_key = overlay->color_key; attrs->brightness = overlay->brightness; attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; if (DISPLAY_VER(dev_priv) != 2) { attrs->gamma0 = intel_de_read(dev_priv, OGAMC0); attrs->gamma1 = intel_de_read(dev_priv, OGAMC1); attrs->gamma2 = intel_de_read(dev_priv, OGAMC2); attrs->gamma3 = intel_de_read(dev_priv, OGAMC3); attrs->gamma4 = intel_de_read(dev_priv, OGAMC4); attrs->gamma5 = intel_de_read(dev_priv, OGAMC5); } } else { if (attrs->brightness < -128 || attrs->brightness > 127) goto out_unlock; if (attrs->contrast > 255) goto out_unlock; if (attrs->saturation > 1023) goto out_unlock; overlay->color_key = attrs->color_key; overlay->brightness = attrs->brightness; overlay->contrast = attrs->contrast; overlay->saturation = attrs->saturation; update_reg_attrs(overlay, overlay->regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (DISPLAY_VER(dev_priv) == 2) goto out_unlock; if (overlay->active) { ret = -EBUSY; goto out_unlock; } ret = check_gamma(attrs); if (ret) goto out_unlock; intel_de_write(dev_priv, OGAMC0, attrs->gamma0); intel_de_write(dev_priv, OGAMC1, attrs->gamma1); intel_de_write(dev_priv, OGAMC2, attrs->gamma2); intel_de_write(dev_priv, OGAMC3, attrs->gamma3); intel_de_write(dev_priv, OGAMC4, attrs->gamma4); intel_de_write(dev_priv, OGAMC5, attrs->gamma5); } } overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0; ret = 0; out_unlock: drm_modeset_unlock_all(dev); return ret; } static int get_registers(struct intel_overlay *overlay, bool use_phys) { struct drm_i915_private *i915 = overlay->i915; struct 
drm_i915_gem_object *obj = ERR_PTR(-ENODEV); struct i915_vma *vma; int err; if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE); if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_put_bo; } if (use_phys) overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl); else overlay->flip_addr = i915_ggtt_offset(vma); overlay->regs = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(overlay->regs)) { err = PTR_ERR(overlay->regs); goto err_put_bo; } overlay->reg_bo = obj; return 0; err_put_bo: i915_gem_object_put(obj); return err; } void intel_overlay_setup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; struct intel_engine_cs *engine; int ret; if (!HAS_OVERLAY(dev_priv)) return; engine = to_gt(dev_priv)->engine[RCS0]; if (!engine || !engine->kernel_context) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; overlay->i915 = dev_priv; overlay->context = engine->kernel_context; overlay->color_key = 0x0101fe; overlay->color_key_enabled = true; overlay->brightness = -19; overlay->contrast = 75; overlay->saturation = 146; i915_active_init(&overlay->last_flip, NULL, intel_overlay_last_flip_retire, 0); ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); if (ret) goto out_free; memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); update_polyphase_filter(overlay->regs); update_reg_attrs(overlay, overlay->regs); dev_priv->display.overlay = overlay; drm_info(&dev_priv->drm, "Initialized overlay support.\n"); return; out_free: kfree(overlay); } void intel_overlay_cleanup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; overlay = fetch_and_zero(&dev_priv->display.overlay); if (!overlay) return; /* * The bo's should be free'd by the generic code already. * Furthermore modesetting teardown happens beforehand so the * hardware should be off already. 
*/ drm_WARN_ON(&dev_priv->drm, overlay->active); i915_gem_object_put(overlay->reg_bo); i915_active_fini(&overlay->last_flip); kfree(overlay); } #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct intel_overlay_error_state { struct overlay_registers regs; unsigned long base; u32 dovsta; u32 isr; }; struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay = dev_priv->display.overlay; struct intel_overlay_error_state *error; if (!overlay || !overlay->active) return NULL; error = kmalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; error->dovsta = intel_de_read(dev_priv, DOVSTA); error->isr = intel_de_read(dev_priv, GEN2_ISR); error->base = overlay->flip_addr; memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs)); return error; } void intel_overlay_print_error_state(struct drm_i915_error_state_buf *m, struct intel_overlay_error_state *error) { i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", error->dovsta, error->isr); i915_error_printf(m, " Register file at 0x%08lx:\n", error->base); #define P(x) i915_error_printf(m, " " #x ": 0x%08x\n", error->regs.x) P(OBUF_0Y); P(OBUF_1Y); P(OBUF_0U); P(OBUF_0V); P(OBUF_1U); P(OBUF_1V); P(OSTRIDE); P(YRGB_VPH); P(UV_VPH); P(HORZ_PH); P(INIT_PHS); P(DWINPOS); P(DWINSZ); P(SWIDTH); P(SWIDTHSW); P(SHEIGHT); P(YRGBSCALE); P(UVSCALE); P(OCLRC0); P(OCLRC1); P(DCLRKV); P(DCLRKM); P(SCLRKVH); P(SCLRKVL); P(SCLRKEN); P(OCONFIG); P(OCMD); P(OSTART_0Y); P(OSTART_1Y); P(OSTART_0U); P(OSTART_0V); P(OSTART_1U); P(OSTART_1V); P(OTILEOFF_0Y); P(OTILEOFF_1Y); P(OTILEOFF_0U); P(OTILEOFF_0V); P(OTILEOFF_1U); P(OTILEOFF_1V); P(FASTHSCALE); P(UVSCALEV); #undef P } #endif
repo_name: linux-master
file_path: drivers/gpu/drm/i915/display/intel_overlay.c
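One detail in intel_overlay.c that is easy to miss is the fixed-point format used by update_scaling_factors(): scale ratios are stored as "shifted-point" numbers where (1 << 12) == 1.0, and the Y scale is then forced to be an exact multiple of the UV scale. The short user-space sketch below reproduces just that arithmetic; the source/destination widths and the 4:2:0 chroma subsampling factor are example values chosen for the illustration, not taken from the driver.

/* Illustration only -- not part of the kernel sources in this dump. */
#include <stdio.h>

#define FP_SHIFT   12
#define FRACT_MASK 0xfff

int main(void)
{
	unsigned int src_scan_width = 1280, dst_width = 640;	/* hypothetical sizes */
	unsigned int uv_hscale = 2;	/* e.g. YUV 4:2:0 halves chroma horizontally */
	unsigned int xscale, xscale_UV;

	/* .12 fixed-point scale factor, as computed in update_scaling_factors(). */
	if (dst_width > 1)
		xscale = ((src_scan_width - 1) << FP_SHIFT) / dst_width;
	else
		xscale = 1 << FP_SHIFT;

	/* Derive the UV scale, then make the Y scale an exact multiple of it,
	 * mirroring the "exact multiply" step in the driver. */
	xscale_UV = xscale / uv_hscale;
	xscale = xscale_UV * uv_hscale;

	printf("xscale    = %u (~%.4f)\n", xscale, xscale / 4096.0);
	printf("xscale_UV = %u (~%.4f)\n", xscale_UV, xscale_UV / 4096.0);
	printf("integer part %u, fraction 0x%03x\n",
	       xscale >> FP_SHIFT, xscale & FRACT_MASK);
	return 0;
}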
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include "gem/i915_gem_domain.h" #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gt/gen8_ppgtt.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" struct i915_dpt { struct i915_address_space vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; void __iomem *iomem; }; #define i915_is_dpt(vm) ((vm)->is_dpt) static inline struct i915_dpt * i915_vm_to_dpt(struct i915_address_space *vm) { BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); GEM_BUG_ON(!i915_is_dpt(vm)); return container_of(vm, struct i915_dpt, vm); } #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); } static void dpt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) { struct i915_dpt *dpt = i915_vm_to_dpt(vm); gen8_pte_t __iomem *base = dpt->iomem; gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, vm->pte_encode(addr, pat_index, flags)); } static void dpt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags) { struct i915_dpt *dpt = i915_vm_to_dpt(vm); gen8_pte_t __iomem *base = dpt->iomem; const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags); struct sgt_iter sgt_iter; dma_addr_t addr; int i; /* * Note that we ignore PTE_READ_ONLY here. The caller must be careful * not to allow the user to override access to a read only page. */ i = vma_res->start / I915_GTT_PAGE_SIZE; for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages) gen8_set_pte(&base[i++], pte_encode | addr); } static void dpt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { } static void dpt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags) { u32 pte_flags; if (vma_res->bound_flags) return; /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ pte_flags = 0; if (vm->has_read_only && vma_res->bi.readonly) pte_flags |= PTE_READ_ONLY; if (vma_res->bi.lmem) pte_flags |= PTE_LM; vm->insert_entries(vm, vma_res, pat_index, pte_flags); vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; /* * Without aliasing PPGTT there's no difference between * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally * upgrade to both bound if we bind either to avoid double-binding. 
*/ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res) { vm->clear_range(vm, vma_res->start, vma_res->vma_size); } static void dpt_cleanup(struct i915_address_space *vm) { struct i915_dpt *dpt = i915_vm_to_dpt(vm); i915_gem_object_put(dpt->obj); } struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) { struct drm_i915_private *i915 = vm->i915; struct i915_dpt *dpt = i915_vm_to_dpt(vm); intel_wakeref_t wakeref; struct i915_vma *vma; void __iomem *iomem; struct i915_gem_ww_ctx ww; u64 pin_flags = 0; int err; if (i915_gem_object_is_stolen(dpt->obj)) pin_flags |= PIN_MAPPABLE; wakeref = intel_runtime_pm_get(&i915->runtime_pm); atomic_inc(&i915->gpu_error.pending_fb_pin); for_i915_gem_ww(&ww, err, true) { err = i915_gem_object_lock(dpt->obj, &ww); if (err) continue; vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096, pin_flags); if (IS_ERR(vma)) { err = PTR_ERR(vma); continue; } iomem = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(iomem)) { err = PTR_ERR(iomem); continue; } dpt->vma = vma; dpt->iomem = iomem; i915_vma_get(vma); } dpt->obj->mm.dirty = true; atomic_dec(&i915->gpu_error.pending_fb_pin); intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err ? ERR_PTR(err) : vma; } void intel_dpt_unpin(struct i915_address_space *vm) { struct i915_dpt *dpt = i915_vm_to_dpt(vm); i915_vma_unpin_iomap(dpt->vma); i915_vma_put(dpt->vma); } /** * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume * @i915: device instance * * Restore the memory mapping during system resume for all framebuffers which * are mapped to HW via a GGTT->DPT page table. The content of these page * tables are not stored in the hibernation image during S4 and S3RST->S4 * transitions, so here we reprogram the PTE entries in those tables. * * This function must be called after the mappings in GGTT have been restored calling * i915_ggtt_resume(). */ void intel_dpt_resume(struct drm_i915_private *i915) { struct drm_framebuffer *drm_fb; if (!HAS_DISPLAY(i915)) return; mutex_lock(&i915->drm.mode_config.fb_lock); drm_for_each_fb(drm_fb, &i915->drm) { struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); if (fb->dpt_vm) i915_ggtt_resume_vm(fb->dpt_vm); } mutex_unlock(&i915->drm.mode_config.fb_lock); } /** * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend * @i915: device instance * * Suspend the memory mapping during system suspend for all framebuffers which * are mapped to HW via a GGTT->DPT page table. * * This function must be called before the mappings in GGTT are suspended calling * i915_ggtt_suspend(). 
*/ void intel_dpt_suspend(struct drm_i915_private *i915) { struct drm_framebuffer *drm_fb; if (!HAS_DISPLAY(i915)) return; mutex_lock(&i915->drm.mode_config.fb_lock); drm_for_each_fb(drm_fb, &i915->drm) { struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); if (fb->dpt_vm) i915_ggtt_suspend_vm(fb->dpt_vm); } mutex_unlock(&i915->drm.mode_config.fb_lock); } struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb) { struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; struct drm_i915_private *i915 = to_i915(obj->dev); struct drm_i915_gem_object *dpt_obj; struct i915_address_space *vm; struct i915_dpt *dpt; size_t size; int ret; if (intel_fb_needs_pot_stride_remap(fb)) size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); else size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt)) dpt_obj = i915_gem_object_create_stolen(i915, size); if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) { drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n"); dpt_obj = i915_gem_object_create_shmem(i915, size); } if (IS_ERR(dpt_obj)) return ERR_CAST(dpt_obj); ret = i915_gem_object_lock_interruptible(dpt_obj, NULL); if (!ret) { ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); i915_gem_object_unlock(dpt_obj); } if (ret) { i915_gem_object_put(dpt_obj); return ERR_PTR(ret); } dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); if (!dpt) { i915_gem_object_put(dpt_obj); return ERR_PTR(-ENOMEM); } vm = &dpt->vm; vm->gt = to_gt(i915); vm->i915 = i915; vm->dma = i915->drm.dev; vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; vm->is_dpt = true; i915_address_space_init(vm, VM_CLASS_DPT); vm->insert_page = dpt_insert_page; vm->clear_range = dpt_clear_range; vm->insert_entries = dpt_insert_entries; vm->cleanup = dpt_cleanup; vm->vma_ops.bind_vma = dpt_bind_vma; vm->vma_ops.unbind_vma = dpt_unbind_vma; vm->pte_encode = vm->gt->ggtt->vm.pte_encode; dpt->obj = dpt_obj; dpt->obj->is_dpt = true; return &dpt->vm; } void intel_dpt_destroy(struct i915_address_space *vm) { struct i915_dpt *dpt = i915_vm_to_dpt(vm); dpt->obj->is_dpt = false; i915_vm_put(&dpt->vm); } void intel_dpt_configure(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (DISPLAY_VER(i915) == 14) { enum pipe pipe = crtc->pipe; enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { if (plane_id == PLANE_CURSOR) continue; intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id), PLANE_CHICKEN_DISABLE_DPT, i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT); } } else if (DISPLAY_VER(i915) == 13) { intel_de_rmw(i915, CHICKEN_MISC_2, CHICKEN_MISC_DISABLE_DPT, i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT); } }
repo_name: linux-master
file_path: drivers/gpu/drm/i915/display/intel_dpt.c
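intel_dpt.c programs a display page table (DPT) by computing the first PTE slot from the VMA start offset and writing one 64-bit PTE per backing page (see dpt_insert_entries() above). The sketch below models only that indexing in plain user-space C; the 4 KiB page size matches I915_GTT_PAGE_SIZE, but the pte_encode() stand-in, the table size and the sample DMA addresses are assumptions made purely for the illustration.

/* Illustration only -- not part of the kernel sources in this dump. */
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096u

/* Hypothetical stand-in for vm->pte_encode(): here just a "present" bit. */
static uint64_t pte_encode(uint64_t addr)
{
	return addr | 1;
}

int main(void)
{
	uint64_t dpt[16] = { 0 };		/* stands in for the pinned DPT iomem */
	uint64_t vma_start = 2 * GTT_PAGE_SIZE;	/* where this FB view starts in the DPT VM */
	uint64_t pages[] = { 0x10000, 0x11000, 0x14000 };	/* fake DMA addresses */
	uint64_t pte_template = pte_encode(0);	/* encoded once, like vm->pte_encode(0, ...) */

	/* Index of the first PTE, computed as in dpt_insert_entries(). */
	size_t i = vma_start / GTT_PAGE_SIZE;

	for (size_t p = 0; p < sizeof(pages) / sizeof(pages[0]); p++)
		dpt[i++] = pte_template | pages[p];

	for (size_t k = 0; k < 8; k++)
		printf("pte[%zu] = 0x%llx\n", k, (unsigned long long)dpt[k]);

	return 0;
}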
/* * Copyright © 2006-2010 Intel Corporation * Copyright (c) 2006 Dave Airlie <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * Dave Airlie <[email protected]> * Jesse Barnes <[email protected]> * Chris Wilson <[email protected]> */ #include <linux/kernel.h> #include <linux/pwm.h> #include <drm/drm_edid.h> #include "i915_reg.h" #include "intel_backlight.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" #include "intel_vrr.h" bool intel_panel_use_ssc(struct drm_i915_private *i915) { if (i915->params.panel_use_ssc >= 0) return i915->params.panel_use_ssc != 0; return i915->display.vbt.lvds_use_ssc && !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); } const struct drm_display_mode * intel_panel_preferred_fixed_mode(struct intel_connector *connector) { return list_first_entry_or_null(&connector->panel.fixed_modes, struct drm_display_mode, head); } static bool is_in_vrr_range(struct intel_connector *connector, int vrefresh) { const struct drm_display_info *info = &connector->base.display_info; return intel_vrr_is_capable(connector) && vrefresh >= info->monitor_range.min_vfreq && vrefresh <= info->monitor_range.max_vfreq; } static bool is_best_fixed_mode(struct intel_connector *connector, int vrefresh, int fixed_mode_vrefresh, const struct drm_display_mode *best_mode) { /* we want to always return something */ if (!best_mode) return true; /* * With VRR always pick a mode with equal/higher than requested * vrefresh, which we can then reduce to match the requested * vrefresh by extending the vblank length. 
*/ if (is_in_vrr_range(connector, vrefresh) && is_in_vrr_range(connector, fixed_mode_vrefresh) && fixed_mode_vrefresh < vrefresh) return false; /* pick the fixed_mode that is closest in terms of vrefresh */ return abs(fixed_mode_vrefresh - vrefresh) < abs(drm_mode_vrefresh(best_mode) - vrefresh); } const struct drm_display_mode * intel_panel_fixed_mode(struct intel_connector *connector, const struct drm_display_mode *mode) { const struct drm_display_mode *fixed_mode, *best_mode = NULL; int vrefresh = drm_mode_vrefresh(mode); list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { int fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode); if (is_best_fixed_mode(connector, vrefresh, fixed_mode_vrefresh, best_mode)) best_mode = fixed_mode; } return best_mode; } static bool is_alt_drrs_mode(const struct drm_display_mode *mode, const struct drm_display_mode *preferred_mode) { return drm_mode_match(mode, preferred_mode, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS) && mode->clock != preferred_mode->clock; } static bool is_alt_fixed_mode(const struct drm_display_mode *mode, const struct drm_display_mode *preferred_mode) { u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC; return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) && mode->hdisplay == preferred_mode->hdisplay && mode->vdisplay == preferred_mode->vdisplay; } const struct drm_display_mode * intel_panel_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode) { const struct drm_display_mode *fixed_mode, *best_mode = NULL; int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate; int max_vrefresh = drm_mode_vrefresh(adjusted_mode); /* pick the fixed_mode with the lowest refresh rate */ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { int vrefresh = drm_mode_vrefresh(fixed_mode); if (is_alt_drrs_mode(fixed_mode, adjusted_mode) && vrefresh >= min_vrefresh && vrefresh < max_vrefresh) { max_vrefresh = vrefresh; best_mode = fixed_mode; } } return best_mode; } const struct drm_display_mode * intel_panel_highest_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode) { const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode; /* pick the fixed_mode that has the highest clock */ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { if (fixed_mode->clock > best_mode->clock) best_mode = fixed_mode; } return best_mode; } int intel_panel_get_modes(struct intel_connector *connector) { const struct drm_display_mode *fixed_mode; int num_modes = 0; list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->base.dev, fixed_mode); if (mode) { drm_mode_probed_add(&connector->base, mode); num_modes++; } } return num_modes; } static bool has_drrs_modes(struct intel_connector *connector) { const struct drm_display_mode *mode1; list_for_each_entry(mode1, &connector->panel.fixed_modes, head) { const struct drm_display_mode *mode2 = mode1; list_for_each_entry_continue(mode2, &connector->panel.fixed_modes, head) { if (is_alt_drrs_mode(mode1, mode2)) return true; } } return false; } enum drrs_type intel_panel_drrs_type(struct intel_connector *connector) { return connector->panel.vbt.drrs_type; } int intel_panel_compute_config(struct intel_connector *connector, struct drm_display_mode *adjusted_mode) { const struct drm_display_mode 
*fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); int vrefresh, fixed_mode_vrefresh; bool is_vrr; if (!fixed_mode) return 0; vrefresh = drm_mode_vrefresh(adjusted_mode); fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode); /* * Assume that we shouldn't muck about with the * timings if they don't land in the VRR range. */ is_vrr = is_in_vrr_range(connector, vrefresh) && is_in_vrr_range(connector, fixed_mode_vrefresh); if (!is_vrr) { /* * We don't want to lie too much to the user about the refresh * rate they're going to get. But we have to allow a bit of latitude * for Xorg since it likes to automagically cook up modes with slightly * off refresh rates. */ if (abs(vrefresh - fixed_mode_vrefresh) > 1) { drm_dbg_kms(connector->base.dev, "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n", connector->base.base.id, connector->base.name, vrefresh, fixed_mode_vrefresh); return -EINVAL; } } drm_mode_copy(adjusted_mode, fixed_mode); if (is_vrr && fixed_mode_vrefresh != vrefresh) adjusted_mode->vtotal = DIV_ROUND_CLOSEST(adjusted_mode->clock * 1000, adjusted_mode->htotal * vrefresh); drm_mode_set_crtcinfo(adjusted_mode, 0); return 0; } static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct drm_display_mode *preferred_mode = intel_panel_preferred_fixed_mode(connector); struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { if (!is_alt_fixed_mode(mode, preferred_mode)) continue; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); list_move_tail(&mode->head, &connector->panel.fixed_modes); } } static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct drm_display_mode *scan, *fixed_mode = NULL; if (list_empty(&connector->base.probed_modes)) return; /* make sure the preferred mode is first */ list_for_each_entry(scan, &connector->base.probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { fixed_mode = scan; break; } } if (!fixed_mode) fixed_mode = list_first_entry(&connector->base.probed_modes, typeof(*fixed_mode), head); drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? 
"preferred" : "first", DRM_MODE_ARG(fixed_mode)); fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; list_move_tail(&fixed_mode->head, &connector->panel.fixed_modes); } static void intel_panel_destroy_probed_modes(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, DRM_MODE_ARG(mode)); list_del(&mode->head); drm_mode_destroy(&i915->drm, mode); } } void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool use_alt_fixed_modes) { intel_panel_add_edid_preferred_mode(connector); if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes) intel_panel_add_edid_alt_fixed_modes(connector); intel_panel_destroy_probed_modes(connector); } static void intel_panel_add_fixed_mode(struct intel_connector *connector, struct drm_display_mode *fixed_mode, const char *type) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct drm_display_info *info = &connector->base.display_info; if (!fixed_mode) return; fixed_mode->type |= DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; info->width_mm = fixed_mode->width_mm; info->height_mm = fixed_mode->height_mm; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n", connector->base.base.id, connector->base.name, type, DRM_MODE_ARG(fixed_mode)); list_add_tail(&fixed_mode->head, &connector->panel.fixed_modes); } void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *mode; mode = connector->panel.vbt.lfp_lvds_vbt_mode; if (!mode) return; intel_panel_add_fixed_mode(connector, drm_mode_duplicate(&i915->drm, mode), "VBT LFP"); } void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *mode; mode = connector->panel.vbt.sdvo_lvds_vbt_mode; if (!mode) return; intel_panel_add_fixed_mode(connector, drm_mode_duplicate(&i915->drm, mode), "VBT SDVO"); } void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector, struct intel_encoder *encoder) { intel_panel_add_fixed_mode(connector, intel_encoder_current_mode(encoder), "current (BIOS)"); } /* adjusted_mode has been preset to be the panel's fixed mode */ static int pch_panel_fitting(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); int x, y, width, height; /* Native modes don't need fitting */ if (adjusted_mode->crtc_hdisplay == pipe_src_w && adjusted_mode->crtc_vdisplay == pipe_src_h && crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) return 0; switch (conn_state->scaling_mode) { case DRM_MODE_SCALE_CENTER: width = pipe_src_w; height = pipe_src_h; x = (adjusted_mode->crtc_hdisplay - width + 1)/2; y = (adjusted_mode->crtc_vdisplay - height + 1)/2; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ { u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; if (scaled_width > scaled_height) { /* pillar */ width = 
scaled_height / pipe_src_h; if (width & 1) width++; x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; y = 0; height = adjusted_mode->crtc_vdisplay; } else if (scaled_width < scaled_height) { /* letter */ height = scaled_width / pipe_src_w; if (height & 1) height++; y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; x = 0; width = adjusted_mode->crtc_hdisplay; } else { x = y = 0; width = adjusted_mode->crtc_hdisplay; height = adjusted_mode->crtc_vdisplay; } } break; case DRM_MODE_SCALE_NONE: WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w); WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h); fallthrough; case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->crtc_hdisplay; height = adjusted_mode->crtc_vdisplay; break; default: MISSING_CASE(conn_state->scaling_mode); return -EINVAL; } drm_rect_init(&crtc_state->pch_pfit.dst, x, y, width, height); crtc_state->pch_pfit.enabled = true; return 0; } static void centre_horizontally(struct drm_display_mode *adjusted_mode, int width) { u32 border, sync_pos, blank_width, sync_width; /* keep the hsync and hblank widths constant */ sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; sync_pos = (blank_width - sync_width + 1) / 2; border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; border += border & 1; /* make the border even */ adjusted_mode->crtc_hdisplay = width; adjusted_mode->crtc_hblank_start = width + border; adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; } static void centre_vertically(struct drm_display_mode *adjusted_mode, int height) { u32 border, sync_pos, blank_width, sync_width; /* keep the vsync and vblank widths constant */ sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; sync_pos = (blank_width - sync_width + 1) / 2; border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; adjusted_mode->crtc_vdisplay = height; adjusted_mode->crtc_vblank_start = height + border; adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; } static u32 panel_fitter_scaling(u32 source, u32 target) { /* * Floating point operation is not supported. So the FACTOR * is defined, which can avoid the floating point computation * when calculating the panel ratio. 
*/ #define ACCURACY 12 #define FACTOR (1 << ACCURACY) u32 ratio = source * FACTOR / target; return (FACTOR * ratio + FACTOR/2) / FACTOR; } static void i965_scale_aspect(struct intel_crtc_state *crtc_state, u32 *pfit_control) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; /* 965+ is easy, it does everything in hw */ if (scaled_width > scaled_height) *pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; else if (scaled_width < scaled_height) *pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; else if (adjusted_mode->crtc_hdisplay != pipe_src_w) *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; } static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, u32 *pfit_control, u32 *pfit_pgm_ratios, u32 *border) { struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h; u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay; u32 bits; /* * For earlier chips we have to calculate the scaling * ratio by hand and program it into the * PFIT_PGM_RATIO register */ if (scaled_width > scaled_height) { /* pillar */ centre_horizontally(adjusted_mode, scaled_height / pipe_src_h); *border = LVDS_BORDER_ENABLE; if (pipe_src_h != adjusted_mode->crtc_vdisplay) { bits = panel_fitter_scaling(pipe_src_h, adjusted_mode->crtc_vdisplay); *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | PFIT_VERT_SCALE(bits)); *pfit_control |= (PFIT_ENABLE | PFIT_VERT_INTERP_BILINEAR | PFIT_HORIZ_INTERP_BILINEAR); } } else if (scaled_width < scaled_height) { /* letter */ centre_vertically(adjusted_mode, scaled_width / pipe_src_w); *border = LVDS_BORDER_ENABLE; if (pipe_src_w != adjusted_mode->crtc_hdisplay) { bits = panel_fitter_scaling(pipe_src_w, adjusted_mode->crtc_hdisplay); *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) | PFIT_VERT_SCALE(bits)); *pfit_control |= (PFIT_ENABLE | PFIT_VERT_INTERP_BILINEAR | PFIT_HORIZ_INTERP_BILINEAR); } } else { /* Aspects match, Let hw scale both directions */ *pfit_control |= (PFIT_ENABLE | PFIT_VERT_AUTO_SCALE | PFIT_HORIZ_AUTO_SCALE | PFIT_VERT_INTERP_BILINEAR | PFIT_HORIZ_INTERP_BILINEAR); } } static int gmch_panel_fitting(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int pipe_src_w = drm_rect_width(&crtc_state->pipe_src); int pipe_src_h = drm_rect_height(&crtc_state->pipe_src); /* Native modes don't need fitting */ if (adjusted_mode->crtc_hdisplay == pipe_src_w && adjusted_mode->crtc_vdisplay == pipe_src_h) goto out; switch (conn_state->scaling_mode) { case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. 
*/ centre_horizontally(adjusted_mode, pipe_src_w); centre_vertically(adjusted_mode, pipe_src_h); border = LVDS_BORDER_ENABLE; break; case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ if (DISPLAY_VER(dev_priv) >= 4) i965_scale_aspect(crtc_state, &pfit_control); else i9xx_scale_aspect(crtc_state, &pfit_control, &pfit_pgm_ratios, &border); break; case DRM_MODE_SCALE_FULLSCREEN: /* * Full scaling, even if it changes the aspect ratio. * Fortunately this is all done for us in hw. */ if (pipe_src_h != adjusted_mode->crtc_vdisplay || pipe_src_w != adjusted_mode->crtc_hdisplay) { pfit_control |= PFIT_ENABLE; if (DISPLAY_VER(dev_priv) >= 4) pfit_control |= PFIT_SCALING_AUTO; else pfit_control |= (PFIT_VERT_AUTO_SCALE | PFIT_VERT_INTERP_BILINEAR | PFIT_HORIZ_AUTO_SCALE | PFIT_HORIZ_INTERP_BILINEAR); } break; default: MISSING_CASE(conn_state->scaling_mode); return -EINVAL; } /* 965+ wants fuzzy fitting */ /* FIXME: handle multiple panels by failing gracefully */ if (DISPLAY_VER(dev_priv) >= 4) pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY; out: if ((pfit_control & PFIT_ENABLE) == 0) { pfit_control = 0; pfit_pgm_ratios = 0; } /* Make sure pre-965 set dither correctly for 18bpp panels. */ if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18) pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE; crtc_state->gmch_pfit.control = pfit_control; crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios; crtc_state->gmch_pfit.lvds_border_bits = border; return 0; } int intel_panel_fitting(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (HAS_GMCH(i915)) return gmch_panel_fitting(crtc_state, conn_state); else return pch_panel_fitting(crtc_state, conn_state); } enum drm_connector_status intel_panel_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *i915 = to_i915(connector->dev); if (!INTEL_DISPLAY_ENABLED(i915)) return connector_status_disconnected; return connector_status_connected; } enum drm_mode_status intel_panel_mode_valid(struct intel_connector *connector, const struct drm_display_mode *mode) { const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, mode); if (!fixed_mode) return MODE_OK; if (mode->hdisplay != fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay != fixed_mode->vdisplay) return MODE_PANEL; if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(fixed_mode)) return MODE_PANEL; return MODE_OK; } void intel_panel_init_alloc(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; connector->panel.vbt.panel_type = -1; connector->panel.vbt.backlight.controller = -1; INIT_LIST_HEAD(&panel->fixed_modes); } int intel_panel_init(struct intel_connector *connector, const struct drm_edid *fixed_edid) { struct intel_panel *panel = &connector->panel; panel->fixed_edid = fixed_edid; intel_backlight_init_funcs(panel); if (!has_drrs_modes(connector)) connector->panel.vbt.drrs_type = DRRS_TYPE_NONE; drm_dbg_kms(connector->base.dev, "[CONNECTOR:%d:%s] DRRS type: %s\n", connector->base.base.id, connector->base.name, intel_drrs_type_str(intel_panel_drrs_type(connector))); return 0; } void intel_panel_fini(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; struct drm_display_mode *fixed_mode, *next; if (!IS_ERR_OR_NULL(panel->fixed_edid)) drm_edid_free(panel->fixed_edid); intel_backlight_destroy(panel); 
intel_bios_fini_panel(panel); list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) { list_del(&fixed_mode->head); drm_mode_destroy(connector->base.dev, fixed_mode); } }
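A standalone sketch of the vblank-stretching arithmetic used by intel_panel_compute_config() above, with invented timings; this block is an illustration, not code from intel_panel.c. When both the requested and the fixed-mode refresh rates fall inside the panel's VRR range, the driver keeps the fixed mode's pixel clock and htotal and enlarges vtotal until the effective refresh rate matches the request.

/*
 * Minimal sketch, assuming positive operands and made-up timings, of how a
 * higher-refresh fixed mode is slowed to a requested rate by lengthening
 * vtotal while the pixel clock and htotal stay fixed.
 */
#include <stdio.h>

/* simplified DIV_ROUND_CLOSEST() for positive values only */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	long clock_khz = 319750;	/* hypothetical pixel clock, kHz */
	long htotal = 2200;
	long vtotal = 1210;		/* fixed mode: roughly 120 Hz */
	long requested_vrefresh = 60;	/* user asked for a 60 Hz mode */

	/* refresh = clock / (htotal * vtotal); solve for the new vtotal */
	long new_vtotal = DIV_ROUND_CLOSEST(clock_khz * 1000,
					    htotal * requested_vrefresh);

	printf("vtotal %ld -> %ld, effective refresh ~%ld Hz\n",
	       vtotal, new_vtotal,
	       DIV_ROUND_CLOSEST(clock_khz * 1000, htotal * new_vtotal));
	return 0;
}

With these example numbers the 1210-line vertical total grows to 2422 lines, bringing the ~120 Hz timing down to roughly 60 Hz without touching the dot clock.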
linux-master
drivers/gpu/drm/i915/display/intel_panel.c
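The DRM_MODE_SCALE_ASPECT branch of pch_panel_fitting() in the file above chooses between pillarboxing and letterboxing by cross-multiplying the panel and source dimensions, which avoids any division or floating point in the comparison. The sketch below reproduces that comparison and the centred destination rectangle with invented resolutions; fit_aspect() and struct rect are illustrative names, not driver symbols.

/*
 * Minimal sketch, with invented resolutions, of the aspect-preserving fit
 * computed by pch_panel_fitting() for DRM_MODE_SCALE_ASPECT.
 */
#include <stdio.h>

struct rect {
	int x, y, w, h;
};

static struct rect fit_aspect(int panel_w, int panel_h, int src_w, int src_h)
{
	/* compare panel_w/panel_h with src_w/src_h by cross-multiplying */
	long scaled_w = (long)panel_w * src_h;
	long scaled_h = (long)src_w * panel_h;
	struct rect r = { 0, 0, panel_w, panel_h };

	if (scaled_w > scaled_h) {		/* panel wider: pillarbox */
		r.w = scaled_h / src_h;
		r.w += r.w & 1;			/* keep the width even */
		r.x = (panel_w - r.w + 1) / 2;
	} else if (scaled_w < scaled_h) {	/* panel taller: letterbox */
		r.h = scaled_w / src_w;
		r.h += r.h & 1;
		r.y = (panel_h - r.h + 1) / 2;
	}

	return r;
}

int main(void)
{
	/* 4:3 content scaled onto a 16:9 panel */
	struct rect r = fit_aspect(1920, 1080, 1024, 768);

	printf("dst %dx%d at (%d,%d)\n", r.w, r.h, r.x, r.y);
	return 0;
}

Running it for 1024x768 content on a 1920x1080 panel yields a 1440x1080 rectangle at x = 240: a centred 4:3 image with black pillars on either side.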
/* * Copyright © 2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Deepak M <m.deepak at intel.com> */ #include <drm/drm_mipi_dsi.h> #include <video/mipi_display.h> #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_dcs_backlight.h" #define CONTROL_DISPLAY_BCTRL (1 << 5) #define CONTROL_DISPLAY_DD (1 << 3) #define CONTROL_DISPLAY_BL (1 << 2) #define POWER_SAVE_OFF (0 << 0) #define POWER_SAVE_LOW (1 << 0) #define POWER_SAVE_MEDIUM (2 << 0) #define POWER_SAVE_HIGH (3 << 0) #define POWER_SAVE_OUTDOOR_MODE (4 << 0) #define PANEL_PWM_MAX_VALUE 0xFF static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused) { struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_panel *panel = &connector->panel; struct mipi_dsi_device *dsi_device; u8 data[2] = {}; enum port port; size_t len = panel->backlight.max > U8_MAX ? 2 : 1; for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &data, len); break; } return (data[1] << 8) | data[0]; } static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; u8 data[2] = {}; enum port port; size_t len = panel->backlight.max > U8_MAX ? 
2 : 1; unsigned long mode_flags; if (len == 1) { data[0] = level; } else { data[0] = level >> 8; data[1] = level; } for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mode_flags = dsi_device->mode_flags; dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, &data, len); dsi_device->mode_flags = mode_flags; } } static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; dcs_set_backlight(conn_state, 0); for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_OFF; dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, &cabc, sizeof(cabc)); } for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, &ctrl, sizeof(ctrl)); ctrl &= ~CONTROL_DISPLAY_BL; ctrl &= ~CONTROL_DISPLAY_DD; ctrl &= ~CONTROL_DISPLAY_BCTRL; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, &ctrl, sizeof(ctrl)); } } static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, &ctrl, sizeof(ctrl)); ctrl |= CONTROL_DISPLAY_BL; ctrl |= CONTROL_DISPLAY_DD; ctrl |= CONTROL_DISPLAY_BCTRL; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, &ctrl, sizeof(ctrl)); } for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_MEDIUM; dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, &cabc, sizeof(cabc)); } dcs_set_backlight(conn_state, level); } static int dcs_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (panel->vbt.backlight.brightness_precision_bits > 8) panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1; else panel->backlight.max = PANEL_PWM_MAX_VALUE; panel->backlight.level = panel->backlight.max; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using DCS for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static const struct intel_panel_bl_funcs dcs_bl_funcs = { .setup = dcs_setup_backlight, .enable = dcs_enable_backlight, .disable = dcs_disable_backlight, .set = dcs_set_backlight, .get = dcs_get_backlight, }; int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; struct intel_encoder *encoder = intel_attached_encoder(intel_connector); struct intel_panel *panel = &intel_connector->panel; if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) return -ENODEV; if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI)) return -EINVAL; panel->backlight.funcs = &dcs_bl_funcs; 
return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
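dcs_setup_backlight() above derives the maximum brightness from the VBT's brightness precision, and dcs_set_backlight() then sends either a one- or two-byte DCS parameter depending on whether that maximum fits in 8 bits. The following sketch mirrors only that payload packing; fill_brightness_payload() is an invented helper rather than a driver function, and the 10-bit example value is made up.

/*
 * Minimal sketch, assuming a hypothetical 10-bit panel, of the DCS
 * brightness payload sizing used by dcs_set_backlight().
 */
#include <stdint.h>
#include <stdio.h>

static size_t fill_brightness_payload(uint32_t max, uint32_t level,
				      uint8_t data[2])
{
	size_t len = max > UINT8_MAX ? 2 : 1;

	if (len == 1) {
		data[0] = level;
	} else {
		data[0] = level >> 8;	/* high byte first, as in the driver */
		data[1] = level;
	}

	return len;
}

int main(void)
{
	uint8_t data[2] = { 0, 0 };
	/* e.g. brightness_precision_bits == 10 in the VBT: max = 1023 */
	size_t len = fill_brightness_payload(1023, 612, data);

	printf("len=%zu payload=%02x %02x\n", len,
	       (unsigned int)data[0], (unsigned int)data[1]);
	return 0;
}

For such a 10-bit panel a level of 612 goes out as the two bytes 0x02 0x64, matching the len == 2 path in dcs_set_backlight(), while an 8-bit PWM-style panel would send a single byte.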
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_gmbus.h" #include "intel_hotplug.h" #include "intel_hotplug_irq.h" typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); typedef u32 (*hotplug_mask_func)(enum hpd_pin pin); static const u32 hpd_ilk[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG, }; static const u32 hpd_ivb[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, }; static const u32 hpd_bdw[HPD_NUM_PINS] = { [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), }; static const u32 hpd_ibx[HPD_NUM_PINS] = { [HPD_CRT] = SDE_CRT_HOTPLUG, [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, [HPD_PORT_B] = SDE_PORTB_HOTPLUG, [HPD_PORT_C] = SDE_PORTC_HOTPLUG, [HPD_PORT_D] = SDE_PORTD_HOTPLUG, }; static const u32 hpd_cpt[HPD_NUM_PINS] = { [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, }; static const u32 hpd_spt[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, }; static const u32 hpd_mask_i915[HPD_NUM_PINS] = { [HPD_CRT] = CRT_HOTPLUG_INT_EN, [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, }; static const u32 hpd_status_g4x[HPD_NUM_PINS] = { [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, }; static const u32 hpd_status_i915[HPD_NUM_PINS] = { [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, }; static const u32 hpd_bxt[HPD_NUM_PINS] = { [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B), [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C), }; static const u32 hpd_gen11[HPD_NUM_PINS] = { [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1), [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2), [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3), [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4), [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5), [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6), }; static const u32 hpd_xelpdp[HPD_NUM_PINS] = { [HPD_PORT_TC1] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC1) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC1), [HPD_PORT_TC2] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC2) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC2), [HPD_PORT_TC3] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC3) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC3), [HPD_PORT_TC4] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC4) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC4), }; static const u32 hpd_icp[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), 
[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5), [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6), }; static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1), }; static const u32 hpd_mtp[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), }; static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) { struct intel_hotplug *hpd = &dev_priv->display.hotplug; if (HAS_GMCH(dev_priv)) { if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) hpd->hpd = hpd_status_g4x; else hpd->hpd = hpd_status_i915; return; } if (DISPLAY_VER(dev_priv) >= 14) hpd->hpd = hpd_xelpdp; else if (DISPLAY_VER(dev_priv) >= 11) hpd->hpd = hpd_gen11; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) hpd->hpd = hpd_bxt; else if (DISPLAY_VER(dev_priv) == 9) hpd->hpd = NULL; /* no north HPD on SKL */ else if (DISPLAY_VER(dev_priv) >= 8) hpd->hpd = hpd_bdw; else if (DISPLAY_VER(dev_priv) >= 7) hpd->hpd = hpd_ivb; else hpd->hpd = hpd_ilk; if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) && (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) return; if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) hpd->pch_hpd = hpd_sde_dg1; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) hpd->pch_hpd = hpd_mtp; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) hpd->pch_hpd = hpd_icp; else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) hpd->pch_hpd = hpd_spt; else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv)) hpd->pch_hpd = hpd_cpt; else if (HAS_PCH_IBX(dev_priv)) hpd->pch_hpd = hpd_ibx; else MISSING_CASE(INTEL_PCH_TYPE(dev_priv)); } /* For display hotplug interrupt */ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, u32 mask, u32 bits) { lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, bits & ~mask); intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits); } /** * i915_hotplug_interrupt_update - update hotplug interrupt enable * @dev_priv: driver private * @mask: bits to update * @bits: bits to enable * NOTE: the HPD enable bits are modified both inside and outside * of an interrupt context. To avoid that read-modify-write cycles * interfer, these bits are protected by a spinlock. Since this * function is usually not called from a context where the lock is * held already, this function acquires the lock itself. A non-locking * version is also available. 
*/ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, u32 mask, u32 bits) { spin_lock_irq(&dev_priv->irq_lock); i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); spin_unlock_irq(&dev_priv->irq_lock); } static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); default: return false; } } static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; default: return false; } } static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: case HPD_PORT_B: case HPD_PORT_C: case HPD_PORT_D: return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); default: return false; } } static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: return val & ICP_TC_HPD_LONG_DETECT(pin); default: return false; } } static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_E: return val & PORTE_HOTPLUG_LONG_DETECT; default: return false; } } static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; default: return false; } } static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_B: return val & PORTB_HOTPLUG_INT_LONG_PULSE; case HPD_PORT_C: return val & PORTC_HOTPLUG_INT_LONG_PULSE; case HPD_PORT_D: return val & PORTD_HOTPLUG_INT_LONG_PULSE; default: return false; } } /* * Get a bit mask of pins that have triggered, and which ones may be long. * This can be called multiple times with the same masks to accumulate * hotplug detection results from several registers. * * Note that the caller is expected to zero out the masks initially. 
*/ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS], bool long_pulse_detect(enum hpd_pin pin, u32 val)) { enum hpd_pin pin; BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); for_each_hpd_pin(pin) { if ((hpd[pin] & hotplug_trigger) == 0) continue; *pin_mask |= BIT(pin); if (long_pulse_detect(pin, dig_hotplug_reg)) *long_mask |= BIT(pin); } drm_dbg(&dev_priv->drm, "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 enabled_irqs = 0; for_each_intel_encoder(&dev_priv->drm, encoder) if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 hotplug_irqs = 0; for_each_intel_encoder(&dev_priv->drm, encoder) hotplug_irqs |= hpd[encoder->hpd_pin]; return hotplug_irqs; } static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, hotplug_mask_func hotplug_mask) { enum hpd_pin pin; u32 hotplug = 0; for_each_hpd_pin(pin) hotplug |= hotplug_mask(pin); return hotplug; } static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, hotplug_enables_func hotplug_enables) { struct intel_encoder *encoder; u32 hotplug = 0; for_each_intel_encoder(&i915->drm, encoder) hotplug |= hotplug_enables(encoder); return hotplug; } u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { u32 hotplug_status = 0, hotplug_status_mask; int i; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; else hotplug_status_mask = HOTPLUG_INT_STATUS_I915; /* * We absolutely have to clear all the pending interrupt * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port * interrupt bit won't have an edge, and the i965/g4x * edge triggered IIR will not notice that an interrupt * is still pending. 
We can't use PORT_HOTPLUG_EN to * guarantee the edge as the act of toggling the enable * bits can itself generate a new hotplug interrupt :( */ for (i = 0; i < 10; i++) { u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; if (tmp == 0) return hotplug_status; hotplug_status |= tmp; intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); } drm_WARN_ONCE(&dev_priv->drm, 1, "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); return hotplug_status; } void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) { u32 pin_mask = 0, long_mask = 0; u32 hotplug_trigger; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; else hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; if (hotplug_trigger) { intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, dev_priv->display.hotplug.hpd, i9xx_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) intel_dp_aux_irq_handler(dev_priv); } void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* * Somehow the PCH doesn't seem to really ack the interrupt to the CPU * unless we touch the hotplug register, even if hotplug_trigger is * zero. Not acking leads to "The master control interrupt lied (SDE)!" * errors. */ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); if (!hotplug_trigger) { u32 mask = PORTA_HOTPLUG_STATUS_MASK | PORTD_HOTPLUG_STATUS_MASK | PORTC_HOTPLUG_STATUS_MASK | PORTB_HOTPLUG_STATUS_MASK; dig_hotplug_reg &= ~mask; } intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); if (!hotplug_trigger) return; intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.pch_hpd, pch_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) { enum hpd_pin pin; u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK); u32 trigger_aux = iir & XELPDP_AUX_TC_MASK; u32 pin_mask = 0, long_mask = 0; for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) { u32 val; if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger)) continue; pin_mask |= BIT(pin); val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin)); intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val); if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT)) long_mask |= BIT(pin); } if (pin_mask) { drm_dbg(&i915->drm, "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", hotplug_trigger, pin_mask, long_mask); intel_hpd_irq_handler(i915, pin_mask, long_mask); } if (trigger_aux) intel_dp_aux_irq_handler(i915); if (!pin_mask && !trigger_aux) drm_err(&i915->drm, "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir); } void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; /* Locking due to DSI native GPIO sequences */ spin_lock(&dev_priv->irq_lock); dig_hotplug_reg = 
intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); spin_unlock(&dev_priv->irq_lock); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.pch_hpd, icp_ddi_port_hotplug_long_detect); } if (tc_hotplug_trigger) { u32 dig_hotplug_reg; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.pch_hpd, icp_tc_port_hotplug_long_detect); } if (pin_mask) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_ICP) intel_gmbus_irq_handler(dev_priv); } void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; u32 pin_mask = 0, long_mask = 0; if (hotplug_trigger) { u32 dig_hotplug_reg; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.pch_hpd, spt_port_hotplug_long_detect); } if (hotplug2_trigger) { u32 dig_hotplug_reg; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, dev_priv->display.hotplug.pch_hpd, spt_port_hotplug2_long_detect); } if (pin_mask) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) intel_gmbus_irq_handler(dev_priv); } void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.hpd, ilk_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, dev_priv->display.hotplug.hpd, bxt_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) { u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; if (trigger_tc) { u32 dig_hotplug_reg; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } if (trigger_tbt) { u32 dig_hotplug_reg; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } if (pin_mask) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); else drm_err(&dev_priv->drm, "Unexpected DE HPD interrupt 0x%08x\n", iir); } static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_A: return PORTA_HOTPLUG_ENABLE; case HPD_PORT_B: return PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_MASK; case HPD_PORT_C: 
return PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_MASK; case HPD_PORT_D: return PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_MASK; default: return 0; } } static u32 ibx_hotplug_enables(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); switch (encoder->hpd_pin) { case HPD_PORT_A: /* * When CPU and PCH are on the same package, port A * HPD must be enabled in both north and south. */ return HAS_PCH_LPT_LP(i915) ? PORTA_HOTPLUG_ENABLE : 0; case HPD_PORT_B: return PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; case HPD_PORT_C: return PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; case HPD_PORT_D: return PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; default: return 0; } } static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) { /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec). * The pulse duration bits are reserved on LPT+. */ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); } static void ibx_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, ibx_hotplug_mask(encoder->hpd_pin), ibx_hotplug_enables(encoder)); } static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); ibx_hpd_detection_setup(dev_priv); } static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_A: case HPD_PORT_B: case HPD_PORT_C: case HPD_PORT_D: return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin); default: return 0; } } static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) { return icp_ddi_hotplug_mask(encoder->hpd_pin); } static u32 icp_tc_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: return ICP_TC_HPD_ENABLE(hpd_pin); default: return 0; } } static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) { return icp_tc_hotplug_mask(encoder->hpd_pin); } static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); } static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI, icp_ddi_hotplug_mask(encoder->hpd_pin), icp_ddi_hotplug_enables(encoder)); } static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); } static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC, icp_tc_hotplug_mask(encoder->hpd_pin), icp_tc_hotplug_enables(encoder)); } static void icp_hpd_enable_detection(struct intel_encoder *encoder) { 
icp_ddi_hpd_enable_detection(encoder); icp_tc_hpd_enable_detection(encoder); } static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); else intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); icp_ddi_hpd_detection_setup(dev_priv); icp_tc_hpd_detection_setup(dev_priv); } static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: return GEN11_HOTPLUG_CTL_ENABLE(hpd_pin); default: return 0; } } static u32 gen11_hotplug_enables(struct intel_encoder *encoder) { return gen11_hotplug_mask(encoder->hpd_pin); } static void dg1_hpd_invert(struct drm_i915_private *i915) { u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | INVERT_DDID_HPD); intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); } static void dg1_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); dg1_hpd_invert(i915); icp_hpd_enable_detection(encoder); } static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) { dg1_hpd_invert(dev_priv); icp_hpd_irq_setup(dev_priv); } static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); } static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL, gen11_hotplug_mask(encoder->hpd_pin), gen11_hotplug_enables(encoder)); } static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); } static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL, gen11_hotplug_mask(encoder->hpd_pin), gen11_hotplug_enables(encoder)); } static void gen11_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); gen11_tc_hpd_enable_detection(encoder); gen11_tbt_hpd_enable_detection(encoder); if (INTEL_PCH_TYPE(i915) >= PCH_ICP) icp_hpd_enable_detection(encoder); } static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); gen11_tc_hpd_detection_setup(dev_priv); gen11_tbt_hpd_detection_setup(dev_priv); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_hpd_irq_setup(dev_priv); } static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case 
HPD_PORT_A: case HPD_PORT_B: return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin); default: return 0; } } static u32 mtp_ddi_hotplug_enables(struct intel_encoder *encoder) { return mtp_ddi_hotplug_mask(encoder->hpd_pin); } static u32 mtp_tc_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: return ICP_TC_HPD_ENABLE(hpd_pin); default: return 0; } } static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder) { return mtp_tc_hotplug_mask(encoder->hpd_pin); } static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915) { intel_de_rmw(i915, SHOTPLUG_CTL_DDI, intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask), intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables)); } static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_de_rmw(i915, SHOTPLUG_CTL_DDI, mtp_ddi_hotplug_mask(encoder->hpd_pin), mtp_ddi_hotplug_enables(encoder)); } static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915) { intel_de_rmw(i915, SHOTPLUG_CTL_TC, intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask), intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables)); } static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_de_rmw(i915, SHOTPLUG_CTL_DDI, mtp_tc_hotplug_mask(encoder->hpd_pin), mtp_tc_hotplug_enables(encoder)); } static void mtp_hpd_invert(struct drm_i915_private *i915) { u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | INVERT_TC1_HPD | INVERT_TC2_HPD | INVERT_TC3_HPD | INVERT_TC4_HPD | INVERT_DDID_HPD_MTP | INVERT_DDIE_HPD); intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val); } static void mtp_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); mtp_hpd_invert(i915); mtp_ddi_hpd_enable_detection(encoder); mtp_tc_hpd_enable_detection(encoder); } static void mtp_hpd_irq_setup(struct drm_i915_private *i915) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); mtp_hpd_invert(i915); ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); mtp_ddi_hpd_detection_setup(i915); mtp_tc_hpd_detection_setup(i915); } static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) { return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4; } static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, enum hpd_pin hpd_pin, bool enable) { u32 mask = XELPDP_TBT_HOTPLUG_ENABLE | XELPDP_DP_ALT_HOTPLUG_ENABLE; if (!is_xelpdp_pica_hpd_pin(hpd_pin)) return; intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), mask, enable ? 
mask : 0); } static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true); } static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) { struct intel_encoder *encoder; u32 available_pins = 0; enum hpd_pin pin; BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS); for_each_intel_encoder(&i915->drm, encoder) available_pins |= BIT(encoder->hpd_pin); for_each_hpd_pin(pin) _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin)); } static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) { xelpdp_pica_hpd_enable_detection(encoder); mtp_hpd_enable_detection(encoder); } static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd); intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR); xelpdp_pica_hpd_detection_setup(i915); if (INTEL_PCH_TYPE(i915) >= PCH_MTP) mtp_hpd_irq_setup(i915); } static u32 spt_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_A: return PORTA_HOTPLUG_ENABLE; case HPD_PORT_B: return PORTB_HOTPLUG_ENABLE; case HPD_PORT_C: return PORTC_HOTPLUG_ENABLE; case HPD_PORT_D: return PORTD_HOTPLUG_ENABLE; default: return 0; } } static u32 spt_hotplug_enables(struct intel_encoder *encoder) { return spt_hotplug_mask(encoder->hpd_pin); } static u32 spt_hotplug2_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_E: return PORTE_HOTPLUG_ENABLE; default: return 0; } } static u32 spt_hotplug2_enables(struct intel_encoder *encoder) { return spt_hotplug2_mask(encoder->hpd_pin); } static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) { /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(dev_priv)) { intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, CHASSIS_CLK_REQ_DURATION(0xf)); } /* Enable digital hotplug on the PCH */ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); } static void spt_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(i915)) { intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, CHASSIS_CLK_REQ_DURATION(0xf)); } intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, spt_hotplug_mask(encoder->hpd_pin), spt_hotplug_enables(encoder)); intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2, spt_hotplug2_mask(encoder->hpd_pin), spt_hotplug2_enables(encoder)); } static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 
spt_hpd_detection_setup(dev_priv); } static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_A: return DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK; default: return 0; } } static u32 ilk_hotplug_enables(struct intel_encoder *encoder) { switch (encoder->hpd_pin) { case HPD_PORT_A: return DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; default: return 0; } } static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) { /* * Enable digital hotplug on the CPU, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * The pulse duration bits are reserved on HSW+. */ intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); } static void ilk_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, ilk_hotplug_mask(encoder->hpd_pin), ilk_hotplug_enables(encoder)); ibx_hpd_enable_detection(encoder); } static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); if (DISPLAY_VER(dev_priv) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); else ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); ilk_hpd_detection_setup(dev_priv); ibx_hpd_irq_setup(dev_priv); } static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin) { switch (hpd_pin) { case HPD_PORT_A: return PORTA_HOTPLUG_ENABLE | BXT_DDIA_HPD_INVERT; case HPD_PORT_B: return PORTB_HOTPLUG_ENABLE | BXT_DDIB_HPD_INVERT; case HPD_PORT_C: return PORTC_HOTPLUG_ENABLE | BXT_DDIC_HPD_INVERT; default: return 0; } } static u32 bxt_hotplug_enables(struct intel_encoder *encoder) { u32 hotplug; switch (encoder->hpd_pin) { case HPD_PORT_A: hotplug = PORTA_HOTPLUG_ENABLE; if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIA_HPD_INVERT; return hotplug; case HPD_PORT_B: hotplug = PORTB_HOTPLUG_ENABLE; if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIB_HPD_INVERT; return hotplug; case HPD_PORT_C: hotplug = PORTC_HOTPLUG_ENABLE; if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIC_HPD_INVERT; return hotplug; default: return 0; } } static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) { intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask), intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); } static void bxt_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, bxt_hotplug_mask(encoder->hpd_pin), bxt_hotplug_enables(encoder)); } static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); bxt_hpd_detection_setup(dev_priv); } static void i915_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; /* HPD 
sense and interrupt enable are one and the same */ i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); } static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_en; lockdep_assert_held(&dev_priv->irq_lock); /* * Note HDMI and DP share hotplug bits. Enable bits are the same for all * generations. */ hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); /* * Programming the CRT detection parameters tends to generate a spurious * hotplug event about three seconds later. So just do it once. */ if (IS_G4X(dev_priv)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; /* Ignore TV since it's buggy */ i915_hotplug_interrupt_update_locked(dev_priv, HOTPLUG_INT_EN_MASK | CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | CRT_HOTPLUG_ACTIVATION_PERIOD_64, hotplug_en); } struct intel_hotplug_funcs { /* Enable HPD sense and interrupts for all present encoders */ void (*hpd_irq_setup)(struct drm_i915_private *i915); /* Enable HPD sense for a single encoder */ void (*hpd_enable_detection)(struct intel_encoder *encoder); }; #define HPD_FUNCS(platform) \ static const struct intel_hotplug_funcs platform##_hpd_funcs = { \ .hpd_irq_setup = platform##_hpd_irq_setup, \ .hpd_enable_detection = platform##_hpd_enable_detection, \ } HPD_FUNCS(i915); HPD_FUNCS(xelpdp); HPD_FUNCS(dg1); HPD_FUNCS(gen11); HPD_FUNCS(bxt); HPD_FUNCS(icp); HPD_FUNCS(spt); HPD_FUNCS(ilk); #undef HPD_FUNCS void intel_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (i915->display.funcs.hotplug) i915->display.funcs.hotplug->hpd_enable_detection(encoder); } void intel_hpd_irq_setup(struct drm_i915_private *i915) { if (i915->display_irqs_enabled && i915->display.funcs.hotplug) i915->display.funcs.hotplug->hpd_irq_setup(i915); } void intel_hotplug_irq_init(struct drm_i915_private *i915) { intel_hpd_init_pins(i915); intel_hpd_init_early(i915); if (HAS_GMCH(i915)) { if (I915_HAS_HOTPLUG(i915)) i915->display.funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(i915)) i915->display.funcs.hotplug = &icp_hpd_funcs; else if (HAS_PCH_DG1(i915)) i915->display.funcs.hotplug = &dg1_hpd_funcs; else if (DISPLAY_VER(i915) >= 14) i915->display.funcs.hotplug = &xelpdp_hpd_funcs; else if (DISPLAY_VER(i915) >= 11) i915->display.funcs.hotplug = &gen11_hpd_funcs; else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) i915->display.funcs.hotplug = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) i915->display.funcs.hotplug = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_SPT) i915->display.funcs.hotplug = &spt_hpd_funcs; else i915->display.funcs.hotplug = &ilk_hpd_funcs; } }
linux-master
drivers/gpu/drm/i915/display/intel_hotplug_irq.c
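
The hotplug file above ends by selecting a per-platform ops table (generated by the HPD_FUNCS macro) and dispatching through it in intel_hpd_irq_setup() and intel_hpd_enable_detection(). A minimal, self-contained sketch of that function-pointer dispatch pattern — using hypothetical platform names and plain standard C rather than the driver's real types — looks like this:

```c
/* Sketch of the per-platform hotplug vtable pattern; names are illustrative,
 * not the i915 driver's real symbols. Builds with any C99 compiler. */
#include <stdio.h>

struct hpd_funcs {
	void (*irq_setup)(void);        /* enable HPD IRQs for all encoders */
	void (*enable_detection)(void); /* enable HPD sense for one encoder */
};

static void ilk_irq_setup(void)        { puts("ilk: program CPU hotplug control"); }
static void ilk_enable_detection(void) { puts("ilk: enable one HPD pin"); }
static void spt_irq_setup(void)        { puts("spt: program PCH hotplug registers"); }
static void spt_enable_detection(void) { puts("spt: enable one HPD pin"); }

/* Mirrors the HPD_FUNCS(platform) macro: one const ops table per platform. */
#define HPD_FUNCS(p) \
	static const struct hpd_funcs p##_hpd_funcs = { \
		.irq_setup = p##_irq_setup, \
		.enable_detection = p##_enable_detection, \
	}

HPD_FUNCS(ilk);
HPD_FUNCS(spt);
#undef HPD_FUNCS

int main(void)
{
	int has_spt_pch = 1; /* stand-in for the INTEL_PCH_TYPE()/DISPLAY_VER() checks */
	const struct hpd_funcs *funcs =
		has_spt_pch ? &spt_hpd_funcs : &ilk_hpd_funcs;

	funcs->irq_setup();        /* analogous to intel_hpd_irq_setup() */
	funcs->enable_detection(); /* analogous to intel_hpd_enable_detection() */
	return 0;
}
```

The design choice this mirrors is that callers never test the platform again after init: the table is chosen once (as in intel_hotplug_irq_init()) and every later call is an indirect dispatch through the cached pointer.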
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" #include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_crt.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dmc.h" #include "intel_dp_aux_regs.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_pcode.h" #include "intel_pps.h" #include "intel_tc.h" #include "intel_vga.h" #include "skl_watermark.h" #include "vlv_sideband.h" #include "vlv_sideband_reg.h" struct i915_power_well_regs { i915_reg_t bios; i915_reg_t driver; i915_reg_t kvmr; i915_reg_t debug; }; struct i915_power_well_ops { const struct i915_power_well_regs *regs; /* * Synchronize the well's hw state to match the current sw state, for * example enable/disable it based on the current refcount. Called * during driver init and resume time, possibly after first calling * the enable/disable handlers. */ void (*sync_hw)(struct drm_i915_private *i915, struct i915_power_well *power_well); /* * Enable the well and resources that depend on it (for example * interrupts located on the well). Called after the 0->1 refcount * transition. */ void (*enable)(struct drm_i915_private *i915, struct i915_power_well *power_well); /* * Disable the well and resources that depend on it. Called after * the 1->0 refcount transition. */ void (*disable)(struct drm_i915_private *i915, struct i915_power_well *power_well); /* Returns the hw enabled state. */ bool (*is_enabled)(struct drm_i915_private *i915, struct i915_power_well *power_well); }; static const struct i915_power_well_instance * i915_power_well_instance(const struct i915_power_well *power_well) { return &power_well->desc->instances->list[power_well->instance_idx]; } struct i915_power_well * lookup_power_well(struct drm_i915_private *i915, enum i915_power_well_id power_well_id) { struct i915_power_well *power_well; for_each_power_well(i915, power_well) if (i915_power_well_instance(power_well)->id == power_well_id) return power_well; /* * It's not feasible to add error checking code to the callers since * this condition really shouldn't happen and it doesn't even make sense * to abort things like display initialization sequences. Just return * the first power well and hope the WARN gets reported so we can fix * our driver. 
*/ drm_WARN(&i915->drm, 1, "Power well %d not defined for this platform\n", power_well_id); return &i915->display.power.domains.power_wells[0]; } void intel_power_well_enable(struct drm_i915_private *i915, struct i915_power_well *power_well) { drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well)); power_well->desc->ops->enable(i915, power_well); power_well->hw_enabled = true; } void intel_power_well_disable(struct drm_i915_private *i915, struct i915_power_well *power_well) { drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well)); power_well->hw_enabled = false; power_well->desc->ops->disable(i915, power_well); } void intel_power_well_sync_hw(struct drm_i915_private *i915, struct i915_power_well *power_well) { power_well->desc->ops->sync_hw(i915, power_well); power_well->hw_enabled = power_well->desc->ops->is_enabled(i915, power_well); } void intel_power_well_get(struct drm_i915_private *i915, struct i915_power_well *power_well) { if (!power_well->count++) intel_power_well_enable(i915, power_well); } void intel_power_well_put(struct drm_i915_private *i915, struct i915_power_well *power_well) { drm_WARN(&i915->drm, !power_well->count, "Use count on power well %s is already zero", i915_power_well_instance(power_well)->name); if (!--power_well->count) intel_power_well_disable(i915, power_well); } bool intel_power_well_is_enabled(struct drm_i915_private *i915, struct i915_power_well *power_well) { return power_well->desc->ops->is_enabled(i915, power_well); } bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well) { return power_well->hw_enabled; } bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id) { struct i915_power_well *power_well; power_well = lookup_power_well(dev_priv, power_well_id); return intel_power_well_is_enabled(dev_priv, power_well); } bool intel_power_well_is_always_on(struct i915_power_well *power_well) { return power_well->desc->always_on; } const char *intel_power_well_name(struct i915_power_well *power_well) { return i915_power_well_instance(power_well)->name; } struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) { return &power_well->domains; } int intel_power_well_refcount(struct i915_power_well *power_well) { return power_well->count; } /* * Starting with Haswell, we have a "Power Down Well" that can be turned off * when not needed anymore. We have 4 registers that can request the power well * to be enabled, and it will only be disabled if none of the registers is * requesting it to be enabled. */ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, u8 irq_pipe_mask, bool has_vga) { if (has_vga) intel_vga_reset_io_mem(dev_priv); if (irq_pipe_mask) gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); } static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 irq_pipe_mask) { if (irq_pipe_mask) gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); } #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) { int pw_idx = i915_power_well_instance(power_well)->hsw.idx; return power_well->desc->is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : ICL_AUX_PW_TO_CH(pw_idx); } static struct intel_digital_port * aux_ch_to_digital_port(struct drm_i915_private *dev_priv, enum aux_ch aux_ch) { struct intel_digital_port *dig_port = NULL; struct intel_encoder *encoder; for_each_intel_encoder(&dev_priv->drm, encoder) { /* We'll check the MST primary port */ if (encoder->type == INTEL_OUTPUT_DP_MST) continue; dig_port = enc_to_dig_port(encoder); if (!dig_port) continue; if (dig_port->aux_ch != aux_ch) { dig_port = NULL; continue; } break; } return dig_port; } static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, const struct i915_power_well *power_well) { enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); return intel_port_to_phy(i915, dig_port->base.port); } static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool timeout_expected) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; int timeout = power_well->desc->enable_timeout ? : 1; /* * For some power wells we're not supposed to watch the status bit for * an ack, but rather just wait a fixed amount of time and then * proceed. This is only used on DG2. */ if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) { usleep_range(600, 1200); return; } /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ if (intel_de_wait_for_set(dev_priv, regs->driver, HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", intel_power_well_name(power_well)); drm_WARN_ON(&dev_priv->drm, !timeout_expected); } } static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs, int pw_idx) { u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 ret; ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; if (regs->kvmr.reg) ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; return ret; } static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; bool disabled; u32 reqs; /* * Bspec doesn't require waiting for PWs to get disabled, but still do * this for paranoia. The known cases where a PW will be forced on: * - a KVMR request on any power well via the KVMR request register * - a DMC request on PW1 and MISC_IO power wells via the BIOS and * DEBUG request registers * Skip the wait in case any of the request bits are set and print a * diagnostic message. 
*/ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & HSW_PWR_WELL_CTL_STATE(pw_idx))) || (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); if (disabled) return; drm_dbg_kms(&dev_priv->drm, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", intel_power_well_name(power_well), !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); } static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, enum skl_power_gate pg) { /* Timeout 5us for PG#0, for other PGs 1us */ drm_WARN_ON(&dev_priv->drm, intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, SKL_FUSE_PG_DIST_STATUS(pg), 1)); } static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; if (power_well->desc->has_fuses) { enum skl_power_gate pg; pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); /* Wa_16013190616:adlp */ if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse * state after the enabling. For all other power wells with * fuses we only have to wait for that PW/PG's fuse state * after the enabling. */ if (pg == SKL_PG1) gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well, false); if (power_well->desc->has_fuses) { enum skl_power_gate pg; pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); gen9_wait_for_power_well_fuses(dev_priv, pg); } hsw_power_well_post_enable(dev_priv, power_well->desc->irq_pipe_mask, power_well->desc->has_vga); } static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; hsw_power_well_pre_disable(dev_priv, power_well->desc->irq_pipe_mask); intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port) { struct intel_encoder *encoder; for_each_intel_encoder(&i915->drm, encoder) { if (encoder->type == INTEL_OUTPUT_EDP && encoder->port == port) return true; } return false; } static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); if (DISPLAY_VER(dev_priv) < 12) intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), 0, ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && !intel_port_is_edp(dev_priv, (enum port)phy)) intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS); } static void icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct 
i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0); intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, struct intel_digital_port *dig_port) { if (drm_WARN_ON(&dev_priv->drm, !dig_port)) return; if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) return; drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); } #else static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, struct intel_digital_port *dig_port) { } #endif #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) static void icl_tc_cold_exit(struct drm_i915_private *i915) { int ret, tries = 0; while (1) { ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0, 250, 1); if (ret != -EAGAIN || ++tries == 3) break; msleep(1); } /* Spec states that TC cold exit can take up to 1ms to complete */ if (!ret) msleep(1); /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : "succeeded"); } static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); const struct i915_power_well_regs *regs = power_well->desc->ops->regs; bool is_tbt = power_well->desc->is_tc_tbt; bool timeout_expected; icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch), DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0); intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); /* * An AUX timeout is expected if the TBT DP tunnel is down, * or need to enable AUX on a legacy TypeC port as part of the TC-cold * exit sequence. 
*/ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) icl_tc_cold_exit(dev_priv); hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { enum tc_port tc_port; tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) & DKL_CMN_UC_DW27_UC_HEALTH, 1)) drm_warn(&dev_priv->drm, "Timeout waiting TC uC health\n"); } } static void icl_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); if (intel_phy_is_tc(dev_priv, phy)) return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); else if (IS_ICELAKE(dev_priv)) return icl_combo_phy_aux_power_well_enable(dev_priv, power_well); else return hsw_power_well_enable(dev_priv, power_well); } static void icl_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); if (intel_phy_is_tc(dev_priv, phy)) return hsw_power_well_disable(dev_priv, power_well); else if (IS_ICELAKE(dev_priv)) return icl_combo_phy_aux_power_well_disable(dev_priv, power_well); else return hsw_power_well_disable(dev_priv, power_well); } /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to * be enabled. */ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | HSW_PWR_WELL_CTL_STATE(pw_idx); u32 val; val = intel_de_read(dev_priv, regs->driver); /* * On GEN9 big core due to a DMC bug the driver's request bits for PW1 * and the MISC_IO PW will be not restored, so check instead for the * BIOS's own request bits, which are forced-on for these power wells * when exiting DC5/6. */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) val |= intel_de_read(dev_priv, regs->bios); return (val & mask) == mask; } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { drm_WARN_ONCE(&dev_priv->drm, (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n"); drm_WARN_ONCE(&dev_priv->drm, intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); drm_WARN_ONCE(&dev_priv->drm, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n"); drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); /* * TODO: check for the following to verify the conditions to enter DC9 * state are satisfied: * 1] Check relevant display engine registers to verify if mode set * disable sequence was followed. * 2] Check if display uninitialize sequence is initialized. 
*/ } static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) { drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); drm_WARN_ONCE(&dev_priv->drm, intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled.\n"); /* * TODO: check for the following to verify DC9 state was indeed * entered before programming to disable it: * 1] Check relevant display engine registers to verify if mode * set disable sequence was followed. * 2] Check if display uninitialize sequence is initialized. */ } static void gen9_write_dc_state(struct drm_i915_private *dev_priv, u32 state) { int rewrites = 0; int rereads = 0; u32 v; intel_de_write(dev_priv, DC_STATE_EN, state); /* It has been observed that disabling the dc6 state sometimes * doesn't stick and dmc keeps returning old value. Make sure * the write really sticks enough times and also force rewrite until * we are confident that state is exactly what we want. */ do { v = intel_de_read(dev_priv, DC_STATE_EN); if (v != state) { intel_de_write(dev_priv, DC_STATE_EN, state); rewrites++; rereads = 0; } else if (rereads++ > 5) { break; } } while (rewrites < 100); if (v != state) drm_err(&dev_priv->drm, "Writing dc state to 0x%x failed, now 0x%x\n", state, v); /* Most of the times we need one retry, avoid spam */ if (rewrites > 1) drm_dbg_kms(&dev_priv->drm, "Rewrote dc state to 0x%x %d times\n", state, rewrites); } static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) { u32 mask; mask = DC_STATE_EN_UPTO_DC5; if (DISPLAY_VER(dev_priv) >= 12) mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; else if (DISPLAY_VER(dev_priv) == 11) mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) mask |= DC_STATE_EN_DC9; else mask |= DC_STATE_EN_UPTO_DC6; return mask; } void gen9_sanitize_dc_state(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->display.power.domains; u32 val; if (!HAS_DISPLAY(i915)) return; val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); drm_dbg_kms(&i915->drm, "Resetting DC state tracking from %02x to %02x\n", power_domains->dc_state, val); power_domains->dc_state = val; } /** * gen9_set_dc_state - set target display C power state * @dev_priv: i915 device instance * @state: target DC power state * - DC_STATE_DISABLE * - DC_STATE_EN_UPTO_DC5 * - DC_STATE_EN_UPTO_DC6 * - DC_STATE_EN_DC9 * * Signal to DMC firmware/HW the target DC power state passed in @state. * DMC/HW can turn off individual display clocks and power rails when entering * a deeper DC power state (higher in number) and turns these back when exiting * that state to a shallower power state (lower in number). The HW will decide * when to actually enter a given state on an on-demand basis, for instance * depending on the active state of display pipes. The state of display * registers backed by affected power rails are saved/restored as needed. * * Based on the above enabling a deeper DC power state is asynchronous wrt. * enabling it. Disabling a deeper power state is synchronous: for instance * setting %DC_STATE_DISABLE won't complete until all HW resources are turned * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored. 
*/ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 val; u32 mask; if (!HAS_DISPLAY(dev_priv)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, state & ~power_domains->allowed_dc_mask)) state &= power_domains->allowed_dc_mask; val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", val & mask, state); /* Check if DMC is ignoring our DC state requests */ if ((val & mask) != power_domains->dc_state) drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", power_domains->dc_state, val & mask); val &= ~mask; val |= state; gen9_write_dc_state(dev_priv, val); power_domains->dc_state = val & mask; } static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); } static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 */ usleep_range(200, 210); } static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { enum i915_power_well_id high_pg; /* Power wells at this level and above must be disabled for DC5 entry */ if (DISPLAY_VER(dev_priv) == 12) high_pg = ICL_DISP_PW_3; else high_pg = SKL_DISP_PW_2; drm_WARN_ONCE(&dev_priv->drm, intel_display_power_well_is_enabled(dev_priv, high_pg), "Power wells above platform's DC5 limit still enabled.\n"); drm_WARN_ONCE(&dev_priv->drm, (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); assert_rpm_wakelock_held(&dev_priv->runtime_pm); assert_dmc_loaded(dev_priv); } void gen9_enable_dc5(struct drm_i915_private *dev_priv) { assert_can_enable_dc5(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) { drm_WARN_ONCE(&dev_priv->drm, (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), "Utility pin enabled in PWM mode\n"); drm_WARN_ONCE(&dev_priv->drm, (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), "DC6 already programmed to be enabled.\n"); assert_dmc_loaded(dev_priv); } void skl_enable_dc6(struct drm_i915_private *dev_priv) { assert_can_enable_dc6(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } void bxt_enable_dc9(struct drm_i915_private *dev_priv) { assert_can_enable_dc9(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); /* * Power sequencer reset is not needed on * platforms with South Display Engine on PCH, * because PPS registers are always on. 
*/ if (!HAS_PCH_SPLIT(dev_priv)) intel_pps_reset_all(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); } void bxt_disable_dc9(struct drm_i915_private *dev_priv) { assert_can_disable_dc9(dev_priv); drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); intel_pps_unlock_regs_wa(dev_priv); } static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 bios_req = intel_de_read(dev_priv, regs->bios); /* Take over the request bit if set by BIOS. */ if (bios_req & mask) { u32 drv_req = intel_de_read(dev_priv, regs->driver); if (!(drv_req & mask)) intel_de_write(dev_priv, regs->driver, drv_req | mask); intel_de_write(dev_priv, regs->bios, bios_req & ~mask); } } static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) { struct i915_power_well *power_well; power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (intel_power_well_refcount(power_well) > 0) bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (intel_power_well_refcount(power_well) > 0) bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DISP_PW_DPIO_CMN_C); if (intel_power_well_refcount(power_well) > 0) bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); } } static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); } static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) { u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices; drm_WARN(&dev_priv->drm, hw_enabled_dbuf_slices != enabled_dbuf_slices, "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", hw_enabled_dbuf_slices, enabled_dbuf_slices); } void gen9_disable_dc_states(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_cdclk_config cdclk_config = {}; if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); return; } gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); if (!HAS_DISPLAY(dev_priv)) return; intel_cdclk_get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ drm_WARN_ON(&dev_priv->drm, intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); 
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_verify_ddi_phy_power_wells(dev_priv); if (DISPLAY_VER(dev_priv) >= 11) /* * DMC retains HW context only for port A, the other combo * PHY's HW context for port B is lost after DC transitions, * so we need to restore it manually. */ intel_combo_phy_init(dev_priv); } static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { gen9_disable_dc_states(dev_priv); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; if (!intel_dmc_has_payload(dev_priv)) return; switch (power_domains->target_dc_state) { case DC_STATE_EN_DC3CO: tgl_enable_dc3co(dev_priv); break; case DC_STATE_EN_UPTO_DC6: skl_enable_dc6(dev_priv); break; case DC_STATE_EN_UPTO_DC5: gen9_enable_dc5(dev_priv); break; } } static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { } static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { } static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return true; } static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_A); if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_B); } static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { i830_disable_pipe(dev_priv, PIPE_B); i830_disable_pipe(dev_priv, PIPE_A); } static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE && intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { if (intel_power_well_refcount(power_well) > 0) i830_pipes_power_well_enable(dev_priv, power_well); else i830_pipes_power_well_disable(dev_priv, power_well); } static void vlv_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { int pw_idx = i915_power_well_instance(power_well)->vlv.idx; u32 mask; u32 state; u32 ctrl; mask = PUNIT_PWRGT_MASK(pw_idx); state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : PUNIT_PWRGT_PWR_GATE(pw_idx); vlv_punit_get(dev_priv); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) if (COND) goto out; ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); ctrl &= ~mask; ctrl |= state; vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); if (wait_for(COND, 100)) drm_err(&dev_priv->drm, "timeout setting power well state %08x (%08x)\n", state, vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); #undef COND out: vlv_punit_put(dev_priv); } static void vlv_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { vlv_set_power_well(dev_priv, power_well, true); } static void vlv_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { vlv_set_power_well(dev_priv, power_well, false); } static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { int pw_idx = i915_power_well_instance(power_well)->vlv.idx; bool enabled = false; u32 mask; u32 state; u32 ctrl; mask = PUNIT_PWRGT_MASK(pw_idx); ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); vlv_punit_get(dev_priv); state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; /* * We only ever set the power-on and power-gate states, anything * else is unexpected. */ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && state != PUNIT_PWRGT_PWR_GATE(pw_idx)); if (state == ctrl) enabled = true; /* * A transient state at this point would mean some unexpected party * is poking at the power controls too. */ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; drm_WARN_ON(&dev_priv->drm, ctrl != state); vlv_punit_put(dev_priv); return enabled; } static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) { /* * On driver load, a pipe may be active and driving a DSI display. * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); /* * Disable trickle feed and enable pnd deadline calculation */ intel_de_write(dev_priv, MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); intel_de_write(dev_priv, CBR1_VLV, 0); drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); intel_de_write(dev_priv, RAWCLK_FREQ_VLV, DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; enum pipe pipe; /* * Enable the CRI clock source so we can get at the * display and the reference clock for VGA * hotplug / manual detection. Supposedly DSI also * needs the ref clock up and running. * * CHV DPLL B/C have some issues if VGA mode is enabled. */ for_each_pipe(dev_priv, pipe) { u32 val = intel_de_read(dev_priv, DPLL(pipe)); val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; intel_de_write(dev_priv, DPLL(pipe), val); } vlv_init_display_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); valleyview_enable_display_irqs(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); /* * During driver initialization/resume we can avoid restoring the * part of the HW/SW state that will be inited anyway explicitly. 
*/ if (dev_priv->display.power.domains.initializing) return; intel_hpd_init(dev_priv); intel_hpd_poll_disable(dev_priv); /* Re-enable the ADPA, if we have one */ for_each_intel_encoder(&dev_priv->drm, encoder) { if (encoder->type == INTEL_OUTPUT_ANALOG) intel_crt_reset(&encoder->base); } intel_vga_redisable_power_on(dev_priv); intel_pps_unlock_regs_wa(dev_priv); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); valleyview_disable_display_irqs(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ intel_synchronize_irq(dev_priv); intel_pps_reset_all(dev_priv); /* Prevent us from re-enabling polling on accident in late suspend */ if (!dev_priv->drm.dev->power.is_suspended) intel_hpd_poll_enable(dev_priv); } static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { vlv_set_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); } static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { vlv_display_power_well_deinit(dev_priv); vlv_set_power_well(dev_priv, power_well, false); } static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { /* since ref/cri clock was enabled */ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ vlv_set_power_well(dev_priv, power_well, true); /* * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. * a. GUnit 0x2110 bit[0] set to 1 (def 0) * b. The other bits such as sfr settings / modesel may all * be set to 0. * * This should only be done on init and resume from S3 with * both PLLs disabled, or we risk losing DPIO and PLL * synchronization. */ intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST); } static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum pipe pipe; for_each_pipe(dev_priv, pipe) assert_pll_disabled(dev_priv, pipe); /* Assert common reset */ intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0); vlv_set_power_well(dev_priv, power_well, false); } #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) static void assert_chv_phy_status(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); u32 phy_control = dev_priv->display.power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; /* * The BIOS can leave the PHY is some weird state * where it doesn't fully power down some parts. * Disable the asserts until the PHY has been fully * reset (ie. the power well has been disabled at * least once). 
*/ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { phy_status |= PHY_POWERGOOD(DPIO_PHY0); /* this assumes override is only used to enable lanes */ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); /* CL1 is on whenever anything is on in either channel */ if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); /* * The DPLLB check accounts for the pipe B + port A usage * with CL2 powered up but all the lanes in the second channel * powered down. */ if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); } if (intel_power_well_is_enabled(dev_priv, cmn_d)) { phy_status |= PHY_POWERGOOD(DPIO_PHY1); /* this assumes override is only used to enable lanes */ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); } phy_status &= phy_status_mask; /* * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. 
*/ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, 10)) drm_err(&dev_priv->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, phy_status, dev_priv->display.power.chv_phy_control); } #undef BITS_SET static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; enum pipe pipe; u32 tmp; drm_WARN_ON_ONCE(&dev_priv->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); if (id == VLV_DISP_PW_DPIO_CMN_BC) { pipe = PIPE_A; phy = DPIO_PHY0; } else { pipe = PIPE_C; phy = DPIO_PHY1; } /* since ref/cri clock was enabled */ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), 1)) drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", phy); vlv_dpio_get(dev_priv); /* Enable dynamic power down */ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); if (id == VLV_DISP_PW_DPIO_CMN_BC) { tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); } else { /* * Force the non-existing CL2 off. BXT does this * too, so maybe it saves some power even though * CL2 doesn't exist? */ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); tmp |= DPIO_CL2_LDOFUSE_PWRENB; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); } vlv_dpio_put(dev_priv); dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", phy, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); } static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum i915_power_well_id id = i915_power_well_instance(power_well)->id; enum dpio_phy phy; drm_WARN_ON_ONCE(&dev_priv->drm, id != VLV_DISP_PW_DPIO_CMN_BC && id != CHV_DISP_PW_DPIO_CMN_D); if (id == VLV_DISP_PW_DPIO_CMN_BC) { phy = DPIO_PHY0; assert_pll_disabled(dev_priv, PIPE_A); assert_pll_disabled(dev_priv, PIPE_B); } else { phy = DPIO_PHY1; assert_pll_disabled(dev_priv, PIPE_C); } dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, dev_priv->display.power.chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); drm_dbg_kms(&dev_priv->drm, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", phy, dev_priv->display.power.chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ dev_priv->display.power.chv_phy_assert[phy] = true; assert_chv_phy_status(dev_priv); } static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override, unsigned int mask) { enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; u32 reg, val, expected, actual; /* * The BIOS can leave the PHY is some weird state * where it doesn't fully power down some parts. * Disable the asserts until the PHY has been fully * reset (ie. 
the power well has been disabled at * least once). */ if (!dev_priv->display.power.chv_phy_assert[phy]) return; if (ch == DPIO_CH0) reg = _CHV_CMN_DW0_CH0; else reg = _CHV_CMN_DW6_CH1; vlv_dpio_get(dev_priv); val = vlv_dpio_read(dev_priv, pipe, reg); vlv_dpio_put(dev_priv); /* * This assumes !override is only used when the port is disabled. * All lanes should power down even without the override when * the port is disabled. */ if (!override || mask == 0xf) { expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; /* * If CH1 common lane is not active anymore * (eg. for pipe B DPLL) the entire channel will * shut down, which causes the common lane registers * to read as 0. That means we can't actually check * the lane power down status bits, but as the entire * register reads as 0 it's a good indication that the * channel is indeed entirely powered down. */ if (ch == DPIO_CH1 && val == 0) expected = 0; } else if (mask != 0x0) { expected = DPIO_ANYDL_POWERDOWN; } else { expected = 0; } if (ch == DPIO_CH0) actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; else actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; drm_WARN(&dev_priv->drm, actual != expected, "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), reg, val); } bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override) { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; bool was_override; mutex_lock(&power_domains->lock); was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); if (override == was_override) goto out; if (override) dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", phy, ch, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); out: mutex_unlock(&power_domains->lock); return was_override; } void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_power_domains *power_domains = &dev_priv->display.power.domains; enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); if (override) dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", phy, ch, mask, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); mutex_unlock(&power_domains->lock); } static bool chv_pipe_power_well_enabled(struct drm_i915_private 
*dev_priv, struct i915_power_well *power_well) { enum pipe pipe = PIPE_A; bool enabled; u32 state, ctrl; vlv_punit_get(dev_priv); state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); /* * We only ever set the power-on and power-gate states, anything * else is unexpected. */ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); enabled = state == DP_SSS_PWR_ON(pipe); /* * A transient state at this point would mean some unexpected party * is poking at the power controls too. */ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); vlv_punit_put(dev_priv); return enabled; } static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { enum pipe pipe = PIPE_A; u32 state; u32 ctrl; state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); vlv_punit_get(dev_priv); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) if (COND) goto out; ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); ctrl &= ~DP_SSC_MASK(pipe); ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); if (wait_for(COND, 100)) drm_err(&dev_priv->drm, "timeout setting power well state %08x (%08x)\n", state, vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); #undef COND out: vlv_punit_put(dev_priv); } static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, dev_priv->display.power.chv_phy_control); } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { chv_set_pipe_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); } static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { vlv_display_power_well_deinit(dev_priv); chv_set_pipe_power_well(dev_priv, power_well, false); } static void tgl_tc_cold_request(struct drm_i915_private *i915, bool block) { u8 tries = 0; int ret; while (1) { u32 low_val; u32 high_val = 0; if (block) low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; else low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; /* * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us */ ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block && (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) ret = -EIO; else break; } if (++tries == 3) break; msleep(1); } if (ret) drm_err(&i915->drm, "TC cold %sblock failed\n", block ? "" : "un"); else drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", block ? 
"" : "un"); } static void tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, struct i915_power_well *power_well) { tgl_tc_cold_request(i915, true); } static void tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, struct i915_power_well *power_well) { tgl_tc_cold_request(i915, false); } static void tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, struct i915_power_well *power_well) { if (intel_power_well_refcount(power_well) > 0) tgl_tc_cold_off_power_well_enable(i915, power_well); else tgl_tc_cold_off_power_well_disable(i915, power_well); } static bool tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { /* * Not the correctly implementation but there is no way to just read it * from PCODE, so returning count to avoid state mismatch errors */ return intel_power_well_refcount(power_well); } static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); /* * The power status flag cannot be used to determine whether aux * power wells have finished powering up. Instead we're * expected to just wait a fixed 600us after raising the request * bit. */ usleep_range(600, 1200); } static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 0); usleep_range(10, 30); } static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) & XELPDP_DP_AUX_CH_CTL_POWER_STATUS; } const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, .disable = i9xx_always_on_power_well_noop, .is_enabled = i9xx_always_on_power_well_enabled, }; const struct i915_power_well_ops chv_pipe_power_well_ops = { .sync_hw = chv_pipe_power_well_sync_hw, .enable = chv_pipe_power_well_enable, .disable = chv_pipe_power_well_disable, .is_enabled = chv_pipe_power_well_enabled, }; const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = chv_dpio_cmn_power_well_enable, .disable = chv_dpio_cmn_power_well_disable, .is_enabled = vlv_power_well_enabled, }; const struct i915_power_well_ops i830_pipes_power_well_ops = { .sync_hw = i830_pipes_power_well_sync_hw, .enable = i830_pipes_power_well_enable, .disable = i830_pipes_power_well_disable, .is_enabled = i830_pipes_power_well_enabled, }; static const struct i915_power_well_regs hsw_power_well_regs = { .bios = HSW_PWR_WELL_CTL1, .driver = HSW_PWR_WELL_CTL2, .kvmr = HSW_PWR_WELL_CTL3, .debug = HSW_PWR_WELL_CTL4, }; const struct i915_power_well_ops hsw_power_well_ops = { .regs = &hsw_power_well_regs, .sync_hw = hsw_power_well_sync_hw, .enable = hsw_power_well_enable, .disable = hsw_power_well_disable, .is_enabled = hsw_power_well_enabled, }; const struct i915_power_well_ops gen9_dc_off_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = gen9_dc_off_power_well_enable, .disable = 
gen9_dc_off_power_well_disable, .is_enabled = gen9_dc_off_power_well_enabled, }; const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = bxt_dpio_cmn_power_well_enable, .disable = bxt_dpio_cmn_power_well_disable, .is_enabled = bxt_dpio_cmn_power_well_enabled, }; const struct i915_power_well_ops vlv_display_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = vlv_display_power_well_enable, .disable = vlv_display_power_well_disable, .is_enabled = vlv_power_well_enabled, }; const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = vlv_dpio_cmn_power_well_enable, .disable = vlv_dpio_cmn_power_well_disable, .is_enabled = vlv_power_well_enabled, }; const struct i915_power_well_ops vlv_dpio_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = vlv_power_well_enable, .disable = vlv_power_well_disable, .is_enabled = vlv_power_well_enabled, }; static const struct i915_power_well_regs icl_aux_power_well_regs = { .bios = ICL_PWR_WELL_CTL_AUX1, .driver = ICL_PWR_WELL_CTL_AUX2, .debug = ICL_PWR_WELL_CTL_AUX4, }; const struct i915_power_well_ops icl_aux_power_well_ops = { .regs = &icl_aux_power_well_regs, .sync_hw = hsw_power_well_sync_hw, .enable = icl_aux_power_well_enable, .disable = icl_aux_power_well_disable, .is_enabled = hsw_power_well_enabled, }; static const struct i915_power_well_regs icl_ddi_power_well_regs = { .bios = ICL_PWR_WELL_CTL_DDI1, .driver = ICL_PWR_WELL_CTL_DDI2, .debug = ICL_PWR_WELL_CTL_DDI4, }; const struct i915_power_well_ops icl_ddi_power_well_ops = { .regs = &icl_ddi_power_well_regs, .sync_hw = hsw_power_well_sync_hw, .enable = hsw_power_well_enable, .disable = hsw_power_well_disable, .is_enabled = hsw_power_well_enabled, }; const struct i915_power_well_ops tgl_tc_cold_off_ops = { .sync_hw = tgl_tc_cold_off_power_well_sync_hw, .enable = tgl_tc_cold_off_power_well_enable, .disable = tgl_tc_cold_off_power_well_disable, .is_enabled = tgl_tc_cold_off_power_well_is_enabled, }; const struct i915_power_well_ops xelpdp_aux_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = xelpdp_aux_power_well_enable, .disable = xelpdp_aux_power_well_disable, .is_enabled = xelpdp_aux_power_well_enabled, };
linux-master
drivers/gpu/drm/i915/display/intel_display_power_well.c
// SPDX-License-Identifier: MIT /* * Copyright 2023, Intel Corporation. */ #include <drm/i915_hdcp_interface.h> #include "gem/i915_gem_region.h" #include "gt/intel_gt.h" #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" #include "i915_drv.h" #include "i915_utils.h" #include "intel_hdcp_gsc.h" bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 14; } static int gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_init *ake_data) { struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; struct wired_cmd_initiate_hdcp2_session_out session_init_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !ake_data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } session_init_in.header.api_version = HDCP_API_VERSION; session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; session_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; session_init_in.port.integrated_port_type = data->port_type; session_init_in.port.physical_port = (u8)data->hdcp_ddi; session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; session_init_in.protocol = data->protocol; byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in, sizeof(session_init_in), (u8 *)&session_init_out, sizeof(session_init_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_INITIATE_HDCP2_SESSION, session_init_out.header.status); return -EIO; } ake_data->msg_id = HDCP_2_2_AKE_INIT; ake_data->tx_caps = session_init_out.tx_caps; memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); return 0; } static int gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_send_cert *rx_cert, bool *km_stored, struct hdcp2_ake_no_stored_km *ek_pub_km, size_t *msg_sz) { struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } verify_rxcert_in.header.api_version = HDCP_API_VERSION; verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_rxcert_in.header.buffer_len = WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; verify_rxcert_in.port.integrated_port_type = data->port_type; verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; verify_rxcert_in.cert_rx = rx_cert->cert_rx; memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in, sizeof(verify_rxcert_in), (u8 *)&verify_rxcert_out, sizeof(verify_rxcert_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); return byte; } if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", WIRED_VERIFY_RECEIVER_CERT, verify_rxcert_out.header.status); return -EIO; } *km_stored = !!verify_rxcert_out.km_stored; if (verify_rxcert_out.km_stored) { ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; *msg_sz = sizeof(struct hdcp2_ake_stored_km); } else { ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); } memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, sizeof(verify_rxcert_out.ekm_buff)); return 0; } static int gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_send_hprime *rx_hprime) { struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_hprime) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } send_hprime_in.header.api_version = HDCP_API_VERSION; send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; send_hprime_in.port.integrated_port_type = data->port_type; send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, HDCP_2_2_H_PRIME_LEN); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in, sizeof(send_hprime_in), (u8 *)&send_hprime_out, sizeof(send_hprime_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); return -EIO; } return 0; } static int gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ake_send_pairing_info *pairing_info) { struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !pairing_info) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } pairing_info_in.header.api_version = HDCP_API_VERSION; pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; pairing_info_in.header.buffer_len = WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; pairing_info_in.port.integrated_port_type = data->port_type; pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, HDCP_2_2_E_KH_KM_LEN); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in, sizeof(pairing_info_in), (u8 *)&pairing_info_out, sizeof(pairing_info_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
Status: 0x%X\n", WIRED_AKE_SEND_PAIRING_INFO, pairing_info_out.header.status); return -EIO; } return 0; } static int gsc_hdcp_initiate_locality_check(struct device *dev, struct hdcp_port_data *data, struct hdcp2_lc_init *lc_init_data) { struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !lc_init_data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } lc_init_in.header.api_version = HDCP_API_VERSION; lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; lc_init_in.port.integrated_port_type = data->port_type; lc_init_in.port.physical_port = (u8)data->hdcp_ddi; lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), (u8 *)&lc_init_out, sizeof(lc_init_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); return -EIO; } lc_init_data->msg_id = HDCP_2_2_LC_INIT; memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); return 0; } static int gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, struct hdcp2_lc_send_lprime *rx_lprime) { struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !rx_lprime) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } verify_lprime_in.header.api_version = HDCP_API_VERSION; verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_lprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; verify_lprime_in.port.integrated_port_type = data->port_type; verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, HDCP_2_2_L_PRIME_LEN); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in, sizeof(verify_lprime_in), (u8 *)&verify_lprime_out, sizeof(verify_lprime_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", WIRED_VALIDATE_LOCALITY, verify_lprime_out.header.status); return -EIO; } return 0; } static int gsc_hdcp_get_session_key(struct device *dev, struct hdcp_port_data *data, struct hdcp2_ske_send_eks *ske_data) { struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data || !ske_data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } get_skey_in.header.api_version = HDCP_API_VERSION; get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; get_skey_in.port.integrated_port_type = data->port_type; get_skey_in.port.physical_port = (u8)data->hdcp_ddi; get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), (u8 *)&get_skey_out, sizeof(get_skey_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_GET_SESSION_KEY, get_skey_out.header.status); return -EIO; } ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, HDCP_2_2_E_DKEY_KS_LEN); memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); return 0; } static int gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev, struct hdcp_port_data *data, struct hdcp2_rep_send_receiverid_list *rep_topology, struct hdcp2_rep_send_ack *rep_send_ack) { struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !rep_topology || !rep_send_ack || !data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } verify_repeater_in.header.api_version = HDCP_API_VERSION; verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_repeater_in.header.buffer_len = WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; verify_repeater_in.port.integrated_port_type = data->port_type; verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, HDCP_2_2_RXINFO_LEN); memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, HDCP_2_2_SEQ_NUM_LEN); memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, HDCP_2_2_V_PRIME_HALF_LEN); memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, HDCP_2_2_RECEIVER_IDS_MAX_LEN); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in, sizeof(verify_repeater_in), (u8 *)&verify_repeater_out, sizeof(verify_repeater_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", WIRED_VERIFY_REPEATER, verify_repeater_out.header.status); return -EIO; } memcpy(rep_send_ack->v, verify_repeater_out.v, HDCP_2_2_V_PRIME_HALF_LEN); rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; return 0; } static int gsc_hdcp_verify_mprime(struct device *dev, struct hdcp_port_data *data, struct hdcp2_rep_stream_ready *stream_ready) { struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; size_t cmd_size; if (!dev || !stream_ready || !data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } cmd_size = struct_size(verify_mprime_in, streams, data->k); if (cmd_size == SIZE_MAX) return -EINVAL; verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); if (!verify_mprime_in) return -ENOMEM; verify_mprime_in->header.api_version = HDCP_API_VERSION; verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); verify_mprime_in->port.integrated_port_type = data->port_type; verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); memcpy(verify_mprime_in->streams, data->streams, array_size(data->k, sizeof(*data->streams))); verify_mprime_in->k = cpu_to_be16(data->k); byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size, (u8 *)&verify_mprime_out, sizeof(verify_mprime_out)); kfree(verify_mprime_in); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", WIRED_REPEATER_AUTH_STREAM_REQ, verify_mprime_out.header.status); return -EIO; } return 0; } static int gsc_hdcp_enable_authentication(struct device *dev, struct hdcp_port_data *data) { struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } enable_auth_in.header.api_version = HDCP_API_VERSION; enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; enable_auth_in.port.integrated_port_type = data->port_type; enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; enable_auth_in.stream_type = data->streams[0].stream_type; byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in, sizeof(enable_auth_in), (u8 *)&enable_auth_out, sizeof(enable_auth_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", WIRED_ENABLE_AUTH, enable_auth_out.header.status); return -EIO; } return 0; } static int gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) { struct wired_cmd_close_session_in session_close_in = { { 0 } }; struct wired_cmd_close_session_out session_close_out = { { 0 } }; struct drm_i915_private *i915; ssize_t byte; if (!dev || !data) return -EINVAL; i915 = kdev_to_i915(dev); if (!i915) { dev_err(dev, "DRM not initialized, aborting HDCP.\n"); return -ENODEV; } session_close_in.header.api_version = HDCP_API_VERSION; session_close_in.header.command_id = WIRED_CLOSE_SESSION; session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; session_close_in.header.buffer_len = WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; session_close_in.port.integrated_port_type = data->port_type; session_close_in.port.physical_port = (u8)data->hdcp_ddi; session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in, sizeof(session_close_in), (u8 *)&session_close_out, sizeof(session_close_out)); if (byte < 0) { drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); return byte; } if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", session_close_out.header.status); return -EIO; } return 0; } static const struct i915_hdcp_ops gsc_hdcp_ops = { .initiate_hdcp2_session = gsc_hdcp_initiate_session, .verify_receiver_cert_prepare_km = gsc_hdcp_verify_receiver_cert_prepare_km, .verify_hprime = gsc_hdcp_verify_hprime, .store_pairing_info = gsc_hdcp_store_pairing_info, .initiate_locality_check = gsc_hdcp_initiate_locality_check, .verify_lprime = gsc_hdcp_verify_lprime, .get_session_key = gsc_hdcp_get_session_key, .repeater_check_flow_prepare_ack = gsc_hdcp_repeater_check_flow_prepare_ack, .verify_mprime = gsc_hdcp_verify_mprime, .enable_hdcp_authentication = gsc_hdcp_enable_authentication, .close_hdcp_session = gsc_hdcp_close_session, }; /*This function helps allocate memory for the command that we will send to gsc cs */ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, struct intel_hdcp_gsc_message *hdcp_message) { struct intel_gt *gt = i915->media_gt; struct drm_i915_gem_object *obj = NULL; struct i915_vma *vma = NULL; void *cmd_in, *cmd_out; int err; /* allocate object of two page for HDCP command memory and store it */ obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE); if (IS_ERR(obj)) { drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n"); return PTR_ERR(obj); } cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true)); if (IS_ERR(cmd_in)) { drm_err(&i915->drm, "Failed to map gsc message page!\n"); err = PTR_ERR(cmd_in); goto out_unpin; } cmd_out = cmd_in + PAGE_SIZE; vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_unmap; } err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); if (err) goto out_unmap; memset(cmd_in, 0, obj->base.size); hdcp_message->hdcp_cmd_in = cmd_in; hdcp_message->hdcp_cmd_out = cmd_out; hdcp_message->vma = vma; return 0; out_unmap: i915_gem_object_unpin_map(obj); out_unpin: i915_gem_object_put(obj); return err; } static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) { struct intel_hdcp_gsc_message *hdcp_message; int ret; hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL); if (!hdcp_message) return -ENOMEM; /* * NOTE: No need to lock the comp mutex here as it is already * going 
to be taken before this function called */ i915->display.hdcp.hdcp_message = hdcp_message; ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message); if (ret) drm_err(&i915->drm, "Could not initialize hdcp_message\n"); return ret; } static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) { struct intel_hdcp_gsc_message *hdcp_message = i915->display.hdcp.hdcp_message; hdcp_message->hdcp_cmd_in = NULL; hdcp_message->hdcp_cmd_out = NULL; i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP); kfree(hdcp_message); } int intel_hdcp_gsc_init(struct drm_i915_private *i915) { struct i915_hdcp_arbiter *data; int ret; data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL); if (!data) return -ENOMEM; mutex_lock(&i915->display.hdcp.hdcp_mutex); i915->display.hdcp.arbiter = data; i915->display.hdcp.arbiter->hdcp_dev = i915->drm.dev; i915->display.hdcp.arbiter->ops = &gsc_hdcp_ops; ret = intel_hdcp_gsc_hdcp2_init(i915); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } void intel_hdcp_gsc_fini(struct drm_i915_private *i915) { intel_hdcp_gsc_free_message(i915); kfree(i915->display.hdcp.arbiter); } static int intel_gsc_send_sync(struct drm_i915_private *i915, struct intel_gsc_mtl_header *header_in, struct intel_gsc_mtl_header *header_out, u64 addr_in, u64 addr_out, size_t msg_out_len) { struct intel_gt *gt = i915->media_gt; int ret; ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr_in, header_in->message_size, addr_out, msg_out_len + sizeof(*header_out)); if (ret) { drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret); return ret; } /* * Checking validity marker and header status to see if some error has * blocked us from sending message to gsc cs */ if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) { drm_err(&i915->drm, "invalid validity marker\n"); return -EINVAL; } if (header_out->status != 0) { drm_err(&i915->drm, "header status indicates error %d\n", header_out->status); return -EINVAL; } if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) { header_in->gsc_message_handle = header_out->gsc_message_handle; return -EAGAIN; } return 0; } /* * This function can now be used for sending requests and will also handle * receipt of reply messages hence no different function of message retrieval * is required. 
We will initialize intel_hdcp_gsc_message structure then add * gsc cs memory header as stated in specs after which the normal HDCP payload * will follow */ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_in_len, u8 *msg_out, size_t msg_out_len) { struct intel_gt *gt = i915->media_gt; struct intel_gsc_mtl_header *header_in, *header_out; const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in); struct intel_hdcp_gsc_message *hdcp_message; u64 addr_in, addr_out, host_session_id; u32 reply_size, msg_size_in, msg_size_out; int ret, tries = 0; if (!intel_uc_uses_gsc_uc(&gt->uc)) return -ENODEV; if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) return -ENOSPC; msg_size_in = msg_in_len + sizeof(*header_in); msg_size_out = msg_out_len + sizeof(*header_out); hdcp_message = i915->display.hdcp.hdcp_message; header_in = hdcp_message->hdcp_cmd_in; header_out = hdcp_message->hdcp_cmd_out; addr_in = i915_ggtt_offset(hdcp_message->vma); addr_out = addr_in + PAGE_SIZE; memset(header_in, 0, msg_size_in); memset(header_out, 0, msg_size_out); get_random_bytes(&host_session_id, sizeof(u64)); intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP, msg_size_in, host_session_id); memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len); /* * Keep sending request in case the pending bit is set no need to add * message handle as we are using same address hence loc. of header is * same and it will contain the message handle. we will send the message * 20 times each message 50 ms apart */ do { ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in, addr_out, msg_out_len); /* Only try again if gsc says so */ if (ret != -EAGAIN) break; msleep(50); } while (++tries < 20); if (ret) goto err; /* we use the same mem for the reply, so header is in the same loc */ reply_size = header_out->message_size - sizeof(*header_out); if (reply_size > msg_out_len) { drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n", reply_size, (u32)msg_out_len); reply_size = msg_out_len; } else if (reply_size != msg_out_len) { drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n", reply_size, (u32)msg_out_len); } memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len); err: return ret; }
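/*
 * Illustrative sketch only (added commentary, not part of the original
 * file): the two-page buffer layout assumed by intel_hdcp_gsc_msg_send()
 * above, derived from the code in this file. The helpers below are
 * hypothetical conveniences and do not exist in the driver.
 *
 *   page 0 (hdcp_cmd_in):  [ intel_gsc_mtl_header | HDCP request payload ]
 *   page 1 (hdcp_cmd_out): [ intel_gsc_mtl_header | HDCP reply payload   ]
 *
 * Hence the largest payload in either direction is
 * PAGE_SIZE - sizeof(struct intel_gsc_mtl_header).
 */
#if 0
static void *example_hdcp_payload_in(struct intel_hdcp_gsc_message *msg)
{
	/* request payload starts right after the MTL header on page 0 */
	return msg->hdcp_cmd_in + sizeof(struct intel_gsc_mtl_header);
}

static void *example_hdcp_payload_out(struct intel_hdcp_gsc_message *msg)
{
	/* reply payload starts right after the MTL header on page 1 */
	return msg->hdcp_cmd_out + sizeof(struct intel_gsc_mtl_header);
}
#endif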
linux-master
drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
/* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * */ #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_dsc_helper.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_gmbus.h" #define _INTEL_BIOS_PRIVATE #include "intel_vbt_defs.h" /** * DOC: Video BIOS Table (VBT) * * The Video BIOS Table, or VBT, provides platform and board specific * configuration information to the driver that is not discoverable or available * through other means. The configuration is mostly related to display * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in * the PCI ROM. * * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that * contain the actual configuration information. The VBT Header, and thus the * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the * BDB Header. The data blocks are concatenated after the BDB Header. The data * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of * data. (Block 53, the MIPI Sequence Block is an exception.) * * The driver parses the VBT during load. The relevant information is stored in * driver private data for ease of use, and the actual VBT is not read after * that. */ /* Wrapper for VBT child device config */ struct intel_bios_encoder_data { struct drm_i915_private *i915; struct child_device_config child; struct dsc_compression_parameters_entry *dsc; struct list_head node; }; #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 /* Get BDB block size given a pointer to Block ID. */ static u32 _get_blocksize(const u8 *block_base) { /* The MIPI Sequence Block v3+ has a separate size field. */ if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3) return *((const u32 *)(block_base + 4)); else return *((const u16 *)(block_base + 1)); } /* Get BDB block size give a pointer to data after Block ID and Block Size. 
*/ static u32 get_blocksize(const void *block_data) { return _get_blocksize(block_data - 3); } static const void * find_raw_section(const void *_bdb, enum bdb_block_id section_id) { const struct bdb_header *bdb = _bdb; const u8 *base = _bdb; int index = 0; u32 total, current_size; enum bdb_block_id current_id; /* skip to first section */ index += bdb->header_size; total = bdb->bdb_size; /* walk the sections looking for section_id */ while (index + 3 < total) { current_id = *(base + index); current_size = _get_blocksize(base + index); index += 3; if (index + current_size > total) return NULL; if (current_id == section_id) return base + index; index += current_size; } return NULL; } /* * Offset from the start of BDB to the start of the * block data (just past the block header). */ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id) { const void *block; block = find_raw_section(bdb, section_id); if (!block) return 0; return block - bdb; } struct bdb_block_entry { struct list_head node; enum bdb_block_id section_id; u8 data[]; }; static const void * bdb_find_section(struct drm_i915_private *i915, enum bdb_block_id section_id) { struct bdb_block_entry *entry; list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) { if (entry->section_id == section_id) return entry->data + 3; } return NULL; } static const struct { enum bdb_block_id section_id; size_t min_size; } bdb_blocks[] = { { .section_id = BDB_GENERAL_FEATURES, .min_size = sizeof(struct bdb_general_features), }, { .section_id = BDB_GENERAL_DEFINITIONS, .min_size = sizeof(struct bdb_general_definitions), }, { .section_id = BDB_PSR, .min_size = sizeof(struct bdb_psr), }, { .section_id = BDB_DRIVER_FEATURES, .min_size = sizeof(struct bdb_driver_features), }, { .section_id = BDB_SDVO_LVDS_OPTIONS, .min_size = sizeof(struct bdb_sdvo_lvds_options), }, { .section_id = BDB_SDVO_PANEL_DTDS, .min_size = sizeof(struct bdb_sdvo_panel_dtds), }, { .section_id = BDB_EDP, .min_size = sizeof(struct bdb_edp), }, { .section_id = BDB_LVDS_OPTIONS, .min_size = sizeof(struct bdb_lvds_options), }, /* * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS, * so keep the two ordered. 
*/ { .section_id = BDB_LVDS_LFP_DATA_PTRS, .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), }, { .section_id = BDB_LVDS_LFP_DATA, .min_size = 0, /* special case */ }, { .section_id = BDB_LVDS_BACKLIGHT, .min_size = sizeof(struct bdb_lfp_backlight_data), }, { .section_id = BDB_LFP_POWER, .min_size = sizeof(struct bdb_lfp_power), }, { .section_id = BDB_MIPI_CONFIG, .min_size = sizeof(struct bdb_mipi_config), }, { .section_id = BDB_MIPI_SEQUENCE, .min_size = sizeof(struct bdb_mipi_sequence) }, { .section_id = BDB_COMPRESSION_PARAMETERS, .min_size = sizeof(struct bdb_compression_parameters), }, { .section_id = BDB_GENERIC_DTD, .min_size = sizeof(struct bdb_generic_dtd), }, }; static size_t lfp_data_min_size(struct drm_i915_private *i915) { const struct bdb_lvds_lfp_data_ptrs *ptrs; size_t size; ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return 0; size = sizeof(struct bdb_lvds_lfp_data); if (ptrs->panel_name.table_size) size = max(size, ptrs->panel_name.offset + sizeof(struct bdb_lvds_lfp_data_tail)); return size; } static bool validate_lfp_data_ptrs(const void *bdb, const struct bdb_lvds_lfp_data_ptrs *ptrs) { int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size; int data_block_size, lfp_data_size; const void *data_block; int i; data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); if (!data_block) return false; data_block_size = get_blocksize(data_block); if (data_block_size == 0) return false; /* always 3 indicating the presence of fp_timing+dvo_timing+panel_pnp_id */ if (ptrs->lvds_entries != 3) return false; fp_timing_size = ptrs->ptr[0].fp_timing.table_size; dvo_timing_size = ptrs->ptr[0].dvo_timing.table_size; panel_pnp_id_size = ptrs->ptr[0].panel_pnp_id.table_size; panel_name_size = ptrs->panel_name.table_size; /* fp_timing has variable size */ if (fp_timing_size < 32 || dvo_timing_size != sizeof(struct lvds_dvo_timing) || panel_pnp_id_size != sizeof(struct lvds_pnp_id)) return false; /* panel_name is not present in old VBTs */ if (panel_name_size != 0 && panel_name_size != sizeof(struct lvds_lfp_panel_name)) return false; lfp_data_size = ptrs->ptr[1].fp_timing.offset - ptrs->ptr[0].fp_timing.offset; if (16 * lfp_data_size > data_block_size) return false; /* make sure the table entries have uniform size */ for (i = 1; i < 16; i++) { if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size || ptrs->ptr[i].dvo_timing.table_size != dvo_timing_size || ptrs->ptr[i].panel_pnp_id.table_size != panel_pnp_id_size) return false; if (ptrs->ptr[i].fp_timing.offset - ptrs->ptr[i-1].fp_timing.offset != lfp_data_size || ptrs->ptr[i].dvo_timing.offset - ptrs->ptr[i-1].dvo_timing.offset != lfp_data_size || ptrs->ptr[i].panel_pnp_id.offset - ptrs->ptr[i-1].panel_pnp_id.offset != lfp_data_size) return false; } /* * Except for vlv/chv machines all real VBTs seem to have 6 * unaccounted bytes in the fp_timing table. And it doesn't * appear to be a really intentional hole as the fp_timing * 0xffff terminator is always within those 6 missing bytes. 
*/ if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size) fp_timing_size += 6; if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size) return false; if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset || ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset || ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size) return false; /* make sure the tables fit inside the data block */ for (i = 0; i < 16; i++) { if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size || ptrs->ptr[i].dvo_timing.offset + dvo_timing_size > data_block_size || ptrs->ptr[i].panel_pnp_id.offset + panel_pnp_id_size > data_block_size) return false; } if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size) return false; /* make sure fp_timing terminators are present at expected locations */ for (i = 0; i < 16; i++) { const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset + fp_timing_size - 2; if (*t != 0xffff) return false; } return true; } /* make the data table offsets relative to the data block */ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block) { struct bdb_lvds_lfp_data_ptrs *ptrs = ptrs_block; u32 offset; int i; offset = raw_block_offset(bdb, BDB_LVDS_LFP_DATA); for (i = 0; i < 16; i++) { if (ptrs->ptr[i].fp_timing.offset < offset || ptrs->ptr[i].dvo_timing.offset < offset || ptrs->ptr[i].panel_pnp_id.offset < offset) return false; ptrs->ptr[i].fp_timing.offset -= offset; ptrs->ptr[i].dvo_timing.offset -= offset; ptrs->ptr[i].panel_pnp_id.offset -= offset; } if (ptrs->panel_name.table_size) { if (ptrs->panel_name.offset < offset) return false; ptrs->panel_name.offset -= offset; } return validate_lfp_data_ptrs(bdb, ptrs); } static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table, int table_size, int total_size) { if (total_size < table_size) return total_size; table->table_size = table_size; table->offset = total_size - table_size; return total_size - table_size; } static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, const struct lvds_lfp_data_ptr_table *prev, int size) { next->table_size = prev->table_size; next->offset = prev->offset + size; } static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, const void *bdb) { int i, size, table_size, block_size, offset, fp_timing_size; struct bdb_lvds_lfp_data_ptrs *ptrs; const void *block; void *ptrs_block; /* * The hardcoded fp_timing_size is only valid for * modernish VBTs. All older VBTs definitely should * include block 41 and thus we don't need to * generate one. 
*/ if (i915->display.vbt.version < 155) return NULL; fp_timing_size = 38; block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); if (!block) return NULL; drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n"); block_size = get_blocksize(block); size = fp_timing_size + sizeof(struct lvds_dvo_timing) + sizeof(struct lvds_pnp_id); if (size * 16 > block_size) return NULL; ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL); if (!ptrs_block) return NULL; *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS; *(u16 *)(ptrs_block + 1) = sizeof(*ptrs); ptrs = ptrs_block + 3; table_size = sizeof(struct lvds_pnp_id); size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size); table_size = sizeof(struct lvds_dvo_timing); size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size); table_size = fp_timing_size; size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size); if (ptrs->ptr[0].fp_timing.table_size) ptrs->lvds_entries++; if (ptrs->ptr[0].dvo_timing.table_size) ptrs->lvds_entries++; if (ptrs->ptr[0].panel_pnp_id.table_size) ptrs->lvds_entries++; if (size != 0 || ptrs->lvds_entries != 3) { kfree(ptrs_block); return NULL; } size = fp_timing_size + sizeof(struct lvds_dvo_timing) + sizeof(struct lvds_pnp_id); for (i = 1; i < 16; i++) { next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size); } table_size = sizeof(struct lvds_lfp_panel_name); if (16 * (size + table_size) <= block_size) { ptrs->panel_name.table_size = table_size; ptrs->panel_name.offset = size * 16; } offset = block - bdb; for (i = 0; i < 16; i++) { ptrs->ptr[i].fp_timing.offset += offset; ptrs->ptr[i].dvo_timing.offset += offset; ptrs->ptr[i].panel_pnp_id.offset += offset; } if (ptrs->panel_name.table_size) ptrs->panel_name.offset += offset; return ptrs_block; } static void init_bdb_block(struct drm_i915_private *i915, const void *bdb, enum bdb_block_id section_id, size_t min_size) { struct bdb_block_entry *entry; void *temp_block = NULL; const void *block; size_t block_size; block = find_raw_section(bdb, section_id); /* Modern VBTs lack the LFP data table pointers block, make one up */ if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) { temp_block = generate_lfp_data_ptrs(i915, bdb); if (temp_block) block = temp_block + 3; } if (!block) return; drm_WARN(&i915->drm, min_size == 0, "Block %d min_size is zero\n", section_id); block_size = get_blocksize(block); /* * Version number and new block size are considered * part of the header for MIPI sequenece block v3+. 
*/ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3) block_size += 5; entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), GFP_KERNEL); if (!entry) { kfree(temp_block); return; } entry->section_id = section_id; memcpy(entry->data, block - 3, block_size + 3); kfree(temp_block); drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n", section_id, block_size, min_size); if (section_id == BDB_LVDS_LFP_DATA_PTRS && !fixup_lfp_data_ptrs(bdb, entry->data + 3)) { drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n"); kfree(entry); return; } list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks); } static void init_bdb_blocks(struct drm_i915_private *i915, const void *bdb) { int i; for (i = 0; i < ARRAY_SIZE(bdb_blocks); i++) { enum bdb_block_id section_id = bdb_blocks[i].section_id; size_t min_size = bdb_blocks[i].min_size; if (section_id == BDB_LVDS_LFP_DATA) min_size = lfp_data_min_size(i915); init_bdb_block(i915, bdb, section_id, min_size); } } static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) { panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | dvo_timing->hactive_lo; panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + ((dvo_timing->hsync_pulse_width_hi << 8) | dvo_timing->hsync_pulse_width_lo); panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | dvo_timing->vactive_lo; panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo); panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + ((dvo_timing->vsync_pulse_width_hi << 4) | dvo_timing->vsync_pulse_width_lo); panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; if (dvo_timing->hsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; if (dvo_timing->vsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | dvo_timing->himage_lo; panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | dvo_timing->vimage_lo; /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; drm_mode_set_name(panel_fixed_mode); } static const struct lvds_dvo_timing * get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *data, const struct bdb_lvds_lfp_data_ptrs *ptrs, int index) { return (const void *)data + ptrs->ptr[index].dvo_timing.offset; } static const struct lvds_fp_timing * get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data, const struct bdb_lvds_lfp_data_ptrs *ptrs, int index) { return (const void *)data + ptrs->ptr[index].fp_timing.offset; } static const struct lvds_pnp_id * get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data, const struct bdb_lvds_lfp_data_ptrs *ptrs, int index) { return (const void *)data + 
ptrs->ptr[index].panel_pnp_id.offset; } static const struct bdb_lvds_lfp_data_tail * get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, const struct bdb_lvds_lfp_data_ptrs *ptrs) { if (ptrs->panel_name.table_size) return (const void *)data + ptrs->panel_name.offset; else return NULL; } static void dump_pnp_id(struct drm_i915_private *i915, const struct lvds_pnp_id *pnp_id, const char *name) { u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name); char vend[4]; drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n", name, drm_edid_decode_mfg_id(mfg_name, vend), pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial, pnp_id->mfg_week, pnp_id->mfg_year + 1990); } static int opregion_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { return intel_opregion_get_panel_type(i915); } static int vbt_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { const struct bdb_lvds_options *lvds_options; lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) return -1; if (lvds_options->panel_type > 0xf && lvds_options->panel_type != 0xff) { drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n", lvds_options->panel_type); return -1; } if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2) return lvds_options->panel_type2; drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1); return lvds_options->panel_type; } static int pnpid_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_ptrs *ptrs; const struct lvds_pnp_id *edid_id; struct lvds_pnp_id edid_id_nodate; const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */ int i, best = -1; if (!edid) return -1; edid_id = (const void *)&edid->mfg_id[0]; edid_id_nodate = *edid_id; edid_id_nodate.mfg_week = 0; edid_id_nodate.mfg_year = 0; dump_pnp_id(i915, edid_id, "EDID"); ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return -1; data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); if (!data) return -1; for (i = 0; i < 16; i++) { const struct lvds_pnp_id *vbt_id = get_lvds_pnp_id(data, ptrs, i); /* full match? */ if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id))) return i; /* * Accept a match w/o date if no full match is found, * and the VBT entry does not specify a date. */ if (best < 0 && !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id))) best = i; } return best; } static int fallback_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { return use_fallback ? 
0 : -1; } enum panel_type { PANEL_TYPE_OPREGION, PANEL_TYPE_VBT, PANEL_TYPE_PNPID, PANEL_TYPE_FALLBACK, }; static int get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { struct { const char *name; int (*get_panel_type)(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback); int panel_type; } panel_types[] = { [PANEL_TYPE_OPREGION] = { .name = "OpRegion", .get_panel_type = opregion_get_panel_type, }, [PANEL_TYPE_VBT] = { .name = "VBT", .get_panel_type = vbt_get_panel_type, }, [PANEL_TYPE_PNPID] = { .name = "PNPID", .get_panel_type = pnpid_get_panel_type, }, [PANEL_TYPE_FALLBACK] = { .name = "fallback", .get_panel_type = fallback_get_panel_type, }, }; int i; for (i = 0; i < ARRAY_SIZE(panel_types); i++) { panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, drm_edid, use_fallback); drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf && panel_types[i].panel_type != 0xff); if (panel_types[i].panel_type >= 0) drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n", panel_types[i].name, panel_types[i].panel_type); } if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0) i = PANEL_TYPE_OPREGION; else if (panel_types[PANEL_TYPE_VBT].panel_type == 0xff && panel_types[PANEL_TYPE_PNPID].panel_type >= 0) i = PANEL_TYPE_PNPID; else if (panel_types[PANEL_TYPE_VBT].panel_type != 0xff && panel_types[PANEL_TYPE_VBT].panel_type >= 0) i = PANEL_TYPE_VBT; else i = PANEL_TYPE_FALLBACK; drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n", panel_types[i].name, panel_types[i].panel_type); return panel_types[i].panel_type; } static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits) { return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1); } static bool panel_bool(unsigned int value, int panel_type) { return panel_bits(value, panel_type, 1); } /* Parse general panel options */ static void parse_panel_options(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_lvds_options *lvds_options; int panel_type = panel->vbt.panel_type; int drrs_mode; lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) return; panel->vbt.lvds_dither = lvds_options->pixel_dither; /* * Empirical evidence indicates the block size can be * either 4,14,16,24+ bytes. For older VBTs no clear * relationship between the block size vs. BDB version. */ if (get_blocksize(lvds_options) < 16) return; drrs_mode = panel_bits(lvds_options->dps_panel_type_bits, panel_type, 2); /* * VBT has static DRRS = 0 and seamless DRRS = 2. * The below piece of code is required to adjust vbt.drrs_type * to match the enum drrs_support_type. 
*/ switch (drrs_mode) { case 0: panel->vbt.drrs_type = DRRS_TYPE_STATIC; drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n"); break; case 2: panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS; drm_dbg_kms(&i915->drm, "DRRS supported mode is seamless\n"); break; default: panel->vbt.drrs_type = DRRS_TYPE_NONE; drm_dbg_kms(&i915->drm, "DRRS not supported (VBT input)\n"); break; } } static void parse_lfp_panel_dtd(struct drm_i915_private *i915, struct intel_panel *panel, const struct bdb_lvds_lfp_data *lvds_lfp_data, const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs) { const struct lvds_dvo_timing *panel_dvo_timing; const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int panel_type = panel->vbt.panel_type; panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, panel_type); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; drm_dbg_kms(&i915->drm, "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); fp_timing = get_lvds_fp_timing(lvds_lfp_data, lvds_lfp_data_ptrs, panel_type); /* check the resolution, just to be sure */ if (fp_timing->x_res == panel_fixed_mode->hdisplay && fp_timing->y_res == panel_fixed_mode->vdisplay) { panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val; drm_dbg_kms(&i915->drm, "VBT initial LVDS value %x\n", panel->vbt.bios_lvds_val); } } static void parse_lfp_data(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; const struct lvds_pnp_id *pnp_id; int panel_type = panel->vbt.panel_type; ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return; data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); if (!data) return; if (!panel->vbt.lfp_lvds_vbt_mode) parse_lfp_panel_dtd(i915, panel, data, ptrs); pnp_id = get_lvds_pnp_id(data, ptrs, panel_type); dump_pnp_id(i915, pnp_id, "Panel"); tail = get_lfp_data_tail(data, ptrs); if (!tail) return; drm_dbg_kms(&i915->drm, "Panel name: %.*s\n", (int)sizeof(tail->panel_name[0].name), tail->panel_name[panel_type].name); if (i915->display.vbt.version >= 188) { panel->vbt.seamless_drrs_min_refresh_rate = tail->seamless_drrs_min_refresh_rate[panel_type]; drm_dbg_kms(&i915->drm, "Seamless DRRS min refresh rate: %d Hz\n", panel->vbt.seamless_drrs_min_refresh_rate); } } static void parse_generic_dtd(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_generic_dtd *generic_dtd; const struct generic_dtd_entry *dtd; struct drm_display_mode *panel_fixed_mode; int num_dtd; /* * Older VBTs provided DTD information for internal displays through * the "LFP panel tables" block (42). As of VBT revision 229 the * DTD information should be provided via a newer "generic DTD" * block (58). Just to be safe, we'll try the new generic DTD block * first on VBT >= 229, but still fall back to trying the old LFP * block if that fails. 
*/ if (i915->display.vbt.version < 229) return; generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD); if (!generic_dtd) return; if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) { drm_err(&i915->drm, "GDTD size %u is too small.\n", generic_dtd->gdtd_size); return; } else if (generic_dtd->gdtd_size != sizeof(struct generic_dtd_entry)) { drm_err(&i915->drm, "Unexpected GDTD size %u\n", generic_dtd->gdtd_size); /* DTD has unknown fields, but keep going */ } num_dtd = (get_blocksize(generic_dtd) - sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size; if (panel->vbt.panel_type >= num_dtd) { drm_err(&i915->drm, "Panel type %d not found in table of %d DTD's\n", panel->vbt.panel_type, num_dtd); return; } dtd = &generic_dtd->dtd[panel->vbt.panel_type]; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; panel_fixed_mode->hdisplay = dtd->hactive; panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + dtd->hfront_porch; panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + dtd->hsync; panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + dtd->hblank; panel_fixed_mode->vdisplay = dtd->vactive; panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + dtd->vfront_porch; panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + dtd->vsync; panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + dtd->vblank; panel_fixed_mode->clock = dtd->pixel_clock; panel_fixed_mode->width_mm = dtd->width_mm; panel_fixed_mode->height_mm = dtd->height_mm; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; drm_mode_set_name(panel_fixed_mode); if (dtd->hsync_positive_polarity) panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; if (dtd->vsync_positive_polarity) panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; drm_dbg_kms(&i915->drm, "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; } static void parse_lfp_backlight(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_lfp_backlight_data *backlight_data; const struct lfp_backlight_data_entry *entry; int panel_type = panel->vbt.panel_type; u16 level; backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT); if (!backlight_data) return; if (backlight_data->entry_size != sizeof(backlight_data->data[0])) { drm_dbg_kms(&i915->drm, "Unsupported backlight data entry size %u\n", backlight_data->entry_size); return; } entry = &backlight_data->data[panel_type]; panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; if (!panel->vbt.backlight.present) { drm_dbg_kms(&i915->drm, "PWM backlight not present in VBT (type %u)\n", entry->type); return; } panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; panel->vbt.backlight.controller = 0; if (i915->display.vbt.version >= 191) { size_t exp_size; if (i915->display.vbt.version >= 236) exp_size = sizeof(struct bdb_lfp_backlight_data); else if (i915->display.vbt.version >= 234) exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; else exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; if (get_blocksize(backlight_data) >= exp_size) { const struct lfp_backlight_control_method *method; method = &backlight_data->backlight_control[panel_type]; panel->vbt.backlight.type = method->type; panel->vbt.backlight.controller = method->controller; } } panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 
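	/*
	 * Added commentary, not in the original file: the version checks
	 * below handle brightness levels differently per VBT revision. From
	 * VBT 234 the minimum level comes from a separate per-panel table;
	 * from VBT 236 a precision-bits field indicates 16-bit levels and the
	 * stored minimum is scaled down to the 0-255 range by dividing by
	 * 255, while on 234/235 a level above 255 is the hint that such
	 * scaling is needed.
	 */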
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm; if (i915->display.vbt.version >= 234) { u16 min_level; bool scale; level = backlight_data->brightness_level[panel_type].level; min_level = backlight_data->brightness_min_level[panel_type].level; if (i915->display.vbt.version >= 236) scale = backlight_data->brightness_precision_bits[panel_type] == 16; else scale = level > 255; if (scale) min_level = min_level / 255; if (min_level > 255) { drm_warn(&i915->drm, "Brightness min level > 255\n"); level = 255; } panel->vbt.backlight.min_brightness = min_level; panel->vbt.backlight.brightness_precision_bits = backlight_data->brightness_precision_bits[panel_type]; } else { level = backlight_data->level[panel_type]; panel->vbt.backlight.min_brightness = entry->min_brightness; } if (i915->display.vbt.version >= 239) panel->vbt.backlight.hdr_dpcd_refresh_timeout = DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100); else panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30; drm_dbg_kms(&i915->drm, "VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u, controller %u\n", panel->vbt.backlight.pwm_freq_hz, panel->vbt.backlight.active_low_pwm ? "low" : "high", panel->vbt.backlight.min_brightness, level, panel->vbt.backlight.controller); } /* Try to find sdvo panel data */ static void parse_sdvo_panel_data(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_sdvo_panel_dtds *dtds; struct drm_display_mode *panel_fixed_mode; int index; index = i915->params.vbt_sdvo_panel_type; if (index == -2) { drm_dbg_kms(&i915->drm, "Ignore SDVO panel mode from BIOS VBT tables.\n"); return; } if (index == -1) { const struct bdb_sdvo_lvds_options *sdvo_lvds_options; sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS); if (!dtds) return; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]); panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; drm_dbg_kms(&i915->drm, "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n", DRM_MODE_ARG(panel_fixed_mode)); } static int intel_bios_ssc_frequency(struct drm_i915_private *i915, bool alternate) { switch (DISPLAY_VER(i915)) { case 2: return alternate ? 66667 : 48000; case 3: case 4: return alternate ? 100000 : 96000; default: return alternate ? 100000 : 120000; } } static void parse_general_features(struct drm_i915_private *i915) { const struct bdb_general_features *general; general = bdb_find_section(i915, BDB_GENERAL_FEATURES); if (!general) return; i915->display.vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ if (i915->display.vbt.version >= 155 && (HAS_DDI(i915) || IS_VALLEYVIEW(i915))) i915->display.vbt.int_crt_support = general->int_crt_support; i915->display.vbt.lvds_use_ssc = general->enable_ssc; i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, general->ssc_freq); i915->display.vbt.display_clock_mode = general->display_clock_mode; i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; if (i915->display.vbt.version >= 181) { i915->display.vbt.orientation = general->rotate_180 ? 
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : DRM_MODE_PANEL_ORIENTATION_NORMAL; } else { i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } if (i915->display.vbt.version >= 249 && general->afc_startup_config) { i915->display.vbt.override_afc_startup = true; i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7; } drm_dbg_kms(&i915->drm, "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", i915->display.vbt.int_tv_support, i915->display.vbt.int_crt_support, i915->display.vbt.lvds_use_ssc, i915->display.vbt.lvds_ssc_freq, i915->display.vbt.display_clock_mode, i915->display.vbt.fdi_rx_polarity_inverted); } static const struct child_device_config * child_device_ptr(const struct bdb_general_definitions *defs, int i) { return (const void *) &defs->devices[i * defs->child_dev_size]; } static void parse_sdvo_device_mapping(struct drm_i915_private *i915) { const struct intel_bios_encoder_data *devdata; int count = 0; /* * Only parse SDVO mappings on gens that could have SDVO. This isn't * accurate and doesn't have to be, as long as it's not too strict. */ if (!IS_DISPLAY_VER(i915, 3, 7)) { drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n"); return; } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; struct sdvo_device_mapping *mapping; if (child->slave_addr != SLAVE_ADDR1 && child->slave_addr != SLAVE_ADDR2) { /* * If the slave address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; } if (child->dvo_port != DEVICE_PORT_DVOB && child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ drm_dbg_kms(&i915->drm, "Incorrect SDVO port. Skip it\n"); continue; } drm_dbg_kms(&i915->drm, "the SDVO device with slave addr %2x is found on" " %s port\n", child->slave_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1]; if (!mapping->initialized) { mapping->dvo_port = child->dvo_port; mapping->slave_addr = child->slave_addr; mapping->dvo_wiring = child->dvo_wiring; mapping->ddc_pin = child->ddc_pin; mapping->i2c_pin = child->i2c_pin; mapping->initialized = 1; drm_dbg_kms(&i915->drm, "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", mapping->dvo_port, mapping->slave_addr, mapping->dvo_wiring, mapping->ddc_pin, mapping->i2c_pin); } else { drm_dbg_kms(&i915->drm, "Maybe one SDVO port is shared by " "two SDVO device.\n"); } if (child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ drm_dbg_kms(&i915->drm, "there exists the slave2_addr. Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; } if (!count) { /* No SDVO device info is found */ drm_dbg_kms(&i915->drm, "No SDVO device info is found in VBT\n"); } } static void parse_driver_features(struct drm_i915_private *i915) { const struct bdb_driver_features *driver; driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); if (!driver) return; if (DISPLAY_VER(i915) >= 5) { /* * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS * to mean "eDP". The VBT spec doesn't agree with that * interpretation, but real world VBTs seem to. */ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS) i915->display.vbt.int_lvds_support = 0; } else { /* * FIXME it's not clear which BDB version has the LVDS config * bits defined. 
		 * Revision history in the VBT spec says:
		 * "0.92 | Add two definitions for VBT value of LVDS Active
		 * Config (00b and 11b values defined) | 06/13/2005"
		 * but does not specify the BDB version.
		 *
		 * So far version 134 (on i945gm) is the oldest VBT observed
		 * in the wild with the bits correctly populated. Version
		 * 108 (on i85x) does not have the bits correctly populated.
		 */
		if (i915->display.vbt.version >= 134 &&
		    driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
		    driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
			i915->display.vbt.int_lvds_support = 0;
	}
}

static void parse_panel_driver_features(struct drm_i915_private *i915,
					struct intel_panel *panel)
{
	const struct bdb_driver_features *driver;

	driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
	if (!driver)
		return;

	if (i915->display.vbt.version < 228) {
		drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
			    driver->drrs_enabled);
		/*
		 * If DRRS is not supported, drrs_type has to be set to 0.
		 * This is because the VBT is configured in such a way that
		 * static DRRS is 0 and DRRS not supported is represented by
		 * driver->drrs_enabled=false
		 */
		if (!driver->drrs_enabled && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
			/*
			 * FIXME Should DMRRS perhaps be treated as seamless
			 * but without the automatic downclocking?
			 */
			if (driver->dmrrs_enabled)
				panel->vbt.drrs_type = DRRS_TYPE_STATIC;
			else
				panel->vbt.drrs_type = DRRS_TYPE_NONE;
		}

		panel->vbt.psr.enable = driver->psr_enabled;
	}
}

static void parse_power_conservation_features(struct drm_i915_private *i915,
					      struct intel_panel *panel)
{
	const struct bdb_lfp_power *power;
	u8 panel_type = panel->vbt.panel_type;

	panel->vbt.vrr = true; /* matches Windows behaviour */

	if (i915->display.vbt.version < 228)
		return;

	power = bdb_find_section(i915, BDB_LFP_POWER);
	if (!power)
		return;

	panel->vbt.psr.enable = panel_bool(power->psr, panel_type);

	/*
	 * If DRRS is not supported, drrs_type has to be set to 0.
	 * This is because the VBT is configured in such a way that
	 * static DRRS is 0 and DRRS not supported is represented by
	 * power->drrs & BIT(panel_type)=false
	 */
	if (!panel_bool(power->drrs, panel_type) && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
		/*
		 * FIXME Should DMRRS perhaps be treated as seamless
		 * but without the automatic downclocking?
*/ if (panel_bool(power->dmrrs, panel_type)) panel->vbt.drrs_type = DRRS_TYPE_STATIC; else panel->vbt.drrs_type = DRRS_TYPE_NONE; } if (i915->display.vbt.version >= 232) panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type); if (i915->display.vbt.version >= 233) panel->vbt.vrr = panel_bool(power->vrr_feature_enabled, panel_type); } static void parse_edp(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_edp *edp; const struct edp_power_seq *edp_pps; const struct edp_fast_link_params *edp_link_params; int panel_type = panel->vbt.panel_type; edp = bdb_find_section(i915, BDB_EDP); if (!edp) return; switch (panel_bits(edp->color_depth, panel_type, 2)) { case EDP_18BPP: panel->vbt.edp.bpp = 18; break; case EDP_24BPP: panel->vbt.edp.bpp = 24; break; case EDP_30BPP: panel->vbt.edp.bpp = 30; break; } /* Get the eDP sequencing and link info */ edp_pps = &edp->power_seqs[panel_type]; edp_link_params = &edp->fast_link_params[panel_type]; panel->vbt.edp.pps = *edp_pps; if (i915->display.vbt.version >= 224) { panel->vbt.edp.rate = edp->edp_fast_link_training_rate[panel_type] * 20; } else { switch (edp_link_params->rate) { case EDP_RATE_1_62: panel->vbt.edp.rate = 162000; break; case EDP_RATE_2_7: panel->vbt.edp.rate = 270000; break; case EDP_RATE_5_4: panel->vbt.edp.rate = 540000; break; default: drm_dbg_kms(&i915->drm, "VBT has unknown eDP link rate value %u\n", edp_link_params->rate); break; } } switch (edp_link_params->lanes) { case EDP_LANE_1: panel->vbt.edp.lanes = 1; break; case EDP_LANE_2: panel->vbt.edp.lanes = 2; break; case EDP_LANE_4: panel->vbt.edp.lanes = 4; break; default: drm_dbg_kms(&i915->drm, "VBT has unknown eDP lane count value %u\n", edp_link_params->lanes); break; } switch (edp_link_params->preemphasis) { case EDP_PREEMPHASIS_NONE: panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0; break; case EDP_PREEMPHASIS_3_5dB: panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1; break; case EDP_PREEMPHASIS_6dB: panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2; break; case EDP_PREEMPHASIS_9_5dB: panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; break; default: drm_dbg_kms(&i915->drm, "VBT has unknown eDP pre-emphasis value %u\n", edp_link_params->preemphasis); break; } switch (edp_link_params->vswing) { case EDP_VSWING_0_4V: panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; break; case EDP_VSWING_0_6V: panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1; break; case EDP_VSWING_0_8V: panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2; break; case EDP_VSWING_1_2V: panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; break; default: drm_dbg_kms(&i915->drm, "VBT has unknown eDP voltage swing value %u\n", edp_link_params->vswing); break; } if (i915->display.vbt.version >= 173) { u8 vswing; /* Don't read from VBT if module parameter has valid value*/ if (i915->params.edp_vswing) { panel->vbt.edp.low_vswing = i915->params.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; panel->vbt.edp.low_vswing = vswing == 0; } } panel->vbt.edp.drrs_msa_timing_delay = panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2); if (i915->display.vbt.version >= 244) panel->vbt.edp.max_link_rate = edp->edp_max_port_link_rate[panel_type] * 20; } static void parse_psr(struct drm_i915_private *i915, struct intel_panel *panel) { const struct bdb_psr *psr; const struct psr_table *psr_table; int panel_type = panel->vbt.panel_type; psr = bdb_find_section(i915, BDB_PSR); if (!psr) { drm_dbg_kms(&i915->drm, "No PSR BDB 
found.\n");
		return;
	}

	psr_table = &psr->psr_table[panel_type];

	panel->vbt.psr.full_link = psr_table->full_link;
	panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;

	/* Allowed VBT values go from 0 to 15 */
	panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
		psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;

	/*
	 * New psr options 0=500us, 1=100us, 2=2500us, 3=0us
	 * Old decimal value is wake up time in multiples of 100 us.
	 */
	if (i915->display.vbt.version >= 205 &&
	    (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
		switch (psr_table->tp1_wakeup_time) {
		case 0:
			panel->vbt.psr.tp1_wakeup_time_us = 500;
			break;
		case 1:
			panel->vbt.psr.tp1_wakeup_time_us = 100;
			break;
		case 3:
			panel->vbt.psr.tp1_wakeup_time_us = 0;
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
				    psr_table->tp1_wakeup_time);
			fallthrough;
		case 2:
			panel->vbt.psr.tp1_wakeup_time_us = 2500;
			break;
		}

		switch (psr_table->tp2_tp3_wakeup_time) {
		case 0:
			panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
			break;
		case 1:
			panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
			break;
		case 3:
			panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
				    psr_table->tp2_tp3_wakeup_time);
			fallthrough;
		case 2:
			panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
			break;
		}
	} else {
		panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
		panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
	}

	if (i915->display.vbt.version >= 226) {
		u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;

		wakeup_time = panel_bits(wakeup_time, panel_type, 2);
		switch (wakeup_time) {
		case 0:
			wakeup_time = 500;
			break;
		case 1:
			wakeup_time = 100;
			break;
		case 3:
			wakeup_time = 50;
			break;
		default:
		case 2:
			wakeup_time = 2500;
			break;
		}
		panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
	} else {
		/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
		panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
	}
}
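/*
 * Backlight/CABC DCS port selection for DSI panels: with a dual link panel
 * and VBT >= 197 the VBT can direct the backlight and CABC commands to port
 * A, to the second link (port B on ICL+, port C before that), or to both;
 * otherwise the panel's own port is used.
 */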
static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
				      struct intel_panel *panel,
				      enum port port)
{
	enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;

	if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
		panel->vbt.dsi.bl_ports = BIT(port);
		if (panel->vbt.dsi.config->cabc_supported)
			panel->vbt.dsi.cabc_ports = BIT(port);
		return;
	}

	switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
	case DL_DCS_PORT_A:
		panel->vbt.dsi.bl_ports = BIT(PORT_A);
		break;
	case DL_DCS_PORT_C:
		panel->vbt.dsi.bl_ports = BIT(port_bc);
		break;
	default:
	case DL_DCS_PORT_A_AND_C:
		panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
		break;
	}

	if (!panel->vbt.dsi.config->cabc_supported)
		return;

	switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
	case DL_DCS_PORT_A:
		panel->vbt.dsi.cabc_ports = BIT(PORT_A);
		break;
	case DL_DCS_PORT_C:
		panel->vbt.dsi.cabc_ports = BIT(port_bc);
		break;
	default:
	case DL_DCS_PORT_A_AND_C:
		panel->vbt.dsi.cabc_ports = BIT(PORT_A) | BIT(port_bc);
		break;
	}
}

static void parse_mipi_config(struct drm_i915_private *i915,
			      struct intel_panel *panel)
{
	const struct bdb_mipi_config *start;
	const struct mipi_config *config;
	const struct mipi_pps_data *pps;
	int panel_type = panel->vbt.panel_type;
	enum port port;

	/* parse MIPI blocks only if LFP type is MIPI */
	if (!intel_bios_is_dsi_present(i915, &port))
		return;

	/* Initialize this to undefined indicating no generic MIPI support */
	panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;

	/*
	 * Block #40 is already parsed and panel_fixed_mode is
	 * stored in i915->lfp_lvds_vbt_mode; reuse it when needed.
	 */

	/* Parse block #52 using the panel index from the already parsed panel_type */
	start = bdb_find_section(i915, BDB_MIPI_CONFIG);
	if (!start) {
		drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
		return;
	}

	drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n",
		panel_type);

	/*
	 * get hold of the correct configuration block and pps data as per
	 * the panel_type as index
	 */
	config = &start->config[panel_type];
	pps = &start->pps[panel_type];

	/* Store the full data for now; trim later once it is clear what is actually needed */
	panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
	if (!panel->vbt.dsi.config)
		return;

	panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
	if (!panel->vbt.dsi.pps) {
		kfree(panel->vbt.dsi.config);
		return;
	}

	parse_dsi_backlight_ports(i915, panel, port);

	/* FIXME is the 90 vs. 270 correct? */
	switch (config->rotation) {
	case ENABLE_ROTATION_0:
		/*
		 * Most (all?) VBTs claim 0 degrees despite having
		 * an upside down panel, thus we do not trust this.
		 */
		panel->vbt.dsi.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
		break;
	case ENABLE_ROTATION_90:
		panel->vbt.dsi.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
		break;
	case ENABLE_ROTATION_180:
		panel->vbt.dsi.orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
		break;
	case ENABLE_ROTATION_270:
		panel->vbt.dsi.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
		break;
	}

	/* We have mandatory mipi config blocks. Initialize as generic panel */
	panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}

/* Find the sequence block and size for the given panel. */
static const u8 *
find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
			  u16 panel_id, u32 *seq_size)
{
	u32 total = get_blocksize(sequence);
	const u8 *data = &sequence->data[0];
	u8 current_id;
	u32 current_size;
	int header_size = sequence->version >= 3 ?
5 : 3; int index = 0; int i; /* skip new block size */ if (sequence->version >= 3) data += 4; for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) { if (index + header_size > total) { DRM_ERROR("Invalid sequence block (header)\n"); return NULL; } current_id = *(data + index); if (sequence->version >= 3) current_size = *((const u32 *)(data + index + 1)); else current_size = *((const u16 *)(data + index + 1)); index += header_size; if (index + current_size > total) { DRM_ERROR("Invalid sequence block\n"); return NULL; } if (current_id == panel_id) { *seq_size = current_size; return data + index; } index += current_size; } DRM_ERROR("Sequence block detected but no valid configuration\n"); return NULL; } static int goto_next_sequence(const u8 *data, int index, int total) { u16 len; /* Skip Sequence Byte. */ for (index = index + 1; index < total; index += len) { u8 operation_byte = *(data + index); index++; switch (operation_byte) { case MIPI_SEQ_ELEM_END: return index; case MIPI_SEQ_ELEM_SEND_PKT: if (index + 4 > total) return 0; len = *((const u16 *)(data + index + 2)) + 4; break; case MIPI_SEQ_ELEM_DELAY: len = 4; break; case MIPI_SEQ_ELEM_GPIO: len = 2; break; case MIPI_SEQ_ELEM_I2C: if (index + 7 > total) return 0; len = *(data + index + 6) + 7; break; default: DRM_ERROR("Unknown operation byte\n"); return 0; } } return 0; } static int goto_next_sequence_v3(const u8 *data, int index, int total) { int seq_end; u16 len; u32 size_of_sequence; /* * Could skip sequence based on Size of Sequence alone, but also do some * checking on the structure. */ if (total < 5) { DRM_ERROR("Too small sequence size\n"); return 0; } /* Skip Sequence Byte. */ index++; /* * Size of Sequence. Excludes the Sequence Byte and the size itself, * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END * byte. */ size_of_sequence = *((const u32 *)(data + index)); index += 4; seq_end = index + size_of_sequence; if (seq_end > total) { DRM_ERROR("Invalid sequence size\n"); return 0; } for (; index < total; index += len) { u8 operation_byte = *(data + index); index++; if (operation_byte == MIPI_SEQ_ELEM_END) { if (index != seq_end) { DRM_ERROR("Invalid element structure\n"); return 0; } return index; } len = *(data + index); index++; /* * FIXME: Would be nice to check elements like for v1/v2 in * goto_next_sequence() above. */ switch (operation_byte) { case MIPI_SEQ_ELEM_SEND_PKT: case MIPI_SEQ_ELEM_DELAY: case MIPI_SEQ_ELEM_GPIO: case MIPI_SEQ_ELEM_I2C: case MIPI_SEQ_ELEM_SPI: case MIPI_SEQ_ELEM_PMIC: break; default: DRM_ERROR("Unknown operation byte %u\n", operation_byte); break; } } return 0; } /* * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, * skip all delay + gpio operands and stop at the first DSI packet op. */ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915, struct intel_panel *panel) { const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; int index, len; if (drm_WARN_ON(&i915->drm, !data || panel->vbt.dsi.seq_version != 1)) return 0; /* index = 1 to skip sequence byte */ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) { switch (data[index]) { case MIPI_SEQ_ELEM_SEND_PKT: return index == 1 ? 0 : index; case MIPI_SEQ_ELEM_DELAY: len = 5; /* 1 byte for operand + uint32 */ break; case MIPI_SEQ_ELEM_GPIO: len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */ break; default: return 0; } } return 0; } /* * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. 
 * The deassert must be done before calling intel_dsi_device_ready, so for
 * these devices we split the init OTP sequence into a deassert sequence and
 * the actual init OTP part.
 */
static void fixup_mipi_sequences(struct drm_i915_private *i915,
				 struct intel_panel *panel)
{
	u8 *init_otp;
	int len;

	/* Limit this to VLV for now. */
	if (!IS_VALLEYVIEW(i915))
		return;

	/* Limit this to v1 vid-mode sequences */
	if (panel->vbt.dsi.config->is_cmd_mode ||
	    panel->vbt.dsi.seq_version != 1)
		return;

	/* Only do this if there are otp and assert seqs and no deassert seq */
	if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
	    !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
	    panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
		return;

	/* The deassert-sequence ends at the first DSI packet */
	len = get_init_otp_deassert_fragment_len(i915, panel);
	if (!len)
		return;

	drm_dbg_kms(&i915->drm, "Using init OTP fragment to deassert reset\n");

	/* Copy the fragment, update seq byte and terminate it */
	init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
	panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
	if (!panel->vbt.dsi.deassert_seq)
		return;
	panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
	panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
	/* Use the copy for deassert */
	panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
		panel->vbt.dsi.deassert_seq;

	/* Replace the last byte of the fragment with init OTP seq byte */
	init_otp[len - 1] = MIPI_SEQ_INIT_OTP;

	/* And make MIPI_SEQ_INIT_OTP point to it */
	panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}

static void parse_mipi_sequence(struct drm_i915_private *i915,
				struct intel_panel *panel)
{
	int panel_type = panel->vbt.panel_type;
	const struct bdb_mipi_sequence *sequence;
	const u8 *seq_data;
	u32 seq_size;
	u8 *data;
	int index = 0;

	/* Only our generic panel driver uses the sequence block. */
	if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
		return;

	sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
	if (!sequence) {
		drm_dbg_kms(&i915->drm,
			    "No MIPI Sequence found, parsing complete\n");
		return;
	}

	/* Fail gracefully for forward incompatible sequence block. */
	if (sequence->version >= 4) {
		drm_err(&i915->drm,
			"Unable to parse MIPI Sequence Block v%u\n",
			sequence->version);
		return;
	}

	drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
		sequence->version);

	seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
	if (!seq_data)
		return;

	data = kmemdup(seq_data, seq_size, GFP_KERNEL);
	if (!data)
		return;

	/* Parse the sequences, store pointers to each sequence. */
	for (;;) {
		u8 seq_id = *(data + index);

		if (seq_id == MIPI_SEQ_END)
			break;

		if (seq_id >= MIPI_SEQ_MAX) {
			drm_err(&i915->drm, "Unknown sequence %u\n", seq_id);
			goto err;
		}

		/* Log about presence of sequences we won't run.
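		 * (MIPI_SEQ_TEAR_ON/OFF are still stored below, just flagged
		 * as unsupported here.)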
*/ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF) drm_dbg_kms(&i915->drm, "Unsupported sequence %u\n", seq_id); panel->vbt.dsi.sequence[seq_id] = data + index; if (sequence->version >= 3) index = goto_next_sequence_v3(data, index, seq_size); else index = goto_next_sequence(data, index, seq_size); if (!index) { drm_err(&i915->drm, "Invalid sequence %u\n", seq_id); goto err; } } panel->vbt.dsi.data = data; panel->vbt.dsi.size = seq_size; panel->vbt.dsi.seq_version = sequence->version; fixup_mipi_sequences(i915, panel); drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n"); return; err: kfree(data); memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence)); } static void parse_compression_parameters(struct drm_i915_private *i915) { const struct bdb_compression_parameters *params; struct intel_bios_encoder_data *devdata; u16 block_size; int index; if (i915->display.vbt.version < 198) return; params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS); if (params) { /* Sanity checks */ if (params->entry_size != sizeof(params->data[0])) { drm_dbg_kms(&i915->drm, "VBT: unsupported compression param entry size\n"); return; } block_size = get_blocksize(params); if (block_size < sizeof(*params)) { drm_dbg_kms(&i915->drm, "VBT: expected 16 compression param entries\n"); return; } } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; if (!child->compression_enable) continue; if (!params) { drm_dbg_kms(&i915->drm, "VBT: compression params not available\n"); continue; } if (child->compression_method_cps) { drm_dbg_kms(&i915->drm, "VBT: CPS compression not supported\n"); continue; } index = child->compression_structure_index; devdata->dsc = kmemdup(&params->data[index], sizeof(*devdata->dsc), GFP_KERNEL); } } static u8 translate_iboost(u8 val) { static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */ if (val >= ARRAY_SIZE(mapping)) { DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val); return 0; } return mapping[val]; } static const u8 cnp_ddc_pin_map[] = { [0] = 0, /* N/A */ [GMBUS_PIN_1_BXT] = DDC_BUS_DDI_B, [GMBUS_PIN_2_BXT] = DDC_BUS_DDI_C, [GMBUS_PIN_4_CNP] = DDC_BUS_DDI_D, /* sic */ [GMBUS_PIN_3_BXT] = DDC_BUS_DDI_F, /* sic */ }; static const u8 icp_ddc_pin_map[] = { [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, [GMBUS_PIN_3_BXT] = TGL_DDC_BUS_DDI_C, [GMBUS_PIN_9_TC1_ICP] = ICL_DDC_BUS_PORT_1, [GMBUS_PIN_10_TC2_ICP] = ICL_DDC_BUS_PORT_2, [GMBUS_PIN_11_TC3_ICP] = ICL_DDC_BUS_PORT_3, [GMBUS_PIN_12_TC4_ICP] = ICL_DDC_BUS_PORT_4, [GMBUS_PIN_13_TC5_TGP] = TGL_DDC_BUS_PORT_5, [GMBUS_PIN_14_TC6_TGP] = TGL_DDC_BUS_PORT_6, }; static const u8 rkl_pch_tgp_ddc_pin_map[] = { [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, [GMBUS_PIN_9_TC1_ICP] = RKL_DDC_BUS_DDI_D, [GMBUS_PIN_10_TC2_ICP] = RKL_DDC_BUS_DDI_E, }; static const u8 adls_ddc_pin_map[] = { [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, [GMBUS_PIN_9_TC1_ICP] = ADLS_DDC_BUS_PORT_TC1, [GMBUS_PIN_10_TC2_ICP] = ADLS_DDC_BUS_PORT_TC2, [GMBUS_PIN_11_TC3_ICP] = ADLS_DDC_BUS_PORT_TC3, [GMBUS_PIN_12_TC4_ICP] = ADLS_DDC_BUS_PORT_TC4, }; static const u8 gen9bc_tgp_ddc_pin_map[] = { [GMBUS_PIN_2_BXT] = DDC_BUS_DDI_B, [GMBUS_PIN_9_TC1_ICP] = DDC_BUS_DDI_C, [GMBUS_PIN_10_TC2_ICP] = DDC_BUS_DDI_D, }; static const u8 adlp_ddc_pin_map[] = { [GMBUS_PIN_1_BXT] = ICL_DDC_BUS_DDI_A, [GMBUS_PIN_2_BXT] = ICL_DDC_BUS_DDI_B, [GMBUS_PIN_9_TC1_ICP] = 
ADLP_DDC_BUS_PORT_TC1, [GMBUS_PIN_10_TC2_ICP] = ADLP_DDC_BUS_PORT_TC2, [GMBUS_PIN_11_TC3_ICP] = ADLP_DDC_BUS_PORT_TC3, [GMBUS_PIN_12_TC4_ICP] = ADLP_DDC_BUS_PORT_TC4, }; static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) { const u8 *ddc_pin_map; int i, n_entries; if (HAS_PCH_MTP(i915) || IS_ALDERLAKE_P(i915)) { ddc_pin_map = adlp_ddc_pin_map; n_entries = ARRAY_SIZE(adlp_ddc_pin_map); } else if (IS_ALDERLAKE_S(i915)) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { return vbt_pin; } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) { ddc_pin_map = gen9bc_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { ddc_pin_map = icp_ddc_pin_map; n_entries = ARRAY_SIZE(icp_ddc_pin_map); } else if (HAS_PCH_CNP(i915)) { ddc_pin_map = cnp_ddc_pin_map; n_entries = ARRAY_SIZE(cnp_ddc_pin_map); } else { /* Assuming direct map */ return vbt_pin; } for (i = 0; i < n_entries; i++) { if (ddc_pin_map[i] == vbt_pin) return i; } drm_dbg_kms(&i915->drm, "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin); return 0; } static u8 dvo_port_type(u8 dvo_port) { switch (dvo_port) { case DVO_PORT_HDMIA: case DVO_PORT_HDMIB: case DVO_PORT_HDMIC: case DVO_PORT_HDMID: case DVO_PORT_HDMIE: case DVO_PORT_HDMIF: case DVO_PORT_HDMIG: case DVO_PORT_HDMIH: case DVO_PORT_HDMII: return DVO_PORT_HDMIA; case DVO_PORT_DPA: case DVO_PORT_DPB: case DVO_PORT_DPC: case DVO_PORT_DPD: case DVO_PORT_DPE: case DVO_PORT_DPF: case DVO_PORT_DPG: case DVO_PORT_DPH: case DVO_PORT_DPI: return DVO_PORT_DPA; case DVO_PORT_MIPIA: case DVO_PORT_MIPIB: case DVO_PORT_MIPIC: case DVO_PORT_MIPID: return DVO_PORT_MIPIA; default: return dvo_port; } } static enum port __dvo_port_to_port(int n_ports, int n_dvo, const int port_mapping[][3], u8 dvo_port) { enum port port; int i; for (port = PORT_A; port < n_ports; port++) { for (i = 0; i < n_dvo; i++) { if (port_mapping[port][i] == -1) break; if (dvo_port == port_mapping[port][i]) return port; } } return PORT_NONE; } static enum port dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port) { /* * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. */ static const int port_mapping[][3] = { [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT }, [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 }, [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 }, [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 }, [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; /* * RKL VBT uses PHY based mapping. Combo PHYs A,B,C,D * map to DDI A,B,TC1,TC2 respectively. */ static const int rkl_port_mapping[][3] = { [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, [PORT_C] = { -1 }, [PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, [PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, }; /* * Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E, * PORT_F and PORT_G, we need to map that to correct VBT sections. 
*/ static const int adls_port_mapping[][3] = { [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, [PORT_B] = { -1 }, [PORT_C] = { -1 }, [PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, [PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, [PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, [PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 }, }; static const int xelpd_port_mapping[][3] = { [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, [PORT_D_XELPD] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, [PORT_E_XELPD] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 }, [PORT_TC1] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 }, [PORT_TC2] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 }, [PORT_TC3] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 }, [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; if (DISPLAY_VER(i915) >= 13) return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping), ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, dvo_port); else if (IS_ALDERLAKE_S(i915)) return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping), ARRAY_SIZE(adls_port_mapping[0]), adls_port_mapping, dvo_port); else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), ARRAY_SIZE(rkl_port_mapping[0]), rkl_port_mapping, dvo_port); else return __dvo_port_to_port(ARRAY_SIZE(port_mapping), ARRAY_SIZE(port_mapping[0]), port_mapping, dvo_port); } static enum port dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port) { switch (dvo_port) { case DVO_PORT_MIPIA: return PORT_A; case DVO_PORT_MIPIC: if (DISPLAY_VER(i915) >= 11) return PORT_B; else return PORT_C; default: return PORT_NONE; } } enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915 = devdata->i915; const struct child_device_config *child = &devdata->child; enum port port; port = dvo_port_to_port(i915, child->dvo_port); if (port == PORT_NONE && DISPLAY_VER(i915) >= 11) port = dsi_dvo_port_to_port(i915, child->dvo_port); return port; } static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate) { switch (vbt_max_link_rate) { default: case BDB_230_VBT_DP_MAX_LINK_RATE_DEF: return 0; case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20: return 2000000; case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5: return 1350000; case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10: return 1000000; case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3: return 810000; case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2: return 540000; case BDB_230_VBT_DP_MAX_LINK_RATE_HBR: return 270000; case BDB_230_VBT_DP_MAX_LINK_RATE_LBR: return 162000; } } static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) { switch (vbt_max_link_rate) { default: case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3: return 810000; case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2: return 540000; case BDB_216_VBT_DP_MAX_LINK_RATE_HBR: return 270000; case BDB_216_VBT_DP_MAX_LINK_RATE_LBR: return 162000; } } int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 216) return 0; if (devdata->i915->display.vbt.version >= 230) return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate); else return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); } int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 244) return 0; return devdata->child.dp_max_lane_count + 1; } static void sanitize_device_type(struct 
intel_bios_encoder_data *devdata, enum port port) { struct drm_i915_private *i915 = devdata->i915; bool is_hdmi; if (port != PORT_A || DISPLAY_VER(i915) >= 12) return; if (!intel_bios_encoder_supports_dvi(devdata)) return; is_hdmi = intel_bios_encoder_supports_hdmi(devdata); drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n", is_hdmi ? "/HDMI" : ""); devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; } static bool intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata) { return devdata->child.device_type & DEVICE_TYPE_ANALOG_OUTPUT; } bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata) { return devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; } bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata) { return intel_bios_encoder_supports_dvi(devdata) && (devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; } bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata) { return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; } bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) { return intel_bios_encoder_supports_dp(devdata) && devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR; } bool intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) { return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; } bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) { return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon; } /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 158 || DISPLAY_VER(devdata->i915) >= 14) return -1; return devdata->child.hdmi_level_shifter_value; } int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 204) return 0; switch (devdata->child.hdmi_max_data_rate) { default: MISSING_CASE(devdata->child.hdmi_max_data_rate); fallthrough; case HDMI_MAX_DATA_RATE_PLATFORM: return 0; case HDMI_MAX_DATA_RATE_594: return 594000; case HDMI_MAX_DATA_RATE_340: return 340000; case HDMI_MAX_DATA_RATE_300: return 300000; case HDMI_MAX_DATA_RATE_297: return 297000; case HDMI_MAX_DATA_RATE_165: return 165000; } } static bool is_port_valid(struct drm_i915_private *i915, enum port port) { /* * On some ICL SKUs port F is not present, but broken VBTs mark * the port as present. Only try to initialize port F for the * SKUs that may actually have it. 
*/ if (port == PORT_F && IS_ICELAKE(i915)) return IS_ICL_WITH_PORT_F(i915); return true; } static void print_ddi_port(const struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915 = devdata->i915; const struct child_device_config *child = &devdata->child; bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt; int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock; enum port port; port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; is_dvi = intel_bios_encoder_supports_dvi(devdata); is_dp = intel_bios_encoder_supports_dp(devdata); is_crt = intel_bios_encoder_supports_crt(devdata); is_hdmi = intel_bios_encoder_supports_hdmi(devdata); is_edp = intel_bios_encoder_supports_edp(devdata); is_dsi = intel_bios_encoder_supports_dsi(devdata); supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata); supports_tbt = intel_bios_encoder_supports_tbt(devdata); drm_dbg_kms(&i915->drm, "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d DP++:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, intel_bios_encoder_supports_dp_dual_mode(devdata), intel_bios_encoder_is_lspcon(devdata), supports_typec_usb, supports_tbt, devdata->dsc != NULL); hdmi_level_shift = intel_bios_hdmi_level_shift(devdata); if (hdmi_level_shift >= 0) { drm_dbg_kms(&i915->drm, "Port %c VBT HDMI level shift: %d\n", port_name(port), hdmi_level_shift); } max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata); if (max_tmds_clock) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI max TMDS clock: %d kHz\n", port_name(port), max_tmds_clock); /* I_boost config for SKL and above */ dp_boost_level = intel_bios_dp_boost_level(devdata); if (dp_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT (e)DP boost level: %d\n", port_name(port), dp_boost_level); hdmi_boost_level = intel_bios_hdmi_boost_level(devdata); if (hdmi_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI boost level: %d\n", port_name(port), hdmi_boost_level); dp_max_link_rate = intel_bios_dp_max_link_rate(devdata); if (dp_max_link_rate) drm_dbg_kms(&i915->drm, "Port %c VBT DP max link rate: %d\n", port_name(port), dp_max_link_rate); /* * FIXME need to implement support for VBT * vswing/preemph tables should this ever trigger. 
*/ drm_WARN(&i915->drm, child->use_vbt_vswing, "Port %c asks to use VBT vswing/preemph tables\n", port_name(port)); } static void parse_ddi_port(struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915 = devdata->i915; enum port port; port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; if (!is_port_valid(i915, port)) { drm_dbg_kms(&i915->drm, "VBT reports port %c as supported, but that can't be true: skipping\n", port_name(port)); return; } sanitize_device_type(devdata, port); } static bool has_ddi_port_info(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 5 || IS_G4X(i915); } static void parse_ddi_ports(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata; if (!has_ddi_port_info(i915)) return; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) parse_ddi_port(devdata); list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) print_ddi_port(devdata); } static void parse_general_definitions(struct drm_i915_private *i915) { const struct bdb_general_definitions *defs; struct intel_bios_encoder_data *devdata; const struct child_device_config *child; int i, child_device_num; u8 expected_size; u16 block_size; int bus_pin; defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS); if (!defs) { drm_dbg_kms(&i915->drm, "No general definition block is found, no devices defined.\n"); return; } block_size = get_blocksize(defs); if (block_size < sizeof(*defs)) { drm_dbg_kms(&i915->drm, "General definitions block too small (%u)\n", block_size); return; } bus_pin = defs->crt_ddc_gmbus_pin; drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin); if (intel_gmbus_is_valid_pin(i915, bus_pin)) i915->display.vbt.crt_ddc_pin = bus_pin; if (i915->display.vbt.version < 106) { expected_size = 22; } else if (i915->display.vbt.version < 111) { expected_size = 27; } else if (i915->display.vbt.version < 195) { expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; } else if (i915->display.vbt.version == 195) { expected_size = 37; } else if (i915->display.vbt.version <= 215) { expected_size = 38; } else if (i915->display.vbt.version <= 250) { expected_size = 39; } else { expected_size = sizeof(*child); BUILD_BUG_ON(sizeof(*child) < 39); drm_dbg(&i915->drm, "Expected child device config size for VBT version %u not known; assuming %u\n", i915->display.vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ if (defs->child_dev_size != expected_size) drm_err(&i915->drm, "Unexpected child device config size %u (expected %u for VBT version %u)\n", defs->child_dev_size, expected_size, i915->display.vbt.version); /* The legacy sized child device config is the minimum we need. */ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { drm_dbg_kms(&i915->drm, "Child device config size %u is too small.\n", defs->child_dev_size); return; } /* get the number of child device */ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; for (i = 0; i < child_device_num; i++) { child = child_device_ptr(defs, i); if (!child->device_type) continue; drm_dbg_kms(&i915->drm, "Found VBT child device with type 0x%x\n", child->device_type); devdata = kzalloc(sizeof(*devdata), GFP_KERNEL); if (!devdata) break; devdata->i915 = i915; /* * Copy as much as we know (sizeof) and is available * (child_dev_size) of the child device config. Accessing the * data must depend on VBT version. 
		 */
		memcpy(&devdata->child, child,
		       min_t(size_t, defs->child_dev_size, sizeof(*child)));

		list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
	}

	if (list_empty(&i915->display.vbt.display_devices))
		drm_dbg_kms(&i915->drm,
			    "no child dev is parsed from VBT\n");
}

/* Common defaults which may be overridden by VBT. */
static void init_vbt_defaults(struct drm_i915_private *i915)
{
	i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;

	/* general features */
	i915->display.vbt.int_tv_support = 1;
	i915->display.vbt.int_crt_support = 1;

	/* driver features */
	i915->display.vbt.int_lvds_support = 1;

	/* Default to using SSC */
	i915->display.vbt.lvds_use_ssc = 1;
	/*
	 * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
	 * clock for LVDS.
	 */
	i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
								   !HAS_PCH_SPLIT(i915));
	drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
		    i915->display.vbt.lvds_ssc_freq);
}

/* Common defaults which may be overridden by VBT. */
static void init_vbt_panel_defaults(struct intel_panel *panel)
{
	/* Default to having backlight */
	panel->vbt.backlight.present = true;

	/* LFP panel data */
	panel->vbt.lvds_dither = true;
}

/* Defaults to initialize only if there is no VBT. */
static void init_vbt_missing_defaults(struct drm_i915_private *i915)
{
	enum port port;
	int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
		    BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);

	if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
		return;

	for_each_port_masked(port, ports) {
		struct intel_bios_encoder_data *devdata;
		struct child_device_config *child;
		enum phy phy = intel_port_to_phy(i915, port);

		/*
		 * VBT has the TypeC mode (native,TBT/USB) and we don't want
		 * to detect it.
		 */
		if (intel_phy_is_tc(i915, phy))
			continue;

		/* Create fake child device config */
		devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
		if (!devdata)
			break;

		devdata->i915 = i915;
		child = &devdata->child;

		if (port == PORT_F)
			child->dvo_port = DVO_PORT_HDMIF;
		else if (port == PORT_E)
			child->dvo_port = DVO_PORT_HDMIE;
		else
			child->dvo_port = DVO_PORT_HDMIA + port;

		if (port != PORT_A && port != PORT_E)
			child->device_type |= DEVICE_TYPE_TMDS_DVI_SIGNALING;

		if (port != PORT_E)
			child->device_type |= DEVICE_TYPE_DISPLAYPORT_OUTPUT;

		if (port == PORT_A)
			child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;

		list_add_tail(&devdata->node, &i915->display.vbt.display_devices);

		drm_dbg_kms(&i915->drm,
			    "Generating default VBT child device with type 0x%04x on port %c\n",
			    child->device_type, port_name(port));
	}

	/* Bypass some minimum baseline VBT version checks */
	i915->display.vbt.version = 155;
}

static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
{
	const void *_vbt = vbt;

	return _vbt + vbt->bdb_offset;
}

/**
 * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
 * @buf: pointer to a buffer to validate
 * @size: size of the buffer
 *
 * Returns true on valid VBT.
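 *
 * The buffer is rejected if the "$VBT" signature is missing, if vbt_size
 * overflows the buffer, or if the BDB header or BDB block would extend
 * past the end of the VBT.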
*/ bool intel_bios_is_valid_vbt(const void *buf, size_t size) { const struct vbt_header *vbt = buf; const struct bdb_header *bdb; if (!vbt) return false; if (sizeof(struct vbt_header) > size) { DRM_DEBUG_DRIVER("VBT header incomplete\n"); return false; } if (memcmp(vbt->signature, "$VBT", 4)) { DRM_DEBUG_DRIVER("VBT invalid signature\n"); return false; } if (vbt->vbt_size > size) { DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n"); return false; } size = vbt->vbt_size; if (range_overflows_t(size_t, vbt->bdb_offset, sizeof(struct bdb_header), size)) { DRM_DEBUG_DRIVER("BDB header incomplete\n"); return false; } bdb = get_bdb_header(vbt); if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) { DRM_DEBUG_DRIVER("BDB incomplete\n"); return false; } return vbt; } static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset) { intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset); return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER); } static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) { u32 count, data, found, store = 0; u32 static_region, oprom_offset; u32 oprom_size = 0x200000; u16 vbt_size; u32 *vbt; static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS); static_region &= OPTIONROM_SPI_REGIONID_MASK; intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region); oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET); oprom_offset &= OROM_OFFSET_MASK; for (count = 0; count < oprom_size; count += 4) { data = intel_spi_read(&i915->uncore, oprom_offset + count); if (data == *((const u32 *)"$VBT")) { found = oprom_offset + count; break; } } if (count >= oprom_size) goto err_not_found; /* Get VBT size and allocate space for the VBT */ vbt_size = intel_spi_read(&i915->uncore, found + offsetof(struct vbt_header, vbt_size)); vbt_size &= 0xffff; vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); if (!vbt) goto err_not_found; for (count = 0; count < vbt_size; count += 4) *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); if (!intel_bios_is_valid_vbt(vbt, vbt_size)) goto err_free_vbt; drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); return (struct vbt_header *)vbt; err_free_vbt: kfree(vbt); err_not_found: return NULL; } static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); void __iomem *p = NULL, *oprom; struct vbt_header *vbt; u16 vbt_size; size_t i, size; oprom = pci_map_rom(pdev, &size); if (!oprom) return NULL; /* Scour memory looking for the VBT signature. 
*/ for (i = 0; i + 4 < size; i += 4) { if (ioread32(oprom + i) != *((const u32 *)"$VBT")) continue; p = oprom + i; size -= i; break; } if (!p) goto err_unmap_oprom; if (sizeof(struct vbt_header) > size) { drm_dbg(&i915->drm, "VBT header incomplete\n"); goto err_unmap_oprom; } vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); if (vbt_size > size) { drm_dbg(&i915->drm, "VBT incomplete (vbt_size overflows)\n"); goto err_unmap_oprom; } /* The rest will be validated by intel_bios_is_valid_vbt() */ vbt = kmalloc(vbt_size, GFP_KERNEL); if (!vbt) goto err_unmap_oprom; memcpy_fromio(vbt, p, vbt_size); if (!intel_bios_is_valid_vbt(vbt, vbt_size)) goto err_free_vbt; pci_unmap_rom(pdev, oprom); drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); return vbt; err_free_vbt: kfree(vbt); err_unmap_oprom: pci_unmap_rom(pdev, oprom); return NULL; } /** * intel_bios_init - find VBT and initialize settings from the BIOS * @i915: i915 device instance * * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also * initialize some defaults if the VBT is not present at all. */ void intel_bios_init(struct drm_i915_private *i915) { const struct vbt_header *vbt = i915->display.opregion.vbt; struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; INIT_LIST_HEAD(&i915->display.vbt.display_devices); INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks); if (!HAS_DISPLAY(i915)) { drm_dbg_kms(&i915->drm, "Skipping VBT init due to disabled display.\n"); return; } init_vbt_defaults(i915); /* * If the OpRegion does not have VBT, look in SPI flash through MMIO or * PCI mapping */ if (!vbt && IS_DGFX(i915)) { oprom_vbt = spi_oprom_get_vbt(i915); vbt = oprom_vbt; } if (!vbt) { oprom_vbt = oprom_get_vbt(i915); vbt = oprom_vbt; } if (!vbt) goto out; bdb = get_bdb_header(vbt); i915->display.vbt.version = bdb->version; drm_dbg_kms(&i915->drm, "VBT signature \"%.*s\", BDB version %d\n", (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version); init_bdb_blocks(i915, bdb); /* Grab useful general definitions */ parse_general_features(i915); parse_general_definitions(i915); parse_driver_features(i915); /* Depends on child device list */ parse_compression_parameters(i915); out: if (!vbt) { drm_info(&i915->drm, "Failed to find VBIOS tables (VBT)\n"); init_vbt_missing_defaults(i915); } /* Further processing on pre-parsed or generated child device data */ parse_sdvo_device_mapping(i915); parse_ddi_ports(i915); kfree(oprom_vbt); } static void intel_bios_init_panel(struct drm_i915_private *i915, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid, bool use_fallback) { /* already have it? 
*/ if (panel->vbt.panel_type >= 0) { drm_WARN_ON(&i915->drm, !use_fallback); return; } panel->vbt.panel_type = get_panel_type(i915, devdata, drm_edid, use_fallback); if (panel->vbt.panel_type < 0) { drm_WARN_ON(&i915->drm, use_fallback); return; } init_vbt_panel_defaults(panel); parse_panel_options(i915, panel); parse_generic_dtd(i915, panel); parse_lfp_data(i915, panel); parse_lfp_backlight(i915, panel); parse_sdvo_panel_data(i915, panel); parse_panel_driver_features(i915, panel); parse_power_conservation_features(i915, panel); parse_edp(i915, panel); parse_psr(i915, panel); parse_mipi_config(i915, panel); parse_mipi_sequence(i915, panel); } void intel_bios_init_panel_early(struct drm_i915_private *i915, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata) { intel_bios_init_panel(i915, panel, devdata, NULL, false); } void intel_bios_init_panel_late(struct drm_i915_private *i915, struct intel_panel *panel, const struct intel_bios_encoder_data *devdata, const struct drm_edid *drm_edid) { intel_bios_init_panel(i915, panel, devdata, drm_edid, true); } /** * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() * @i915: i915 device instance */ void intel_bios_driver_remove(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata, *nd; struct bdb_block_entry *entry, *ne; list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) { list_del(&devdata->node); kfree(devdata->dsc); kfree(devdata); } list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) { list_del(&entry->node); kfree(entry); } } void intel_bios_fini_panel(struct intel_panel *panel) { kfree(panel->vbt.sdvo_lvds_vbt_mode); panel->vbt.sdvo_lvds_vbt_mode = NULL; kfree(panel->vbt.lfp_lvds_vbt_mode); panel->vbt.lfp_lvds_vbt_mode = NULL; kfree(panel->vbt.dsi.data); panel->vbt.dsi.data = NULL; kfree(panel->vbt.dsi.pps); panel->vbt.dsi.pps = NULL; kfree(panel->vbt.dsi.config); panel->vbt.dsi.config = NULL; kfree(panel->vbt.dsi.deassert_seq); panel->vbt.dsi.deassert_seq = NULL; } /** * intel_bios_is_tv_present - is integrated TV present in VBT * @i915: i915 device instance * * Return true if TV is present. If no child devices were parsed from VBT, * assume TV is present. */ bool intel_bios_is_tv_present(struct drm_i915_private *i915) { const struct intel_bios_encoder_data *devdata; if (!i915->display.vbt.int_tv_support) return false; if (list_empty(&i915->display.vbt.display_devices)) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; /* * If the device type is not TV, continue. */ switch (child->device_type) { case DEVICE_TYPE_INT_TV: case DEVICE_TYPE_TV: case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: break; default: continue; } /* Only when the addin_offset is non-zero, it is regarded * as present. */ if (child->addin_offset) return true; } return false; } /** * intel_bios_is_lvds_present - is LVDS present in VBT * @i915: i915 device instance * @i2c_pin: i2c pin for LVDS if present * * Return true if LVDS is present. If no child devices were parsed from VBT, * assume LVDS is present. */ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) { const struct intel_bios_encoder_data *devdata; if (list_empty(&i915->display.vbt.display_devices)) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; /* If the device type is not LFP, continue. 
		 * We have to check both the new identifiers as well as the
		 * old for compatibility with some BIOSes.
		 */
		if (child->device_type != DEVICE_TYPE_INT_LFP &&
		    child->device_type != DEVICE_TYPE_LFP)
			continue;

		if (intel_gmbus_is_valid_pin(i915, child->i2c_pin))
			*i2c_pin = child->i2c_pin;

		/* However, we cannot trust the BIOS writers to populate
		 * the VBT correctly. Since LVDS requires additional
		 * information from AIM blocks, a non-zero addin offset is
		 * a good indicator that the LVDS is actually present.
		 */
		if (child->addin_offset)
			return true;

		/* But even then some BIOS writers perform some black magic
		 * and instantiate the device without reference to any
		 * additional data. Trust that if the VBT was written into
		 * the OpRegion then they have validated the LVDS's existence.
		 */
		if (i915->display.opregion.vbt)
			return true;
	}

	return false;
}

/**
 * intel_bios_is_port_present - is the specified digital port present
 * @i915: i915 device instance
 * @port: port to check
 *
 * Return true if the device in %port is present.
 */
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
	const struct intel_bios_encoder_data *devdata;

	if (WARN_ON(!has_ddi_port_info(i915)))
		return true;

	if (!is_port_valid(i915, port))
		return false;

	list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
		const struct child_device_config *child = &devdata->child;

		if (dvo_port_to_port(i915, child->dvo_port) == port)
			return true;
	}

	return false;
}

bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
{
	const struct child_device_config *child = &devdata->child;

	if (!intel_bios_encoder_supports_dp(devdata) ||
	    !intel_bios_encoder_supports_hdmi(devdata))
		return false;

	if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA)
		return true;

	/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
	if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA &&
	    child->aux_channel != 0)
		return true;

	return false;
}

/**
 * intel_bios_is_dsi_present - is DSI present in VBT
 * @i915: i915 device instance
 * @port: port for DSI if present
 *
 * Return true if DSI is present, and return the port in %port.
 */
bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
			       enum port *port)
{
	const struct intel_bios_encoder_data *devdata;

	list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
		const struct child_device_config *child = &devdata->child;
		u8 dvo_port = child->dvo_port;

		if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
			continue;

		if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
			drm_dbg_kms(&i915->drm,
				    "VBT has unsupported DSI port %c\n",
				    port_name(dvo_port - DVO_PORT_MIPIA));
			continue;
		}

		if (port)
			*port = dsi_dvo_port_to_port(i915, dvo_port);
		return true;
	}

	return false;
}

static void fill_dsc(struct intel_crtc_state *crtc_state,
		     struct dsc_compression_parameters_entry *dsc,
		     int dsc_max_bpc)
{
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	int bpc = 8;

	vdsc_cfg->dsc_version_major = dsc->version_major;
	vdsc_cfg->dsc_version_minor = dsc->version_minor;

	if (dsc->support_12bpc && dsc_max_bpc >= 12)
		bpc = 12;
	else if (dsc->support_10bpc && dsc_max_bpc >= 10)
		bpc = 10;
	else if (dsc->support_8bpc && dsc_max_bpc >= 8)
		bpc = 8;
	else
		DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DSC\n",
			      dsc_max_bpc);

	crtc_state->pipe_bpp = bpc * 3;

	crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
					     VBT_DSC_MAX_BPP(dsc->max_bpp));

	/*
	 * FIXME: This is ugly, and slice count should take DSC engine
	 * throughput etc. into account.
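	 * (slices_per_line is a bitmask of supported slice counts; the
	 * checks below simply pick the largest of 4, 2 or 1.)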
* * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices. */ if (dsc->slices_per_line & BIT(2)) { crtc_state->dsc.slice_count = 4; } else if (dsc->slices_per_line & BIT(1)) { crtc_state->dsc.slice_count = 2; } else { /* FIXME */ if (!(dsc->slices_per_line & BIT(0))) DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n"); crtc_state->dsc.slice_count = 1; } if (crtc_state->hw.adjusted_mode.crtc_hdisplay % crtc_state->dsc.slice_count != 0) DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n", crtc_state->hw.adjusted_mode.crtc_hdisplay, crtc_state->dsc.slice_count); /* * The VBT rc_buffer_block_size and rc_buffer_size definitions * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. */ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size, dsc->rc_buffer_size); /* FIXME: DSI spec says bpc + 1 for this one */ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth); vdsc_cfg->block_pred_enable = dsc->block_prediction_enable; vdsc_cfg->slice_height = dsc->slice_height; } /* FIXME: initially DSI specific */ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_bios_encoder_data *devdata; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { const struct child_device_config *child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) { if (!devdata->dsc) return false; if (crtc_state) fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc); return true; } } return false; } static const u8 adlp_aux_ch_map[] = { [AUX_CH_A] = DP_AUX_A, [AUX_CH_B] = DP_AUX_B, [AUX_CH_C] = DP_AUX_C, [AUX_CH_D_XELPD] = DP_AUX_D, [AUX_CH_E_XELPD] = DP_AUX_E, [AUX_CH_USBC1] = DP_AUX_F, [AUX_CH_USBC2] = DP_AUX_G, [AUX_CH_USBC3] = DP_AUX_H, [AUX_CH_USBC4] = DP_AUX_I, }; /* * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E * map to DDI A,TC1,TC2,TC3,TC4 respectively. */ static const u8 adls_aux_ch_map[] = { [AUX_CH_A] = DP_AUX_A, [AUX_CH_USBC1] = DP_AUX_B, [AUX_CH_USBC2] = DP_AUX_C, [AUX_CH_USBC3] = DP_AUX_D, [AUX_CH_USBC4] = DP_AUX_E, }; /* * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D * map to DDI A,B,TC1,TC2 respectively. 
*/ static const u8 rkl_aux_ch_map[] = { [AUX_CH_A] = DP_AUX_A, [AUX_CH_B] = DP_AUX_B, [AUX_CH_USBC1] = DP_AUX_C, [AUX_CH_USBC2] = DP_AUX_D, }; static const u8 direct_aux_ch_map[] = { [AUX_CH_A] = DP_AUX_A, [AUX_CH_B] = DP_AUX_B, [AUX_CH_C] = DP_AUX_C, [AUX_CH_D] = DP_AUX_D, /* aka AUX_CH_USBC1 */ [AUX_CH_E] = DP_AUX_E, /* aka AUX_CH_USBC2 */ [AUX_CH_F] = DP_AUX_F, /* aka AUX_CH_USBC3 */ [AUX_CH_G] = DP_AUX_G, /* aka AUX_CH_USBC4 */ [AUX_CH_H] = DP_AUX_H, /* aka AUX_CH_USBC5 */ [AUX_CH_I] = DP_AUX_I, /* aka AUX_CH_USBC6 */ }; static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) { const u8 *aux_ch_map; int i, n_entries; if (DISPLAY_VER(i915) >= 13) { aux_ch_map = adlp_aux_ch_map; n_entries = ARRAY_SIZE(adlp_aux_ch_map); } else if (IS_ALDERLAKE_S(i915)) { aux_ch_map = adls_aux_ch_map; n_entries = ARRAY_SIZE(adls_aux_ch_map); } else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) { aux_ch_map = rkl_aux_ch_map; n_entries = ARRAY_SIZE(rkl_aux_ch_map); } else { aux_ch_map = direct_aux_ch_map; n_entries = ARRAY_SIZE(direct_aux_ch_map); } for (i = 0; i < n_entries; i++) { if (aux_ch_map[i] == aux_channel) return i; } drm_dbg_kms(&i915->drm, "Ignoring alternate AUX CH: VBT claims AUX 0x%x, which is not valid for this platform\n", aux_channel); return AUX_CH_NONE; } enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) { if (!devdata || !devdata->child.aux_channel) return AUX_CH_NONE; return map_aux_ch(devdata->i915, devdata->child.aux_channel); } bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata) { struct drm_i915_private *i915; u8 aux_channel; int count = 0; if (!devdata || !devdata->child.aux_channel) return false; i915 = devdata->i915; aux_channel = devdata->child.aux_channel; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { if (intel_bios_encoder_supports_dp(devdata) && aux_channel == devdata->child.aux_channel) count++; } return count > 1; } int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.dp_iboost_level); } int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.hdmi_iboost_level); } int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) { if (!devdata || !devdata->child.ddc_pin) return 0; return map_ddc_pin(devdata->i915, devdata->child.ddc_pin); } bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) { return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c; } bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata) { return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; } bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata) { return devdata && devdata->child.lane_reversal; } bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata) { return devdata && devdata->child.hpd_invert; } const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { struct intel_bios_encoder_data *devdata; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { if (intel_bios_encoder_port(devdata) == port) return devdata; } return NULL; } void 
intel_bios_for_each_encoder(struct drm_i915_private *i915, void (*func)(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata)) { struct intel_bios_encoder_data *devdata; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) func(i915, devdata); }
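/*
 * Editor's note: a minimal, self-contained sketch of the reverse table
 * lookup that map_aux_ch() above performs. The VBT stores a raw DP_AUX_*
 * byte and the driver walks the platform's table to recover the logical
 * aux_ch index, falling back to "none" for bytes that are not valid on
 * the platform. Everything below (demo_aux_ch_map, demo_map_aux_ch, the
 * stand-in byte values) is illustrative only and is not part of
 * intel_bios.c.
 */
#include <stddef.h>
#include <stdio.h>

enum demo_aux_ch {
	DEMO_AUX_CH_NONE = -1,
	DEMO_AUX_CH_A,
	DEMO_AUX_CH_B,
	DEMO_AUX_CH_USBC1,
	DEMO_AUX_CH_USBC2,
};

/* index = logical AUX CH, value = stand-in for the raw byte the VBT uses */
static const unsigned char demo_aux_ch_map[] = {
	[DEMO_AUX_CH_A]     = 0x40, /* stand-in for DP_AUX_A */
	[DEMO_AUX_CH_B]     = 0x10, /* stand-in for DP_AUX_B */
	[DEMO_AUX_CH_USBC1] = 0x20, /* stand-in for DP_AUX_C */
	[DEMO_AUX_CH_USBC2] = 0x30, /* stand-in for DP_AUX_D */
};

/* walk the table and return the index whose value matches the VBT byte */
static enum demo_aux_ch demo_map_aux_ch(unsigned char vbt_byte)
{
	size_t i;

	for (i = 0; i < sizeof(demo_aux_ch_map) / sizeof(demo_aux_ch_map[0]); i++)
		if (demo_aux_ch_map[i] == vbt_byte)
			return (enum demo_aux_ch)i;

	/* unknown byte: ignore it, as map_aux_ch() does for bogus VBT data */
	return DEMO_AUX_CH_NONE;
}

int main(void)
{
	printf("VBT byte 0x20 -> aux ch index %d\n", demo_map_aux_ch(0x20));
	printf("VBT byte 0x99 -> aux ch index %d\n", demo_map_aux_ch(0x99));
	return 0;
}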
linux-master
drivers/gpu/drm/i915/display/intel_bios.c
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation */ #include "i915_drv.h" #include "intel_de.h" #include "intel_display.h" #include "intel_hti.h" #include "intel_hti_regs.h" void intel_hti_init(struct drm_i915_private *i915) { /* * If the platform has HTI, we need to find out whether it has reserved * any display resources before we create our display outputs. */ if (DISPLAY_INFO(i915)->has_hti) i915->display.hti.state = intel_de_read(i915, HDPORT_STATE); } bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy) { if (drm_WARN_ON(&i915->drm, phy == PHY_NONE)) return false; return i915->display.hti.state & HDPORT_ENABLED && i915->display.hti.state & HDPORT_DDI_USED(phy); } u32 intel_hti_dpll_mask(struct drm_i915_private *i915) { if (!(i915->display.hti.state & HDPORT_ENABLED)) return 0; /* * Note: This is subtle. The values must coincide with what's defined * for the platform. */ return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->display.hti.state); }
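/*
 * Editor's note: an illustrative, self-contained sketch of the kind of
 * bitfield decoding intel_hti_uses_phy() and intel_hti_dpll_mask() above
 * perform on the cached HDPORT_STATE word. The bit positions and macro
 * names below are assumptions made up for the example and do not match
 * the real register layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HDPORT_ENABLED        (1u << 0)           /* assumed: HTI reserved resources */
#define DEMO_HDPORT_DDI_USED(phy)  (1u << (1 + (phy))) /* assumed: one bit per PHY */
#define DEMO_DPLL_USED_MASK        (0xfu << 12)        /* assumed: 4-bit DPLL usage field */
#define DEMO_DPLL_USED_SHIFT       12

static bool demo_hti_uses_phy(uint32_t state, int phy)
{
	/* a PHY is off limits only if HTI is enabled AND it claims that PHY */
	return (state & DEMO_HDPORT_ENABLED) &&
	       (state & DEMO_HDPORT_DDI_USED(phy));
}

static uint32_t demo_hti_dpll_mask(uint32_t state)
{
	if (!(state & DEMO_HDPORT_ENABLED))
		return 0;

	/* REG_FIELD_GET() equivalent: mask the field, then shift it down */
	return (state & DEMO_DPLL_USED_MASK) >> DEMO_DPLL_USED_SHIFT;
}

int main(void)
{
	uint32_t state = DEMO_HDPORT_ENABLED | DEMO_HDPORT_DDI_USED(1) | (0x3u << 12);

	printf("phy1 used: %d, dpll mask: 0x%x\n",
	       demo_hti_uses_phy(state, 1), demo_hti_dpll_mask(state));
	return 0;
}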
linux-master
drivers/gpu/drm/i915/display/intel_hti.c
// SPDX-License-Identifier: MIT /* * Copyright © 2018 Intel Corporation */ #include <drm/drm_mipi_dsi.h> #include "i915_drv.h" #include "intel_dsi.h" #include "intel_panel.h" void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi) { ktime_t panel_power_on_time; s64 panel_power_off_duration; panel_power_on_time = ktime_get_boottime(); panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dsi->panel_power_off_time); if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay) msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration); } void intel_dsi_shutdown(struct intel_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsi_wait_panel_power_cycle(intel_dsi); } int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) { int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); if (WARN_ON(bpp < 0)) bpp = 16; return intel_dsi->pclk * bpp / intel_dsi->lane_count; } int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi) { switch (intel_dsi->escape_clk_div) { default: case 0: return 50; case 1: return 100; case 2: return 200; } } int intel_dsi_get_modes(struct drm_connector *connector) { return intel_panel_get_modes(to_intel_connector(connector)); } enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(intel_connector, mode); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; enum drm_mode_status status; drm_dbg_kms(&dev_priv->drm, "\n"); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; status = intel_panel_mode_valid(intel_connector, mode); if (status != MODE_OK) return status; if (fixed_mode->clock > max_dotclk) return MODE_CLOCK_HIGH; return intel_mode_valid_max_plane_size(dev_priv, mode, false); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, const struct mipi_dsi_host_ops *funcs, enum port port) { struct intel_dsi_host *host; struct mipi_dsi_device *device; host = kzalloc(sizeof(*host), GFP_KERNEL); if (!host) return NULL; host->base.ops = funcs; host->intel_dsi = intel_dsi; host->port = port; /* * We should call mipi_dsi_host_register(&host->base) here, but we don't * have a host->dev, and we don't have OF stuff either. So just use the * dsi framework as a library and hope for the best. Create the dsi * devices by ourselves here too. Need to be careful though, because we * don't initialize any of the driver model devices here. */ device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) { kfree(host); return NULL; } device->host = &host->base; host->device = device; return host; } enum drm_panel_orientation intel_dsi_get_panel_orientation(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum drm_panel_orientation orientation; orientation = connector->panel.vbt.dsi.orientation; if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; orientation = dev_priv->display.vbt.orientation; if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; return DRM_MODE_PANEL_ORIENTATION_NORMAL; }
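/*
 * Editor's note: a minimal sketch of the "sleep only for the remaining
 * power-cycle time" arithmetic in intel_dsi_wait_panel_power_cycle()
 * above, written in plain C instead of ktime_t/msleep(). The struct,
 * field names and the millisecond timestamps are illustrative
 * assumptions, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_dsi {
	int64_t panel_power_off_time_ms;  /* timestamp of the last panel power-off */
	int     panel_pwr_cycle_delay_ms; /* minimum off time the panel requires */
};

/* how much longer must we wait before powering the panel back on? */
static int demo_remaining_power_cycle_ms(const struct demo_dsi *dsi, int64_t now_ms)
{
	int64_t off_duration = now_ms - dsi->panel_power_off_time_ms;

	if (off_duration >= dsi->panel_pwr_cycle_delay_ms)
		return 0; /* the panel has already been off long enough */

	return (int)(dsi->panel_pwr_cycle_delay_ms - off_duration);
}

int main(void)
{
	struct demo_dsi dsi = {
		.panel_power_off_time_ms = 1000,
		.panel_pwr_cycle_delay_ms = 500,
	};

	/* 300 ms have elapsed since power-off, so 200 ms of delay remain */
	printf("remaining: %d ms\n", demo_remaining_power_cycle_ms(&dsi, 1300));
	return 0;
}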
linux-master
drivers/gpu/drm/i915/display/intel_dsi.c
/* * Copyright © 2006-2017 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/time.h> #include "hsw_ips.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_mchbar_regs.h" #include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_psr.h" #include "intel_vdsc.h" #include "vlv_sideband.h" /** * DOC: CDCLK / RAWCLK * * The display engine uses several different clocks to do its work. There * are two main clocks involved that aren't directly related to the actual * pixel clock or any symbol/bit clock of the actual output port. These * are the core display clock (CDCLK) and RAWCLK. * * CDCLK clocks most of the display pipe logic, and thus its frequency * must be high enough to support the rate at which pixels are flowing * through the pipes. Downscaling must also be accounted as that increases * the effective pixel rate. * * On several platforms the CDCLK frequency can be changed dynamically * to minimize power consumption for a given display configuration. * Typically changes to the CDCLK frequency require all the display pipes * to be shut down while the frequency is being changed. * * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit. * DMC will not change the active CDCLK frequency however, so that part * will still be performed by the driver directly. * * RAWCLK is a fixed frequency clock, often used by various auxiliary * blocks such as AUX CH or backlight PWM. Hence the only thing we * really need to know about RAWCLK is its frequency so that various * dividers can be programmed correctly. 
*/ struct intel_cdclk_funcs { void (*get_cdclk)(struct drm_i915_private *i915, struct intel_cdclk_config *cdclk_config); void (*set_cdclk)(struct drm_i915_private *i915, const struct intel_cdclk_config *cdclk_config, enum pipe pipe); int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); u8 (*calc_voltage_level)(int cdclk); }; void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config); } static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe); } static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_config) { return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config); } static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk); } static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 133333; } static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 200000; } static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 266667; } static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 333333; } static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 400000; } static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { cdclk_config->cdclk = 450000; } static void i85x_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 hpllcc = 0; /* * 852GM/852GMV only supports 133 MHz and the HPLLCC * encoding is different :( * FIXME is this the right way to detect 852GM/852GMV? */ if (pdev->revision == 0x1) { cdclk_config->cdclk = 133333; return; } pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 3), HPLLCC, &hpllcc); /* Assume that the hardware is in the high speed state. This * should be the default. 
*/ switch (hpllcc & GC_CLOCK_CONTROL_MASK) { case GC_CLOCK_133_200: case GC_CLOCK_133_200_2: case GC_CLOCK_100_200: cdclk_config->cdclk = 200000; break; case GC_CLOCK_166_250: cdclk_config->cdclk = 250000; break; case GC_CLOCK_100_133: cdclk_config->cdclk = 133333; break; case GC_CLOCK_133_266: case GC_CLOCK_133_266_2: case GC_CLOCK_166_266: cdclk_config->cdclk = 266667; break; } } static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) { cdclk_config->cdclk = 133333; return; } switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_320_MHZ: cdclk_config->cdclk = 333333; break; default: case GC_DISPLAY_CLOCK_190_200_MHZ: cdclk_config->cdclk = 190000; break; } } static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) { cdclk_config->cdclk = 133333; return; } switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_320_MHZ: cdclk_config->cdclk = 320000; break; default: case GC_DISPLAY_CLOCK_190_200_MHZ: cdclk_config->cdclk = 200000; break; } } static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) { static const unsigned int blb_vco[8] = { [0] = 3200000, [1] = 4000000, [2] = 5333333, [3] = 4800000, [4] = 6400000, }; static const unsigned int pnv_vco[8] = { [0] = 3200000, [1] = 4000000, [2] = 5333333, [3] = 4800000, [4] = 2666667, }; static const unsigned int cl_vco[8] = { [0] = 3200000, [1] = 4000000, [2] = 5333333, [3] = 6400000, [4] = 3333333, [5] = 3566667, [6] = 4266667, }; static const unsigned int elk_vco[8] = { [0] = 3200000, [1] = 4000000, [2] = 5333333, [3] = 4800000, }; static const unsigned int ctg_vco[8] = { [0] = 3200000, [1] = 4000000, [2] = 5333333, [3] = 6400000, [4] = 2666667, [5] = 4266667, }; const unsigned int *vco_table; unsigned int vco; u8 tmp = 0; /* FIXME other chipsets? */ if (IS_GM45(dev_priv)) vco_table = ctg_vco; else if (IS_G45(dev_priv)) vco_table = elk_vco; else if (IS_I965GM(dev_priv)) vco_table = cl_vco; else if (IS_PINEVIEW(dev_priv)) vco_table = pnv_vco; else if (IS_G33(dev_priv)) vco_table = blb_vco; else return 0; tmp = intel_de_read(dev_priv, IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? 
HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); else drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco); return vco; } static void g33_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 }; const u8 *div_table; unsigned int cdclk_sel; u16 tmp = 0; cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = (tmp >> 4) & 0x7; if (cdclk_sel >= ARRAY_SIZE(div_3200)) goto fail; switch (cdclk_config->vco) { case 3200000: div_table = div_3200; break; case 4000000: div_table = div_4000; break; case 4800000: div_table = div_4800; break; case 5333333: div_table = div_5333; break; default: goto fail; } cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div_table[cdclk_sel]); return; fail: drm_err(&dev_priv->drm, "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 190476; } static void pnv_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_267_MHZ_PNV: cdclk_config->cdclk = 266667; break; case GC_DISPLAY_CLOCK_333_MHZ_PNV: cdclk_config->cdclk = 333333; break; case GC_DISPLAY_CLOCK_444_MHZ_PNV: cdclk_config->cdclk = 444444; break; case GC_DISPLAY_CLOCK_200_MHZ_PNV: cdclk_config->cdclk = 200000; break; default: drm_err(&dev_priv->drm, "Unknown pnv display core clock 0x%04x\n", gcfgc); fallthrough; case GC_DISPLAY_CLOCK_133_MHZ_PNV: cdclk_config->cdclk = 133333; break; case GC_DISPLAY_CLOCK_167_MHZ_PNV: cdclk_config->cdclk = 166667; break; } } static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); static const u8 div_3200[] = { 16, 10, 8 }; static const u8 div_4000[] = { 20, 12, 10 }; static const u8 div_5333[] = { 24, 16, 14 }; const u8 *div_table; unsigned int cdclk_sel; u16 tmp = 0; cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = ((tmp >> 8) & 0x1f) - 1; if (cdclk_sel >= ARRAY_SIZE(div_3200)) goto fail; switch (cdclk_config->vco) { case 3200000: div_table = div_3200; break; case 4000000: div_table = div_4000; break; case 5333333: div_table = div_5333; break; default: goto fail; } cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div_table[cdclk_sel]); return; fail: drm_err(&dev_priv->drm, "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 200000; } static void gm45_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); unsigned int cdclk_sel; u16 tmp = 0; cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = (tmp >> 12) & 0x1; switch (cdclk_config->vco) { case 2666667: case 4000000: case 5333333: cdclk_config->cdclk = cdclk_sel ? 333333 : 222222; break; case 3200000: cdclk_config->cdclk = cdclk_sel ? 
320000 : 228571; break; default: drm_err(&dev_priv->drm, "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", cdclk_config->vco, tmp); cdclk_config->cdclk = 222222; break; } } static void hsw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; else if (IS_HASWELL_ULT(dev_priv)) cdclk_config->cdclk = 337500; else cdclk_config->cdclk = 540000; } static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; /* * We seem to get an unstable or solid color picture at 200MHz. * Not sure what's wrong. For now use 200MHz only when all pipes * are off. */ if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320) return 400000; else if (min_cdclk > 266667) return freq_320; else if (min_cdclk > 0) return 266667; else return 200000; } static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { if (IS_VALLEYVIEW(dev_priv)) { if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ return 2; else if (cdclk >= 266667) return 1; else return 0; } else { /* * Specs are full of misinformation, but testing on actual * hardware has shown that we just need to write the desired * CCK divider into the Punit register. */ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; } } static void vlv_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 val; vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); cdclk_config->vco = vlv_get_hpll_vco(dev_priv); cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk", CCK_DISPLAY_CLOCK_CONTROL, cdclk_config->vco); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); if (IS_VALLEYVIEW(dev_priv)) cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >> DSPFREQGUAR_SHIFT; else cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >> DSPFREQGUAR_SHIFT_CHV; } static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) { unsigned int credits, default_credits; if (IS_CHERRYVIEW(dev_priv)) default_credits = PFI_CREDIT(12); else default_credits = PFI_CREDIT(8); if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) { /* CHV suggested value is 31 or 63 */ if (IS_CHERRYVIEW(dev_priv)) credits = PFI_CREDIT_63; else credits = PFI_CREDIT(15); } else { credits = default_credits; } /* * WA - write default credits before re-programming * FIXME: should we also set the resend bit here? */ intel_de_write(dev_priv, GCI_CONTROL, VGA_FAST_MODE_DISABLE | default_credits); intel_de_write(dev_priv, GCI_CONTROL, VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND); /* * FIXME is this guaranteed to clear * immediately or should we poll for it? 
*/ drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND); } static void vlv_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; switch (cdclk) { case 400000: case 333333: case 320000: case 266667: case 200000: break; default: MISSING_CASE(cdclk); return; } /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_BUNIT) | BIT(VLV_IOSF_SB_PUNIT)); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { drm_err(&dev_priv->drm, "timed out waiting for CDclk change\n"); } if (cdclk == 400000) { u32 divider; divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; /* adjust cdclk divider */ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); val &= ~CCK_FREQUENCY_VALUES; val |= divider; vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 50)) drm_err(&dev_priv->drm, "timed out waiting for CDclk change\n"); } /* adjust self-refresh exit latency value */ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC); val &= ~0x7f; /* * For high bandwidth configs, we set a higher latency in the bunit * so that the core display fetch happens in time to avoid underruns. */ if (cdclk == 400000) val |= 4500 / 250; /* 4.5 usec */ else val |= 3000 / 250; /* 3.0 usec */ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_BUNIT) | BIT(VLV_IOSF_SB_PUNIT)); intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; switch (cdclk) { case 333333: case 320000: case 266667: case 200000: break; default: MISSING_CASE(cdclk); return; } /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. 
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { drm_err(&dev_priv->drm, "timed out waiting for CDclk change\n"); } vlv_punit_put(dev_priv); intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static int bdw_calc_cdclk(int min_cdclk) { if (min_cdclk > 540000) return 675000; else if (min_cdclk > 450000) return 540000; else if (min_cdclk > 337500) return 450000; else return 337500; } static u8 bdw_calc_voltage_level(int cdclk) { switch (cdclk) { default: case 337500: return 2; case 450000: return 0; case 540000: return 1; case 675000: return 3; } } static void bdw_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) cdclk_config->cdclk = 800000; else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_54O_BDW) cdclk_config->cdclk = 540000; else if (freq == LCPLL_CLK_FREQ_337_5_BDW) cdclk_config->cdclk = 337500; else cdclk_config->cdclk = 675000; /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ cdclk_config->voltage_level = bdw_calc_voltage_level(cdclk_config->cdclk); } static u32 bdw_cdclk_freq_sel(int cdclk) { switch (cdclk) { default: MISSING_CASE(cdclk); fallthrough; case 337500: return LCPLL_CLK_FREQ_337_5_BDW; case 450000: return LCPLL_CLK_FREQ_450; case 540000: return LCPLL_CLK_FREQ_54O_BDW; case 675000: return LCPLL_CLK_FREQ_675_BDW; } } static void bdw_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; int ret; if (drm_WARN(&dev_priv->drm, (intel_de_read(dev_priv, LCPLL_CTL) & (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, "trying to change cdclk frequency with cdclk not enabled\n")) return; ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { drm_err(&dev_priv->drm, "failed to inform pcode about cdclk change\n"); return; } intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_CD_SOURCE_FCLK); /* * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. 
*/ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 100)) drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk)); intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level); intel_de_write(dev_priv, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); intel_update_cdclk(dev_priv); } static int skl_calc_cdclk(int min_cdclk, int vco) { if (vco == 8640000) { if (min_cdclk > 540000) return 617143; else if (min_cdclk > 432000) return 540000; else if (min_cdclk > 308571) return 432000; else return 308571; } else { if (min_cdclk > 540000) return 675000; else if (min_cdclk > 450000) return 540000; else if (min_cdclk > 337500) return 450000; else return 337500; } } static u8 skl_calc_voltage_level(int cdclk) { if (cdclk > 540000) return 3; else if (cdclk > 450000) return 2; else if (cdclk > 337500) return 1; else return 0; } static void skl_dpll0_update(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 val; cdclk_config->ref = 24000; cdclk_config->vco = 0; val = intel_de_read(dev_priv, LCPLL1_CTL); if ((val & LCPLL_PLL_ENABLE) == 0) return; if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0)) return; val = intel_de_read(dev_priv, DPLL_CTRL1); if (drm_WARN_ON(&dev_priv->drm, (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) return; switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): cdclk_config->vco = 8100000; break; case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): cdclk_config->vco = 8640000; break; default: MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); break; } } static void skl_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 cdctl; skl_dpll0_update(dev_priv, cdclk_config); cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref; if (cdclk_config->vco == 0) goto out; cdctl = intel_de_read(dev_priv, CDCLK_CTL); if (cdclk_config->vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: cdclk_config->cdclk = 432000; break; case CDCLK_FREQ_337_308: cdclk_config->cdclk = 308571; break; case CDCLK_FREQ_540: cdclk_config->cdclk = 540000; break; case CDCLK_FREQ_675_617: cdclk_config->cdclk = 617143; break; default: MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); break; } } else { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: cdclk_config->cdclk = 450000; break; case CDCLK_FREQ_337_308: cdclk_config->cdclk = 337500; break; case CDCLK_FREQ_540: cdclk_config->cdclk = 540000; break; case CDCLK_FREQ_675_617: cdclk_config->cdclk = 675000; break; default: MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); break; } } out: /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. 
*/ cdclk_config->voltage_level = skl_calc_voltage_level(cdclk_config->cdclk); } /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ static int skl_cdclk_decimal(int cdclk) { return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) { bool changed = dev_priv->skl_preferred_vco_freq != vco; dev_priv->skl_preferred_vco_freq = vco; if (changed) intel_update_max_cdclk(dev_priv); } static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco) { drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); /* * We always enable DPLL0 with the lowest link rate possible, but still * taking into account the VCO required to operate the eDP panel at the * desired frequency. The usual DP link rates operate with a VCO of * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. * The modeset code is responsible for the selection of the exact link * rate later on, with the constraint of choosing a frequency that * works with vco. */ if (vco == 8640000) return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0); else return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0); } static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { intel_de_rmw(dev_priv, DPLL_CTRL1, DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0), DPLL_CTRL1_OVERRIDE(SKL_DPLL0) | skl_dpll0_link_rate(dev_priv, vco)); intel_de_posting_read(dev_priv, DPLL_CTRL1); intel_de_rmw(dev_priv, LCPLL1_CTL, 0, LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) drm_err(&dev_priv->drm, "DPLL0 not locked\n"); dev_priv->display.cdclk.hw.vco = vco; /* We'll want to keep using the current vco from now on. */ skl_set_preferred_cdclk_vco(dev_priv, vco); } static void skl_dpll0_disable(struct drm_i915_private *dev_priv) { intel_de_rmw(dev_priv, LCPLL1_CTL, LCPLL_PLL_ENABLE, 0); if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); dev_priv->display.cdclk.hw.vco = 0; } static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, int cdclk, int vco) { switch (cdclk) { default: drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 308571: case 337500: return CDCLK_FREQ_337_308; case 450000: case 432000: return CDCLK_FREQ_450_432; case 540000: return CDCLK_FREQ_540; case 617143: case 675000: return CDCLK_FREQ_675_617; } } static void skl_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 freq_select, cdclk_ctl; int ret; /* * Based on WA#1183 CDCLK rates 308 and 617MHz CDCLK rates are * unsupported on SKL. In theory this should never happen since only * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not * supported on SKL either, see the above WA. WARN whenever trying to * use the corresponding VCO freq as that always leads to using the * minimum 308MHz CDCLK. 
*/ drm_WARN_ON_ONCE(&dev_priv->drm, IS_SKYLAKE(dev_priv) && vco == 8640000); ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) { drm_err(&dev_priv->drm, "Failed to inform PCU about cdclk change (%d)\n", ret); return; } freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco); if (dev_priv->display.cdclk.hw.vco != 0 && dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); if (dev_priv->display.cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); } /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); intel_de_posting_read(dev_priv, CDCLK_CTL); if (dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); intel_de_posting_read(dev_priv, CDCLK_CTL); /* inform PCU of the change */ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); intel_update_cdclk(dev_priv); } static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; /* * check if the pre-os initialized the display * There is SWF18 scratchpad register defined which is set by the * pre-os which can be used by the OS drivers to check the status */ if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; intel_update_cdclk(dev_priv); intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ if (dev_priv->display.cdclk.hw.vco == 0 || dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock * * Noticed in some instances that the freq selection is correct but * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. */ cdctl = intel_de_read(dev_priv, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk); if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ dev_priv->display.cdclk.hw.vco = -1; } static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config; skl_sanitize_cdclk(dev_priv); if (dev_priv->display.cdclk.hw.cdclk != 0 && dev_priv->display.cdclk.hw.vco != 0) { /* * Use the current vco as our initial * guess as to what the preferred vco is. 
*/ if (dev_priv->skl_preferred_vco_freq == 0) skl_set_preferred_cdclk_vco(dev_priv, dev_priv->display.cdclk.hw.vco); return; } cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.vco = dev_priv->skl_preferred_vco_freq; if (cdclk_config.vco == 0) cdclk_config.vco = 8100000; cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco); cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } struct intel_cdclk_vals { u32 cdclk; u16 refclk; u16 waveform; u8 divider; /* CD2X divider * 2 */ u8 ratio; }; static const struct intel_cdclk_vals bxt_cdclk_table[] = { { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 }, { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 }, { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 }, {} }; static const struct intel_cdclk_vals glk_cdclk_table[] = { { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 }, { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 }, { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 }, {} }; static const struct intel_cdclk_vals icl_cdclk_table[] = { { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 }, { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 }, { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 }, { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 }, { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, {} }; static const struct intel_cdclk_vals rkl_cdclk_table[] = { { .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 }, { .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 }, { .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 }, { .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 }, { .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 }, { .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 }, { .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 }, { .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 }, { .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 }, { .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 }, { .refclk = 24000, .cdclk = 552000, .divider = 4, 
.ratio = 92 }, { .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 }, { .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 }, { .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 }, { .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 }, { .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 }, { .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 }, { .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 }, {} }; static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = { { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 }, { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, {} }; static const struct intel_cdclk_vals adlp_cdclk_table[] = { { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 }, { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 }, { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 }, { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, {} }; static const struct intel_cdclk_vals rplu_cdclk_table[] = { { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 }, { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 }, { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 }, { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 }, { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 }, { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 }, { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, {} }; static const struct intel_cdclk_vals dg2_cdclk_table[] = { { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 
}, { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 }, { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a }, { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa }, { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a }, { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 }, { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 }, { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee }, { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de }, { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe }, { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff }, {} }; static const struct intel_cdclk_vals mtl_cdclk_table[] = { { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a }, { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 }, { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 }, { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 }, { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 }, { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 }, {} }; static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; for (i = 0; table[i].refclk; i++) if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk >= min_cdclk) return table[i].cdclk; drm_WARN(&dev_priv->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", min_cdclk, dev_priv->display.cdclk.hw.ref); return 0; } static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) return dev_priv->display.cdclk.hw.ref * table[i].ratio; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", cdclk, dev_priv->display.cdclk.hw.ref); return 0; } static u8 bxt_calc_voltage_level(int cdclk) { return DIV_ROUND_UP(cdclk, 25000); } static u8 icl_calc_voltage_level(int cdclk) { if (cdclk > 556800) return 2; else if (cdclk > 312000) return 1; else return 0; } static u8 ehl_calc_voltage_level(int cdclk) { if (cdclk > 326400) return 3; else if (cdclk > 312000) return 2; else if (cdclk > 180000) return 1; else return 0; } static u8 tgl_calc_voltage_level(int cdclk) { if (cdclk > 556800) return 3; else if (cdclk > 326400) return 2; else if (cdclk > 312000) return 1; else return 0; } static u8 rplu_calc_voltage_level(int cdclk) { if (cdclk > 556800) return 3; else if (cdclk > 480000) return 2; else if (cdclk > 312000) return 1; else return 0; } static void icl_readout_refclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; switch (dssm) { default: MISSING_CASE(dssm); fallthrough; case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: cdclk_config->ref = 
24000; break; case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: cdclk_config->ref = 19200; break; case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: cdclk_config->ref = 38400; break; } } static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 val, ratio; if (IS_DG2(dev_priv)) cdclk_config->ref = 38400; else if (DISPLAY_VER(dev_priv) >= 11) icl_readout_refclk(dev_priv, cdclk_config); else cdclk_config->ref = 19200; val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE); if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || (val & BXT_DE_PLL_LOCK) == 0) { /* * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but * setting it to zero is a way to signal that. */ cdclk_config->vco = 0; return; } /* * DISPLAY_VER >= 11 have the ratio directly in the PLL enable register, * gen9lp had it in a separate PLL control register. */ if (DISPLAY_VER(dev_priv) >= 11) ratio = val & ICL_CDCLK_PLL_RATIO_MASK; else ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; cdclk_config->vco = ratio * cdclk_config->ref; } static void bxt_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { u32 squash_ctl = 0; u32 divider; int div; bxt_de_pll_readout(dev_priv, cdclk_config); if (DISPLAY_VER(dev_priv) >= 12) cdclk_config->bypass = cdclk_config->ref / 2; else if (DISPLAY_VER(dev_priv) >= 11) cdclk_config->bypass = 50000; else cdclk_config->bypass = cdclk_config->ref; if (cdclk_config->vco == 0) { cdclk_config->cdclk = cdclk_config->bypass; goto out; } divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: div = 2; break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: div = 3; break; case BXT_CDCLK_CD2X_DIV_SEL_2: div = 4; break; case BXT_CDCLK_CD2X_DIV_SEL_4: div = 8; break; default: MISSING_CASE(divider); return; } if (HAS_CDCLK_SQUASH(dev_priv)) squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); if (squash_ctl & CDCLK_SQUASH_ENABLE) { u16 waveform; int size; size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1; waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size); cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) * cdclk_config->vco, size * div); } else { cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); } out: /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. 
*/ cdclk_config->voltage_level = intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) { intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); dev_priv->display.cdclk.hw.vco = 0; } static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) { int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); intel_de_rmw(dev_priv, BXT_DE_PLL_CTL, BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio)); intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); dev_priv->display.cdclk.hw.vco = vco; } static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) { intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE, 0); /* Timeout 200us */ if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n"); dev_priv->display.cdclk.hw.vco = 0; } static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) { int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; val = ICL_CDCLK_PLL_RATIO(ratio); intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); val |= BXT_DE_PLL_PLL_ENABLE; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n"); dev_priv->display.cdclk.hw.vco = vco; } static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) { int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; /* Write PLL ratio without disabling */ val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); /* Submit freq change request */ val |= BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); dev_priv->display.cdclk.hw.vco = vco; } static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { if (DISPLAY_VER(dev_priv) >= 12) { if (pipe == INVALID_PIPE) return TGL_CDCLK_CD2X_PIPE_NONE; else return TGL_CDCLK_CD2X_PIPE(pipe); } else if (DISPLAY_VER(dev_priv) >= 11) { if (pipe == INVALID_PIPE) return ICL_CDCLK_CD2X_PIPE_NONE; else return ICL_CDCLK_CD2X_PIPE(pipe); } else { if (pipe == INVALID_PIPE) return BXT_CDCLK_CD2X_PIPE_NONE; else return BXT_CDCLK_CD2X_PIPE(pipe); } } static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, int cdclk, int vco) { /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 2: return BXT_CDCLK_CD2X_DIV_SEL_1; case 3: return BXT_CDCLK_CD2X_DIV_SEL_1_5; case 4: return BXT_CDCLK_CD2X_DIV_SEL_2; case 8: return BXT_CDCLK_CD2X_DIV_SEL_4; } } static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, int cdclk) { const struct 
intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) return table[i].waveform; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", cdclk, dev_priv->display.cdclk.hw.ref); return 0xffff; } static void icl_cdclk_pll_update(struct drm_i915_private *i915, int vco) { if (i915->display.cdclk.hw.vco != 0 && i915->display.cdclk.hw.vco != vco) icl_cdclk_pll_disable(i915); if (i915->display.cdclk.hw.vco != vco) icl_cdclk_pll_enable(i915, vco); } static void bxt_cdclk_pll_update(struct drm_i915_private *i915, int vco) { if (i915->display.cdclk.hw.vco != 0 && i915->display.cdclk.hw.vco != vco) bxt_de_pll_disable(i915); if (i915->display.cdclk.hw.vco != vco) bxt_de_pll_enable(i915, vco); } static void dg2_cdclk_squash_program(struct drm_i915_private *i915, u16 waveform) { u32 squash_ctl = 0; if (waveform) squash_ctl = CDCLK_SQUASH_ENABLE | CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl); } static bool cdclk_pll_is_unknown(unsigned int vco) { /* * Ensure driver does not take the crawl path for the * case when the vco is set to ~0 in the * sanitize path. */ return vco == ~0; } static int cdclk_squash_divider(u16 waveform) { return hweight16(waveform ?: 0xffff); } static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915, const struct intel_cdclk_config *old_cdclk_config, const struct intel_cdclk_config *new_cdclk_config, struct intel_cdclk_config *mid_cdclk_config) { u16 old_waveform, new_waveform, mid_waveform; int size = 16; int div = 2; /* Return if PLL is in an unknown state, force a complete disable and re-enable. */ if (cdclk_pll_is_unknown(old_cdclk_config->vco)) return false; /* Return if both Squash and Crawl are not present */ if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) return false; old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk); new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk); /* Return if Squash only or Crawl only is the desired action */ if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 || old_cdclk_config->vco == new_cdclk_config->vco || old_waveform == new_waveform) return false; *mid_cdclk_config = *new_cdclk_config; /* * Populate the mid_cdclk_config accordingly. * - If moving to a higher cdclk, the desired action is squashing. * The mid cdclk config should have the new (squash) waveform. * - If moving to a lower cdclk, the desired action is crawling. * The mid cdclk config should have the new vco. 
*/ if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) { mid_cdclk_config->vco = old_cdclk_config->vco; mid_waveform = new_waveform; } else { mid_cdclk_config->vco = new_cdclk_config->vco; mid_waveform = old_waveform; } mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) * mid_cdclk_config->vco, size * div); /* make sure the mid clock came out sane */ drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk < min(old_cdclk_config->cdclk, new_cdclk_config->cdclk)); drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk > i915->display.cdclk.max_cdclk_freq); drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) != mid_waveform); return true; } static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) { return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) && dev_priv->display.cdclk.hw.vco > 0 && HAS_CDCLK_SQUASH(dev_priv)); } static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 val; u16 waveform; int clock; if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 && !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { if (dev_priv->display.cdclk.hw.vco != vco) adlp_cdclk_pll_crawl(dev_priv, vco); } else if (DISPLAY_VER(dev_priv) >= 11) { /* wa_15010685871: dg2, mtl */ if (pll_enable_wa_needed(dev_priv)) dg2_cdclk_squash_program(dev_priv, 0); icl_cdclk_pll_update(dev_priv, vco); } else bxt_cdclk_pll_update(dev_priv, vco); waveform = cdclk_squash_waveform(dev_priv, cdclk); if (waveform) clock = vco / 2; else clock = cdclk; if (HAS_CDCLK_SQUASH(dev_priv)) dg2_cdclk_squash_program(dev_priv, waveform); val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) | bxt_cdclk_cd2x_pipe(dev_priv, pipe) | skl_cdclk_decimal(cdclk); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && cdclk >= 500000) val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; intel_de_write(dev_priv, CDCLK_CTL, val); if (pipe != INVALID_PIPE) intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); } static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { struct intel_cdclk_config mid_cdclk_config; int cdclk = cdclk_config->cdclk; int ret = 0; /* * Inform power controller of upcoming frequency change. * Display versions 14 and beyond do not follow the PUnit * mailbox communication, skip * this step. */ if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) /* NOOP */; else if (DISPLAY_VER(dev_priv) >= 11) ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); else /* * BSpec requires us to wait up to 150usec, but that leads to * timeouts; the 2ms used here is based on experiment. 
*/ ret = snb_pcode_write_timeout(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000, 150, 2); if (ret) { drm_err(&dev_priv->drm, "Failed to inform PCU about cdclk change (err %d, freq %d)\n", ret, cdclk); return; } if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw, cdclk_config, &mid_cdclk_config)) { _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe); _bxt_set_cdclk(dev_priv, cdclk_config, pipe); } else { _bxt_set_cdclk(dev_priv, cdclk_config, pipe); } if (DISPLAY_VER(dev_priv) >= 14) /* * NOOP - No Pcode communication needed for * Display versions 14 and beyond */; else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv)) ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, cdclk_config->voltage_level); if (DISPLAY_VER(dev_priv) < 11) { /* * The timeout isn't specified, the 2ms used here is based on * experiment. * FIXME: Waiting for the request completion could be delayed * until the next PCODE request based on BSpec. */ ret = snb_pcode_write_timeout(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, cdclk_config->voltage_level, 150, 2); } if (ret) { drm_err(&dev_priv->drm, "PCode CDCLK freq set failed, (err %d, freq %d)\n", ret, cdclk); return; } intel_update_cdclk(dev_priv); if (DISPLAY_VER(dev_priv) >= 11) /* * Can't read out the voltage level :( * Let's just assume everything is as expected. */ dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level; } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; int cdclk, clock, vco; intel_update_cdclk(dev_priv); intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); if (dev_priv->display.cdclk.hw.vco == 0 || dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock * * Some BIOS versions leave an incorrect decimal frequency value and * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, * so sanitize this register. */ cdctl = intel_de_read(dev_priv, CDCLK_CTL); /* * Let's ignore the pipe field, since BIOS could have configured the * dividers both synching to an active pipe, or asynchronously * (PIPE_NONE). */ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); /* Make sure this is a legal cdclk value for the platform */ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk); if (cdclk != dev_priv->display.cdclk.hw.cdclk) goto sanitize; /* Make sure the VCO is correct for the cdclk */ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); if (vco != dev_priv->display.cdclk.hw.vco) goto sanitize; expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ if (HAS_CDCLK_SQUASH(dev_priv)) clock = dev_priv->display.cdclk.hw.vco / 2; else clock = dev_priv->display.cdclk.hw.cdclk; expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, dev_priv->display.cdclk.hw.vco); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. 
*/ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && dev_priv->display.cdclk.hw.cdclk >= 500000) expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; if (cdctl == expected) /* All well; nothing to sanitize */ return; sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ dev_priv->display.cdclk.hw.vco = -1; } static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config; bxt_sanitize_cdclk(dev_priv); if (dev_priv->display.cdclk.hw.cdclk != 0 && dev_priv->display.cdclk.hw.vco != 0) return; cdclk_config = dev_priv->display.cdclk.hw; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. */ cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); cdclk_config.voltage_level = intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } /** * intel_cdclk_init_hw - Initialize CDCLK hardware * @i915: i915 device * * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and * sanitizing the state of the hardware if needed. This is generally done only * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. */ void intel_cdclk_init_hw(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) bxt_cdclk_init_hw(i915); else if (DISPLAY_VER(i915) == 9) skl_cdclk_init_hw(i915); } /** * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware * @i915: i915 device * * Uninitialize CDCLK. This is done only during the display core * uninitialization sequence. */ void intel_cdclk_uninit_hw(struct drm_i915_private *i915) { if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915)) bxt_cdclk_uninit_hw(i915); else if (DISPLAY_VER(i915) == 9) skl_cdclk_uninit_hw(i915); } static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { u16 old_waveform; u16 new_waveform; drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco)); if (a->vco == 0 || b->vco == 0) return false; if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915)) return false; old_waveform = cdclk_squash_waveform(i915, a->cdclk); new_waveform = cdclk_squash_waveform(i915, b->cdclk); return a->vco != b->vco && old_waveform != new_waveform; } static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { int a_div, b_div; if (!HAS_CDCLK_CRAWL(dev_priv)) return false; /* * The vco and cd2x divider will change independently * from each, so we disallow cd2x change when crawling. 
*/ a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk); b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk); return a->vco != 0 && b->vco != 0 && a->vco != b->vco && a_div == b_div && a->ref == b->ref; } static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { /* * FIXME should store a bit more state in intel_cdclk_config * to differentiate squasher vs. cd2x divider properly. For * the moment all platforms with squasher use a fixed cd2x * divider. */ if (!HAS_CDCLK_SQUASH(dev_priv)) return false; return a->cdclk != b->cdclk && a->vco != 0 && a->vco == b->vco && a->ref == b->ref; } /** * intel_cdclk_needs_modeset - Determine if changing between the CDCLK * configurations requires a modeset on all pipes * @a: first CDCLK configuration * @b: second CDCLK configuration * * Returns: * True if changing between the two CDCLK configurations * requires all pipes to be off, false if not. */ bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { return a->cdclk != b->cdclk || a->vco != b->vco || a->ref != b->ref; } /** * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK * configurations requires only a cd2x divider update * @dev_priv: i915 device * @a: first CDCLK configuration * @b: second CDCLK configuration * * Returns: * True if changing between the two CDCLK configurations * can be done with just a cd2x divider update, false if not. */ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { /* Older hw doesn't have the capability */ if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) return false; /* * FIXME should store a bit more state in intel_cdclk_config * to differentiate squasher vs. cd2x divider properly. For * the moment all platforms with squasher use a fixed cd2x * divider. */ if (HAS_CDCLK_SQUASH(dev_priv)) return false; return a->cdclk != b->cdclk && a->vco != 0 && a->vco == b->vco && a->ref == b->ref; } /** * intel_cdclk_changed - Determine if two CDCLK configurations are different * @a: first CDCLK configuration * @b: second CDCLK configuration * * Returns: * True if the CDCLK configurations don't match, false if they do. 
*/ static bool intel_cdclk_changed(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b) { return intel_cdclk_needs_modeset(a, b) || a->voltage_level != b->voltage_level; } void intel_cdclk_dump_config(struct drm_i915_private *i915, const struct intel_cdclk_config *cdclk_config, const char *context) { drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", context, cdclk_config->cdclk, cdclk_config->vco, cdclk_config->ref, cdclk_config->bypass, cdclk_config->voltage_level); } static void intel_pcode_notify(struct drm_i915_private *i915, u8 voltage_level, u8 active_pipe_count, u16 cdclk, bool cdclk_update_valid, bool pipe_count_update_valid) { int ret; u32 update_mask = 0; if (!IS_DG2(i915)) return; update_mask = DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, active_pipe_count, voltage_level); if (cdclk_update_valid) update_mask |= DISPLAY_TO_PCODE_CDCLK_VALID; if (pipe_count_update_valid) update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID; ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE | update_mask, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) drm_err(&i915->drm, "Failed to inform PCU about display config (err %d)\n", ret); } /** * intel_set_cdclk - Push the CDCLK configuration to the hardware * @dev_priv: i915 device * @cdclk_config: new CDCLK configuration * @pipe: pipe with which to synchronize the update * * Program the hardware based on the passed in CDCLK state, * if necessary. */ static void intel_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { struct intel_encoder *encoder; if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) return; intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_pause(intel_dp); } intel_audio_cdclk_change_pre(dev_priv); /* * Lock aux/gmbus while we change cdclk in case those * functions use cdclk. Not all platforms/ports do, * but we'll lock them all for simplicity. 
*/ mutex_lock(&dev_priv->display.gmbus.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, &dev_priv->display.gmbus.mutex); } intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_unlock(&intel_dp->aux.hw_mutex); } mutex_unlock(&dev_priv->display.gmbus.mutex); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_psr_resume(intel_dp); } intel_audio_cdclk_change_post(dev_priv); if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]"); intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); } } static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); unsigned int cdclk = 0; u8 voltage_level, num_active_pipes = 0; bool change_cdclk, update_pipe_count; if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual) && new_cdclk_state->active_pipes == old_cdclk_state->active_pipes) return; /* According to "Sequence Before Frequency Change", voltage level set to 0x3 */ voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX; change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; update_pipe_count = hweight8(new_cdclk_state->active_pipes) > hweight8(old_cdclk_state->active_pipes); /* * According to "Sequence Before Frequency Change", * if CDCLK is increasing, set bits 25:16 to upcoming CDCLK, * if CDCLK is decreasing or not changing, set bits 25:16 to current CDCLK, * which basically means we choose the maximum of old and new CDCLK, if we know both */ if (change_cdclk) cdclk = max(new_cdclk_state->actual.cdclk, old_cdclk_state->actual.cdclk); /* * According to "Sequence For Pipe Count Change", * if pipe count is increasing, set bits 25:16 to upcoming pipe count * (power well is enabled) * no action if it is decreasing, before the change */ if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, change_cdclk, update_pipe_count); } static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); unsigned int cdclk = 0; u8 voltage_level, num_active_pipes = 0; bool update_cdclk, update_pipe_count; /* According to "Sequence After Frequency Change", set voltage to used level */ voltage_level = new_cdclk_state->actual.voltage_level; update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk; update_pipe_count = hweight8(new_cdclk_state->active_pipes) < hweight8(old_cdclk_state->active_pipes); /* * According to "Sequence After Frequency Change", * set bits 25:16 to current CDCLK */ if (update_cdclk) cdclk = new_cdclk_state->actual.cdclk; /* * According to "Sequence For Pipe Count Change", * if pipe count is decreasing, set bits 25:16 to current pipe 
count, * after the change(power well is disabled) * no action if it is increasing, after the change */ if (update_pipe_count) num_active_pipes = hweight8(new_cdclk_state->active_pipes); intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk, update_cdclk, update_pipe_count); } /** * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware * @state: intel atomic state * * Program the hardware before updating the HW plane state based on the * new CDCLK state, if necessary. */ void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); enum pipe pipe = new_cdclk_state->pipe; if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) return; if (IS_DG2(i915)) intel_cdclk_pcode_pre_notify(state); if (pipe == INVALID_PIPE || old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); } } /** * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware * @state: intel atomic state * * Program the hardware after updating the HW plane state based on the * new CDCLK state, if necessary. */ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state = intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); enum pipe pipe = new_cdclk_state->pipe; if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) return; if (IS_DG2(i915)) intel_cdclk_pcode_post_notify(state); if (pipe != INVALID_PIPE && old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); } } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int pixel_rate = crtc_state->pixel_rate; if (DISPLAY_VER(dev_priv) >= 10) return DIV_ROUND_UP(pixel_rate, 2); else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; else if (IS_CHERRYVIEW(dev_priv)) return DIV_ROUND_UP(pixel_rate * 100, 95); else if (crtc_state->double_wide) return DIV_ROUND_UP(pixel_rate * 100, 90 * 2); else return DIV_ROUND_UP(pixel_rate * 100, 90); } static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_plane *plane; int min_cdclk = 0; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); return min_cdclk; } int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int min_cdclk; if (!crtc_state->hw.enable) return 0; min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state)) min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95); /* BSpec says "Do not 
use DisplayPort with CDCLK less than 432 MHz, * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else * there may be audio corruption or screen corruption." This cdclk * restriction for GLK is 316.8 MHz. */ if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio && crtc_state->port_clock >= 540000 && crtc_state->lane_count == 4) { if (DISPLAY_VER(dev_priv) == 10) { /* Display WA #1145: glk */ min_cdclk = max(316800, min_cdclk); } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) { /* Display WA #1144: skl,bxt */ min_cdclk = max(432000, min_cdclk); } } /* * According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. */ if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); /* * "For DP audio configuration, cdclk frequency shall be set to * meet the following requirements: * DP Link Frequency(MHz) | Cdclk frequency(MHz) * 270 | 320 or higher * 162 | 200 or higher" */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) min_cdclk = max(crtc_state->port_clock, min_cdclk); /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000 kHz. */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && IS_VALLEYVIEW(dev_priv)) min_cdclk = max(320000, min_cdclk); /* * On Geminilake, once the CDCLK gets as low as 79200 the * picture gets unstable, even though the values are * correct for DSI PLL and DE PLL. */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && IS_GEMINILAKE(dev_priv)) min_cdclk = max(158400, min_cdclk); /* Account for additional needs from the planes */ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); /* * When we decide to use only one VDSC engine, since * each VDSC operates with 1 ppc throughput, pixel clock * cannot be higher than the VDSC clock (cdclk). * If there are 2 VDSC engines, then pixel clock can't be higher than * VDSC clock (cdclk) * 2, and so on. */ if (crtc_state->dsc.compression_enable) { int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); min_cdclk = max_t(int, min_cdclk, DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances)); } /* * HACK. Currently for TGL/DG2 platforms we calculate * min_cdclk initially based on pixel_rate divided * by 2, also accounting for plane requirements; * however, in some cases the lowest possible CDCLK * doesn't work and causes underruns. * Explicitly stating here that this currently seems to be * rather a hack than a final solution. */ if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) { /* * Clamp to max_cdclk_freq in case pixel rate is higher, * in order not to break 8K, but still leave the W/A in place. 
*/ min_cdclk = max_t(int, min_cdclk, min_t(int, crtc_state->pixel_rate, dev_priv->display.cdclk.max_cdclk_freq)); } return min_cdclk; } static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_bw_state *bw_state; struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int min_cdclk, i; enum pipe pipe; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { int ret; min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); if (min_cdclk < 0) return min_cdclk; if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk) continue; cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) return ret; } bw_state = intel_atomic_get_new_bw_state(state); if (bw_state) { min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state); if (cdclk_state->bw_min_cdclk != min_cdclk) { int ret; cdclk_state->bw_min_cdclk = min_cdclk; ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) return ret; } } min_cdclk = max(cdclk_state->force_min_cdclk, cdclk_state->bw_min_cdclk); for_each_pipe(dev_priv, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { drm_dbg_kms(&dev_priv->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->display.cdclk.max_cdclk_freq); return -EINVAL; } return min_cdclk; } /* * Account for port clock min voltage level requirements. * This only really does something on DISPLAY_VER >= 11 but can be * called on earlier platforms as well. * * Note that this function assumes that 0 is * the lowest voltage value, and higher values * correspond to increasingly higher voltages. * * Should that relationship no longer hold on * future platforms this code will need to be * adjusted. 
*/ static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; u8 min_voltage_level; int i; enum pipe pipe; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { int ret; if (crtc_state->hw.enable) min_voltage_level = crtc_state->min_voltage_level; else min_voltage_level = 0; if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level) continue; cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level; ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) return ret; } min_voltage_level = 0; for_each_pipe(dev_priv, pipe) min_voltage_level = max(cdclk_state->min_voltage_level[pipe], min_voltage_level); return min_voltage_level; } static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, cdclk; min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); if (!cdclk_state->active_pipes) { cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } return 0; } static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk, cdclk; min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; cdclk = bdw_calc_cdclk(min_cdclk); cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = bdw_calc_voltage_level(cdclk); if (!cdclk_state->active_pipes) { cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk); cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = bdw_calc_voltage_level(cdclk); } else { cdclk_state->actual = cdclk_state->logical; } return 0; } static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int vco, i; vco = cdclk_state->logical.vco; if (!vco) vco = dev_priv->skl_preferred_vco_freq; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.enable) continue; if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) continue; /* * DPLL0 VCO may need to be adjusted to get the correct * clock for eDP. This will affect cdclk as well. 
*/ switch (crtc_state->port_clock / 2) { case 108000: case 216000: vco = 8640000; break; default: vco = 8100000; break; } } return vco; } static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; vco = skl_dpll0_vco(cdclk_state); cdclk = skl_calc_cdclk(min_cdclk, vco); cdclk_state->logical.vco = vco; cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = skl_calc_voltage_level(cdclk); if (!cdclk_state->active_pipes) { cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco); cdclk_state->actual.vco = vco; cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = skl_calc_voltage_level(cdclk); } else { cdclk_state->actual = cdclk_state->logical; } return 0; } static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, min_voltage_level, cdclk, vco; min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; min_voltage_level = bxt_compute_min_voltage_level(cdclk_state); if (min_voltage_level < 0) return min_voltage_level; cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); cdclk_state->logical.vco = vco; cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = max_t(int, min_voltage_level, intel_cdclk_calc_voltage_level(dev_priv, cdclk)); if (!cdclk_state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); cdclk_state->actual.vco = vco; cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = intel_cdclk_calc_voltage_level(dev_priv, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } return 0; } static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk; /* * We can't change the cdclk frequency, but we still want to * check that the required minimum frequency doesn't exceed * the actual cdclk frequency. 
*/ min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; return 0; } static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj) { struct intel_cdclk_state *cdclk_state; cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL); if (!cdclk_state) return NULL; cdclk_state->pipe = INVALID_PIPE; return &cdclk_state->base; } static void intel_cdclk_destroy_state(struct intel_global_obj *obj, struct intel_global_state *state) { kfree(state); } static const struct intel_global_state_funcs intel_cdclk_funcs = { .atomic_duplicate_state = intel_cdclk_duplicate_state, .atomic_destroy_state = intel_cdclk_destroy_state, }; struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *cdclk_state; cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj); if (IS_ERR(cdclk_state)) return ERR_CAST(cdclk_state); return to_intel_cdclk_state(cdclk_state); } int intel_cdclk_atomic_check(struct intel_atomic_state *state, bool *need_cdclk_calc) { const struct intel_cdclk_state *old_cdclk_state; const struct intel_cdclk_state *new_cdclk_state; struct intel_plane_state __maybe_unused *plane_state; struct intel_plane *plane; int ret; int i; /* * active_planes bitmask has been updated, and potentially affected * planes are part of the state. We can now compute the minimum cdclk * for each plane. */ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); if (ret) return ret; } ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc); if (ret) return ret; old_cdclk_state = intel_atomic_get_old_cdclk_state(state); new_cdclk_state = intel_atomic_get_new_cdclk_state(state); if (new_cdclk_state && old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) *need_cdclk_calc = true; return 0; } int intel_cdclk_init(struct drm_i915_private *dev_priv) { struct intel_cdclk_state *cdclk_state; cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); if (!cdclk_state) return -ENOMEM; intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj, &cdclk_state->base, &intel_cdclk_funcs); return 0; } static bool intel_cdclk_need_serialize(struct drm_i915_private *i915, const struct intel_cdclk_state *old_cdclk_state, const struct intel_cdclk_state *new_cdclk_state) { bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) != hweight8(new_cdclk_state->active_pipes); bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual); /* * We need to poke hw for gen >= 12, because we notify PCode if * pipe power well count changes. 
*/ return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed); } int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_cdclk_state *old_cdclk_state; struct intel_cdclk_state *new_cdclk_state; enum pipe pipe = INVALID_PIPE; int ret; new_cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(new_cdclk_state)) return PTR_ERR(new_cdclk_state); old_cdclk_state = intel_atomic_get_old_cdclk_state(state); new_cdclk_state->active_pipes = intel_calc_active_pipes(state, old_cdclk_state->active_pipes); ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state); if (ret) return ret; if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) { /* * Also serialize commits across all crtcs * if the actual hw needs to be poked. */ ret = intel_atomic_serialize_global_state(&new_cdclk_state->base); if (ret) return ret; } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk || intel_cdclk_changed(&old_cdclk_state->logical, &new_cdclk_state->logical)) { ret = intel_atomic_lock_global_state(&new_cdclk_state->base); if (ret) return ret; } else { return 0; } if (is_power_of_2(new_cdclk_state->active_pipes) && intel_cdclk_can_cd2x_update(dev_priv, &old_cdclk_state->actual, &new_cdclk_state->actual)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); if (intel_crtc_needs_modeset(crtc_state)) pipe = INVALID_PIPE; } if (intel_cdclk_can_crawl_and_squash(dev_priv, &old_cdclk_state->actual, &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via crawling and squashing\n"); } else if (intel_cdclk_can_squash(dev_priv, &old_cdclk_state->actual, &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via squashing\n"); } else if (intel_cdclk_can_crawl(dev_priv, &old_cdclk_state->actual, &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via crawling\n"); } else if (pipe != INVALID_PIPE) { new_cdclk_state->pipe = pipe; drm_dbg_kms(&dev_priv->drm, "Can change cdclk cd2x divider with pipe %c active\n", pipe_name(pipe)); } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual, &new_cdclk_state->actual)) { /* All pipes must be switched off while we change the cdclk. 
*/ ret = intel_modeset_all_pipes(state, "CDCLK change"); if (ret) return ret; drm_dbg_kms(&dev_priv->drm, "Modeset required for cdclk change\n"); } drm_dbg_kms(&dev_priv->drm, "New cdclk calculated to be logical %u kHz, actual %u kHz\n", new_cdclk_state->logical.cdclk, new_cdclk_state->actual.cdclk); drm_dbg_kms(&dev_priv->drm, "New voltage level calculated to be logical %u, actual %u\n", new_cdclk_state->logical.voltage_level, new_cdclk_state->actual.voltage_level); return 0; } static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq; if (DISPLAY_VER(dev_priv) >= 10) return 2 * max_cdclk_freq; else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) return max_cdclk_freq*95/100; else if (DISPLAY_VER(dev_priv) < 4) return 2*max_cdclk_freq*90/100; else return max_cdclk_freq*90/100; } /** * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency * @dev_priv: i915 device * * Determine the maximum CDCLK frequency the platform supports, and also * derive the maximum dot clock frequency the maximum CDCLK frequency * allows. */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { if (dev_priv->display.cdclk.hw.ref == 24000) dev_priv->display.cdclk.max_cdclk_freq = 552000; else dev_priv->display.cdclk.max_cdclk_freq = 556800; } else if (DISPLAY_VER(dev_priv) >= 11) { if (dev_priv->display.cdclk.hw.ref == 24000) dev_priv->display.cdclk.max_cdclk_freq = 648000; else dev_priv->display.cdclk.max_cdclk_freq = 652800; } else if (IS_GEMINILAKE(dev_priv)) { dev_priv->display.cdclk.max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { dev_priv->display.cdclk.max_cdclk_freq = 624000; } else if (DISPLAY_VER(dev_priv) == 9) { u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; vco = dev_priv->skl_preferred_vco_freq; drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); /* * Use the lower (vco 8640) cdclk values as a * first guess. skl_calc_cdclk() will correct it * if the preferred vco is 8100 instead. */ if (limit == SKL_DFSM_CDCLK_LIMIT_675) max_cdclk = 617143; else if (limit == SKL_DFSM_CDCLK_LIMIT_540) max_cdclk = 540000; else if (limit == SKL_DFSM_CDCLK_LIMIT_450) max_cdclk = 432000; else max_cdclk = 308571; dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow * 540 MHz for ULX and 675 MHz for ULT. * How can we know if extra cooling is * available? PCI ID, VTB, something else? 
*/ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULX(dev_priv)) dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BROADWELL_ULT(dev_priv)) dev_priv->display.cdclk.max_cdclk_freq = 540000; else dev_priv->display.cdclk.max_cdclk_freq = 675000; } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.cdclk.max_cdclk_freq = 320000; } else if (IS_VALLEYVIEW(dev_priv)) { dev_priv->display.cdclk.max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; } dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", dev_priv->display.cdclk.max_cdclk_freq); drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", dev_priv->max_dotclk_freq); } /** * intel_update_cdclk - Determine the current CDCLK frequency * @dev_priv: i915 device * * Determine the current CDCLK frequency. */ void intel_update_cdclk(struct drm_i915_private *dev_priv) { intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): * Programmng [sic] note: bit[9:2] should be programmed to the number * of cdclk that generates 4MHz reference clock freq which is used to * generate GMBus clock. This will vary with the cdclk freq. */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_de_write(dev_priv, GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000)); } static int dg1_rawclk(struct drm_i915_private *dev_priv) { /* * DG1 always uses a 38.4 MHz rawclk. The bspec tells us * "Program Numerator=2, Denominator=4, Divider=37 decimal." */ intel_de_write(dev_priv, PCH_RAWCLK_FREQ, CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); return 38400; } static int cnp_rawclk(struct drm_i915_private *dev_priv) { u32 rawclk; int divider, fraction; if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { /* 24 MHz */ divider = 24000; fraction = 0; } else { /* 19.2 MHz */ divider = 19000; fraction = 200; } rawclk = CNP_RAWCLK_DIV(divider / 1000); if (fraction) { int numerator = 1; rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) rawclk |= ICP_RAWCLK_NUM(numerator); } intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); return divider + fraction; } static int pch_rawclk(struct drm_i915_private *dev_priv) { return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } static int vlv_hrawclk(struct drm_i915_private *dev_priv) { /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } static int i9xx_hrawclk(struct drm_i915_private *dev_priv) { u32 clkcfg; /* * hrawclock is 1/4 the FSB frequency * * Note that this only reads the state of the FSB * straps, not the actual FSB frequency. Some BIOSen * let you configure each independently. 
Ideally we'd * read out the actual FSB frequency but sadly we * don't know which registers have that information, * and all the relevant docs have gone to bit heaven :( */ clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK; if (IS_MOBILE(dev_priv)) { switch (clkcfg) { case CLKCFG_FSB_400: return 100000; case CLKCFG_FSB_533: return 133333; case CLKCFG_FSB_667: return 166667; case CLKCFG_FSB_800: return 200000; case CLKCFG_FSB_1067: return 266667; case CLKCFG_FSB_1333: return 333333; default: MISSING_CASE(clkcfg); return 133333; } } else { switch (clkcfg) { case CLKCFG_FSB_400_ALT: return 100000; case CLKCFG_FSB_533: return 133333; case CLKCFG_FSB_667: return 166667; case CLKCFG_FSB_800: return 200000; case CLKCFG_FSB_1067_ALT: return 266667; case CLKCFG_FSB_1333_ALT: return 333333; case CLKCFG_FSB_1600_ALT: return 400000; default: return 133333; } } } /** * intel_read_rawclk - Determine the current RAWCLK frequency * @dev_priv: i915 device * * Determine the current RAWCLK frequency. RAWCLK is a fixed * frequency clock so this needs to done only once. */ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) { u32 freq; if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) freq = dg1_rawclk(dev_priv); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) /* * MTL always uses a 38.4 MHz rawclk. The bspec tells us * "RAWCLK_FREQ defaults to the values for 38.4 and does * not need to be programmed." */ freq = 38400; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) freq = pch_rawclk(dev_priv); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) freq = vlv_hrawclk(dev_priv); else if (DISPLAY_VER(dev_priv) >= 3) freq = i9xx_hrawclk(dev_priv); else /* no rawclk on other platforms, or no need to know it */ return 0; return freq; } static int i915_cdclk_info_show(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = m->private; seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info); void intel_cdclk_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root, i915, &i915_cdclk_info_fops); } static const struct intel_cdclk_funcs mtl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = tgl_calc_voltage_level, }; static const struct intel_cdclk_funcs rplu_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = rplu_calc_voltage_level, }; static const struct intel_cdclk_funcs tgl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = tgl_calc_voltage_level, }; static const struct intel_cdclk_funcs ehl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = ehl_calc_voltage_level, }; static const struct intel_cdclk_funcs icl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = icl_calc_voltage_level, }; static const struct intel_cdclk_funcs bxt_cdclk_funcs = { .get_cdclk = 
bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, .calc_voltage_level = bxt_calc_voltage_level, }; static const struct intel_cdclk_funcs skl_cdclk_funcs = { .get_cdclk = skl_get_cdclk, .set_cdclk = skl_set_cdclk, .modeset_calc_cdclk = skl_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs bdw_cdclk_funcs = { .get_cdclk = bdw_get_cdclk, .set_cdclk = bdw_set_cdclk, .modeset_calc_cdclk = bdw_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs chv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = chv_set_cdclk, .modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs vlv_cdclk_funcs = { .get_cdclk = vlv_get_cdclk, .set_cdclk = vlv_set_cdclk, .modeset_calc_cdclk = vlv_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs hsw_cdclk_funcs = { .get_cdclk = hsw_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* SNB, IVB, 965G, 945G */ static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { .get_cdclk = fixed_400mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs ilk_cdclk_funcs = { .get_cdclk = fixed_450mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs gm45_cdclk_funcs = { .get_cdclk = gm45_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* G45 uses G33 */ static const struct intel_cdclk_funcs i965gm_cdclk_funcs = { .get_cdclk = i965gm_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* i965G uses fixed 400 */ static const struct intel_cdclk_funcs pnv_cdclk_funcs = { .get_cdclk = pnv_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs g33_cdclk_funcs = { .get_cdclk = g33_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i945gm_cdclk_funcs = { .get_cdclk = i945gm_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /* i945G uses fixed 400 */ static const struct intel_cdclk_funcs i915gm_cdclk_funcs = { .get_cdclk = i915gm_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i915g_cdclk_funcs = { .get_cdclk = fixed_333mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i865g_cdclk_funcs = { .get_cdclk = fixed_266mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i85x_cdclk_funcs = { .get_cdclk = i85x_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i845g_cdclk_funcs = { .get_cdclk = fixed_200mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; static const struct intel_cdclk_funcs i830_cdclk_funcs = { .get_cdclk = fixed_133mhz_get_cdclk, .modeset_calc_cdclk = fixed_modeset_calc_cdclk, }; /** * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks * @dev_priv: i915 device */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_METEORLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs; dev_priv->display.cdclk.table = mtl_cdclk_table; } else if (IS_DG2(dev_priv)) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { /* Wa_22011320316:adl-p[a0] */ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; 
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; } else if (IS_RAPTORLAKE_U(dev_priv)) { dev_priv->display.cdclk.table = rplu_cdclk_table; dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; } else { dev_priv->display.cdclk.table = adlp_cdclk_table; dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; } } else if (IS_ROCKETLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = rkl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 12) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs; dev_priv->display.cdclk.table = icl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 11) { dev_priv->display.funcs.cdclk = &icl_cdclk_funcs; dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) dev_priv->display.cdclk.table = glk_cdclk_table; else dev_priv->display.cdclk.table = bxt_cdclk_table; } else if (DISPLAY_VER(dev_priv) == 9) { dev_priv->display.funcs.cdclk = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs; } else if (IS_HASWELL(dev_priv)) { dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.funcs.cdclk = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs; } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_IRONLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs; } else if (IS_GM45(dev_priv)) { dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs; } else if (IS_G45(dev_priv)) { dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I965GM(dev_priv)) { dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs; } else if (IS_I965G(dev_priv)) { dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_PINEVIEW(dev_priv)) { dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs; } else if (IS_G33(dev_priv)) { dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I945GM(dev_priv)) { dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs; } else if (IS_I945G(dev_priv)) { dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_I915GM(dev_priv)) { dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs; } else if (IS_I915G(dev_priv)) { dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs; } else if (IS_I865G(dev_priv)) { dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs; } else if (IS_I85X(dev_priv)) { dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs; } else if (IS_I845G(dev_priv)) { dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs; } else if (IS_I830(dev_priv)) { dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; } if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk, "Unknown platform. Assuming i830\n")) dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; }
linux-master
drivers/gpu/drm/i915/display/intel_cdclk.c
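A minimal standalone sketch (user-space C, not part of intel_cdclk.c above; the helper names and the vco/waveform values are illustrative only) of the squash-waveform arithmetic relied on by cdclk_squash_divider() and cdclk_compute_crawl_and_squash_midpoint(): the effective cdclk is popcount(waveform) * vco divided by the 16-slot squash window times the fixed cd2x divider of 2. __builtin_popcount() stands in for the kernel's hweight16().

#include <stdint.h>
#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive values */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int squash_divider(uint16_t waveform)
{
	/* An all-zero waveform means "no squashing", i.e. all 16 slots active */
	return __builtin_popcount(waveform ? waveform : 0xffff);
}

static int squashed_cdclk_khz(int vco_khz, uint16_t waveform)
{
	const int size = 16;	/* squash window size, as programmed above */
	const int div = 2;	/* fixed cd2x divider on squash-capable parts */

	return DIV_ROUND_CLOSEST(squash_divider(waveform) * vco_khz, size * div);
}

int main(void)
{
	/* Illustrative numbers: a 3240000 kHz PLL with 8 of 16 slots active */
	printf("%d kHz\n", squashed_cdclk_khz(3240000, 0xaaaa));	/* 810000 */
	printf("%d kHz\n", squashed_cdclk_khz(3240000, 0x0000));	/* 1620000 */
	return 0;
}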
/************************************************************************** Copyright © 2006 Dave Airlie All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ #include "intel_display_types.h" #include "intel_dvo_dev.h" #define SIL164_VID 0x0001 #define SIL164_DID 0x0006 #define SIL164_VID_LO 0x00 #define SIL164_VID_HI 0x01 #define SIL164_DID_LO 0x02 #define SIL164_DID_HI 0x03 #define SIL164_REV 0x04 #define SIL164_RSVD 0x05 #define SIL164_FREQ_LO 0x06 #define SIL164_FREQ_HI 0x07 #define SIL164_REG8 0x08 #define SIL164_8_VEN (1<<5) #define SIL164_8_HEN (1<<4) #define SIL164_8_DSEL (1<<3) #define SIL164_8_BSEL (1<<2) #define SIL164_8_EDGE (1<<1) #define SIL164_8_PD (1<<0) #define SIL164_REG9 0x09 #define SIL164_9_VLOW (1<<7) #define SIL164_9_MSEL_MASK (0x7<<4) #define SIL164_9_TSEL (1<<3) #define SIL164_9_RSEN (1<<2) #define SIL164_9_HTPLG (1<<1) #define SIL164_9_MDI (1<<0) #define SIL164_REGC 0x0c #define SIL164_C_SCNT (1<<7) #define SIL164_C_PLLF_MASK (0xf<<1) #define SIL164_C_PLLF_REC (4<<1) #define SIL164_C_PFEN (1<<0) struct sil164_priv { //I2CDevRec d; bool quiet; }; #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!sil->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* Silicon Image 164 driver for chip on i2c bus */ static bool sil164_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the SIL164 chip on the specified i2c bus */ struct sil164_priv *sil; unsigned 
char ch; sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); if (sil == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = sil; sil->quiet = true; if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) goto out; if (ch != (SIL164_VID & 0xff)) { DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) goto out; if (ch != (SIL164_DID & 0xff)) { DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } sil->quiet = false; DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n"); return true; out: kfree(sil); return false; } static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) { u8 reg9; sil164_readb(dvo, SIL164_REG9, &reg9); if (reg9 & SIL164_9_HTPLG) return connector_status_connected; else return connector_status_disconnected; } static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void sil164_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock * dependencies in the mode setup, we can just leave the * registers alone and everything will work fine. */ /* recommended programming sequence from doc */ /*sil164_writeb(sil, 0x08, 0x30); sil164_writeb(sil, 0x09, 0x00); sil164_writeb(sil, 0x0a, 0x90); sil164_writeb(sil, 0x0c, 0x89); sil164_writeb(sil, 0x08, 0x31);*/ /* don't do much */ sil164_writeb(dvo, SIL164_REG8, SIL164_8_VEN | SIL164_8_HEN); sil164_writeb(dvo, SIL164_REG9, SIL164_9_TSEL); sil164_writeb(dvo, SIL164_REGC, SIL164_C_PLLF_REC | SIL164_C_PFEN); } /* set the SIL164 power state */ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) { int ret; unsigned char ch; ret = sil164_readb(dvo, SIL164_REG8, &ch); if (ret == false) return; if (enable) ch |= SIL164_8_PD; else ch &= ~SIL164_8_PD; sil164_writeb(dvo, SIL164_REG8, ch); } static bool sil164_get_hw_state(struct intel_dvo_device *dvo) { int ret; unsigned char ch; ret = sil164_readb(dvo, SIL164_REG8, &ch); if (ret == false) return false; if (ch & SIL164_8_PD) return true; else return false; } static void sil164_dump_regs(struct intel_dvo_device *dvo) { u8 val; sil164_readb(dvo, SIL164_FREQ_LO, &val); DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); sil164_readb(dvo, SIL164_FREQ_HI, &val); DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val); sil164_readb(dvo, SIL164_REG8, &val); DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val); sil164_readb(dvo, SIL164_REG9, &val); DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val); sil164_readb(dvo, SIL164_REGC, &val); DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val); } static void sil164_destroy(struct intel_dvo_device *dvo) { struct sil164_priv *sil = dvo->dev_priv; if (sil) { kfree(sil); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops sil164_ops = { .init = sil164_init, .detect = sil164_detect, .mode_valid = sil164_mode_valid, .mode_set = sil164_mode_set, .dpms = sil164_dpms, .get_hw_state = sil164_get_hw_state, .dump_regs = sil164_dump_regs, .destroy = sil164_destroy, };
linux-master
drivers/gpu/drm/i915/display/dvo_sil164.c
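Editorial note on the row above: sil164_readb() wraps every register read in a two-message i2c_transfer — one plain write that sets the register offset, then one I2C_M_RD message that fetches a single byte — and sil164_init() uses it to compare SIL164_VID_LO / SIL164_DID_LO against the expected chip IDs. The sketch below reproduces that same transaction shape from user space through the i2c-dev interface, purely as an illustration of the protocol; the /dev/i2c-1 bus node and the 0x38 slave address are assumptions for the demo, not values taken from this file.

/*
 * Minimal user-space sketch (assumed bus/address) of the write-offset,
 * read-one-byte transaction that sil164_readb() performs in the driver.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int sil164_read_reg(int fd, uint16_t addr, uint8_t reg, uint8_t *val)
{
	uint8_t out = reg;
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &out }, /* register offset */
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  }, /* read one byte back */
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
	int fd = open("/dev/i2c-1", O_RDWR);	/* hypothetical bus node */
	uint8_t vid_lo, did_lo;

	if (fd < 0)
		return 1;
	if (sil164_read_reg(fd, 0x38, 0x00, &vid_lo) == 0 &&	/* SIL164_VID_LO */
	    sil164_read_reg(fd, 0x38, 0x02, &did_lo) == 0)	/* SIL164_DID_LO */
		printf("VID_LO=0x%02x DID_LO=0x%02x\n", vid_lo, did_lo);
	close(fd);
	return 0;
}

If the transfers succeed, the two printed bytes should match the low bytes of SIL164_VID (0x0001) and SIL164_DID (0x0006) that sil164_init() checks before claiming the chip.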
// SPDX-License-Identifier: GPL-2.0 /* * Intel ACPI functions * * _DSM related code stolen from nouveau_acpi.c. */ #include <linux/pci.h> #include <linux/acpi.h> #include <acpi/video.h> #include "i915_drv.h" #include "intel_acpi.h" #include "intel_display_types.h" #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ static const guid_t intel_dsm_guid = GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f, 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c); #define INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED 0 /* No args */ static const guid_t intel_dsm_guid2 = GUID_INIT(0x3e5b41c6, 0xeb1d, 0x4260, 0x9d, 0x15, 0xc7, 0x1f, 0xba, 0xda, 0xe4, 0x14); static char *intel_dsm_port_name(u8 id) { switch (id) { case 0: return "Reserved"; case 1: return "Analog VGA"; case 2: return "LVDS"; case 3: return "Reserved"; case 4: return "HDMI/DVI_B"; case 5: return "HDMI/DVI_C"; case 6: return "HDMI/DVI_D"; case 7: return "DisplayPort_A"; case 8: return "DisplayPort_B"; case 9: return "DisplayPort_C"; case 0xa: return "DisplayPort_D"; case 0xb: case 0xc: case 0xd: return "Reserved"; case 0xe: return "WiDi"; default: return "bad type"; } } static char *intel_dsm_mux_type(u8 type) { switch (type) { case 0: return "unknown"; case 1: return "No MUX, iGPU only"; case 2: return "No MUX, dGPU only"; case 3: return "MUXed between iGPU and dGPU"; default: return "bad type"; } } static void intel_dsm_platform_mux_info(acpi_handle dhandle) { int i; union acpi_object *pkg, *connector_count; pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO, NULL, ACPI_TYPE_PACKAGE); if (!pkg) { DRM_DEBUG_DRIVER("failed to evaluate _DSM\n"); return; } if (!pkg->package.count) { DRM_DEBUG_DRIVER("no connection in _DSM\n"); return; } connector_count = &pkg->package.elements[0]; DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", (unsigned long long)connector_count->integer.value); for (i = 1; i < pkg->package.count; i++) { union acpi_object *obj = &pkg->package.elements[i]; union acpi_object *connector_id; union acpi_object *info; if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i); continue; } connector_id = &obj->package.elements[0]; info = &obj->package.elements[1]; if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) { DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i); continue; } DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", (unsigned long long)connector_id->integer.value); DRM_DEBUG_DRIVER(" port id: %s\n", intel_dsm_port_name(info->buffer.pointer[0])); DRM_DEBUG_DRIVER(" display mux info: %s\n", intel_dsm_mux_type(info->buffer.pointer[1])); DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", intel_dsm_mux_type(info->buffer.pointer[2])); DRM_DEBUG_DRIVER(" hpd mux info: %s\n", intel_dsm_mux_type(info->buffer.pointer[3])); } ACPI_FREE(pkg); } static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return NULL; if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) { DRM_DEBUG_KMS("no _DSM method for intel device\n"); return NULL; } intel_dsm_platform_mux_info(dhandle); return dhandle; } static bool intel_dsm_detect(void) { acpi_handle dhandle = NULL; char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; int vga_count = 0; while ((pdev = 
pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; dhandle = intel_dsm_pci_probe(pdev) ?: dhandle; } if (vga_count == 2 && dhandle) { acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer); DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n", acpi_method_name); return true; } return false; } void intel_register_dsm_handler(void) { if (!intel_dsm_detect()) return; } void intel_unregister_dsm_handler(void) { } void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); acpi_handle dhandle; union acpi_object *obj; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return; obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID, INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL); if (obj) ACPI_FREE(obj); } /* * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices * Attached to the Display Adapter). */ #define ACPI_DISPLAY_INDEX_SHIFT 0 #define ACPI_DISPLAY_INDEX_MASK (0xf << 0) #define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4 #define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4) #define ACPI_DISPLAY_TYPE_SHIFT 8 #define ACPI_DISPLAY_TYPE_MASK (0xf << 8) #define ACPI_DISPLAY_TYPE_OTHER (0 << 8) #define ACPI_DISPLAY_TYPE_VGA (1 << 8) #define ACPI_DISPLAY_TYPE_TV (2 << 8) #define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8) #define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8) #define ACPI_VENDOR_SPECIFIC_SHIFT 12 #define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12) #define ACPI_BIOS_CAN_DETECT (1 << 16) #define ACPI_DEPENDS_ON_VGA (1 << 17) #define ACPI_PIPE_ID_SHIFT 18 #define ACPI_PIPE_ID_MASK (7 << 18) #define ACPI_DEVICE_ID_SCHEME (1ULL << 31) static u32 acpi_display_type(struct intel_connector *connector) { u32 display_type; switch (connector->base.connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: display_type = ACPI_DISPLAY_TYPE_VGA; break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: case DRM_MODE_CONNECTOR_9PinDIN: case DRM_MODE_CONNECTOR_TV: display_type = ACPI_DISPLAY_TYPE_TV; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL; break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: case DRM_MODE_CONNECTOR_DSI: display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL; break; case DRM_MODE_CONNECTOR_Unknown: case DRM_MODE_CONNECTOR_VIRTUAL: display_type = ACPI_DISPLAY_TYPE_OTHER; break; default: MISSING_CASE(connector->base.connector_type); display_type = ACPI_DISPLAY_TYPE_OTHER; break; } return display_type; } void intel_acpi_device_id_update(struct drm_i915_private *dev_priv) { struct drm_device *drm_dev = &dev_priv->drm; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; u8 display_index[16] = {}; /* Populate the ACPI IDs for all connectors for a given drm_device */ drm_connector_list_iter_begin(drm_dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { u32 device_id, type; device_id = acpi_display_type(connector); /* Use display type specific display index. */ type = (device_id & ACPI_DISPLAY_TYPE_MASK) >> ACPI_DISPLAY_TYPE_SHIFT; device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT; connector->acpi_device_id = device_id; } drm_connector_list_iter_end(&conn_iter); } /* NOTE: The connector order must be final before this is called. 
*/ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { struct drm_connector_list_iter conn_iter; struct drm_device *drm_dev = &i915->drm; struct fwnode_handle *fwnode = NULL; struct drm_connector *connector; struct acpi_device *adev; drm_connector_list_iter_begin(drm_dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { /* Always getting the next, even when the last was not used. */ fwnode = device_get_next_child_node(drm_dev->dev, fwnode); if (!fwnode) break; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: case DRM_MODE_CONNECTOR_DSI: /* * Integrated displays have a specific address 0x1f on * most Intel platforms, but not on all of them. */ adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev), 0x1f, 0); if (adev) { connector->fwnode = fwnode_handle_get(acpi_fwnode_handle(adev)); break; } fallthrough; default: connector->fwnode = fwnode_handle_get(fwnode); break; } } drm_connector_list_iter_end(&conn_iter); /* * device_get_next_child_node() takes a reference on the fwnode, if * we stopped iterating because we are out of connectors we need to * put this, otherwise fwnode is NULL and the put is a no-op. */ fwnode_handle_put(fwnode); } void intel_acpi_video_register(struct drm_i915_private *i915) { struct drm_connector_list_iter conn_iter; struct drm_connector *connector; acpi_video_register(); /* * If i915 is driving an internal panel without registering its native * backlight handler try to register the acpi_video backlight. * For panels not driven by i915 another GPU driver may still register * a native backlight later and acpi_video_register_backlight() should * only be called after any native backlights have been registered. */ drm_connector_list_iter_begin(&i915->drm, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_panel *panel = &to_intel_connector(connector)->panel; if (panel->backlight.funcs && !panel->backlight.device) { acpi_video_register_backlight(); break; } } drm_connector_list_iter_end(&conn_iter); }
linux-master
drivers/gpu/drm/i915/display/intel_acpi.c
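Editorial note on the row above: the _DOD handling is mostly bit packing — acpi_display_type() maps a DRM connector type onto one of the ACPI_DISPLAY_TYPE_* values, and intel_acpi_device_id_update() then ORs in a per-type running index kept in display_index[16]. Below is a minimal standalone sketch of that composition using the same macro values; the four-connector list is hypothetical and only serves to show how the per-type counters advance.

/*
 * Standalone sketch of how intel_acpi_device_id_update() composes an ACPI
 * _DOD device id: display-type field plus a per-type index in the low nibble.
 * Macro values copied from the defines above; the connector list is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define ACPI_DISPLAY_INDEX_SHIFT		0
#define ACPI_DISPLAY_TYPE_SHIFT			8
#define ACPI_DISPLAY_TYPE_MASK			(0xf << 8)
#define ACPI_DISPLAY_TYPE_VGA			(1 << 8)
#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL	(3 << 8)
#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4 << 8)

int main(void)
{
	/* Hypothetical board: eDP panel, two DP outputs, one VGA output. */
	uint32_t types[] = {
		ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL,
		ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL,
		ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL,
		ACPI_DISPLAY_TYPE_VGA,
	};
	uint8_t display_index[16] = { 0 };

	for (unsigned int i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		uint32_t device_id = types[i];
		uint32_t type = (device_id & ACPI_DISPLAY_TYPE_MASK) >>
				ACPI_DISPLAY_TYPE_SHIFT;

		/* Same per-type counter the driver keeps in display_index[]. */
		device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
		printf("connector %u -> acpi_device_id 0x%04x\n", i, device_id);
	}
	return 0;
}

Run against that made-up list it prints 0x0400, 0x0300, 0x0301 and 0x0100: the first internal digital panel, the first and second external digital outputs, and the first VGA output.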
/* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2017 Google, Inc. * Copyright _ 2017-2019, Intel Corporation. * * Authors: * Sean Paul <[email protected]> * Ramalingam C <[email protected]> */ #include <linux/component.h> #include <linux/i2c.h> #include <linux/random.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/i915_component.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_hdcp_gsc.h" #include "intel_hdcp_regs.h" #include "intel_pcode.h" #define KEY_LOAD_TRIES 5 #define HDCP2_LC_RETRY_CNT 3 static int intel_conn_to_vcpi(struct drm_atomic_state *state, struct intel_connector *connector) { struct drm_dp_mst_topology_mgr *mgr; struct drm_dp_mst_atomic_payload *payload; struct drm_dp_mst_topology_state *mst_state; int vcpi = 0; /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */ if (!connector->port) return 0; mgr = connector->port->mgr; drm_modeset_lock(&mgr->base.lock, state->acquire_ctx); mst_state = to_drm_dp_mst_topology_state(mgr->base.state); payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); if (drm_WARN_ON(mgr->dev, !payload)) goto out; vcpi = payload->vcpi; if (drm_WARN_ON(mgr->dev, vcpi < 0)) { vcpi = 0; goto out; } out: return vcpi; } /* * intel_hdcp_required_content_stream selects the most highest common possible HDCP * content_type for all streams in DP MST topology because security f/w doesn't * have any provision to mark content_type for each stream separately, it marks * all available streams with the content_type proivided at the time of port * authentication. This may prohibit the userspace to use type1 content on * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in * DP MST topology. Though it is not compulsory, security fw should change its * policy to mark different content_types for different streams. */ static void intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) { struct hdcp_port_data *data = &dig_port->hdcp_port_data; bool enforce_type0 = false; int k; if (dig_port->hdcp_auth_status) return; if (!dig_port->hdcp_mst_type1_capable) enforce_type0 = true; /* * Apply common protection level across all streams in DP MST Topology. * Use highest supported content type for all streams in DP MST Topology. */ for (k = 0; k < data->k; k++) data->streams[k].stream_type = enforce_type0 ? 
DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; } static void intel_hdcp_prepare_streams(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { data->streams[0].stream_type = hdcp->content_type; } else { intel_hdcp_required_content_stream(dig_port); } } static bool intel_hdcp_is_ksv_valid(u8 *ksv) { int i, ones = 0; /* KSV has 20 1's and 20 0's */ for (i = 0; i < DRM_HDCP_KSV_LEN; i++) ones += hweight8(ksv[i]); if (ones != 20) return false; return true; } static int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim, u8 *bksv) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret, i, tries = 2; /* HDCP spec states that we must retry the bksv if it is invalid */ for (i = 0; i < tries; i++) { ret = shim->read_bksv(dig_port, bksv); if (ret) return ret; if (intel_hdcp_is_ksv_valid(bksv)) break; } if (i == tries) { drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); return -ENODEV; } return 0; } /* Is HDCP1.4 capable on Platform and Sink */ bool intel_hdcp_capable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); const struct intel_hdcp_shim *shim = connector->hdcp.shim; bool capable = false; u8 bksv[5]; if (!shim) return capable; if (shim->hdcp_capable) { shim->hdcp_capable(dig_port, &capable); } else { if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) capable = true; } return capable; } /* Is HDCP2.2 capable on Platform and Sink */ bool intel_hdcp2_capable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; bool capable = false; /* I915 support for HDCP2.2 */ if (!hdcp->hdcp2_supported) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ if (intel_hdcp_gsc_cs_required(i915)) { struct intel_gt *gt = i915->media_gt; struct intel_gsc_uc *gsc = gt ? 
&gt->uc.gsc : NULL; if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { drm_dbg_kms(&i915->drm, "GSC components required for HDCP2.2 are not ready\n"); return false; } } /* MEI/GSC interface is solid depending on which is used */ mutex_lock(&i915->display.hdcp.hdcp_mutex); if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return false; } mutex_unlock(&i915->display.hdcp.hdcp_mutex); /* Sink's capability for HDCP2.2 */ hdcp->shim->hdcp_2_2_capable(dig_port, &capable); return capable; } static bool intel_hdcp_in_use(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { return intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & HDCP_STATUS_ENC; } static bool intel_hdcp2_in_use(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { return intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS; } static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { int ret, read_ret; bool ksv_ready; /* Poll for ksv list ready (spec says max time allowed is 5s) */ ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready), read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 100 * 1000); if (ret) return ret; if (read_ret) return read_ret; if (!ksv_ready) return -ETIMEDOUT; return 0; } static bool hdcp_key_loadable(struct drm_i915_private *i915) { enum i915_power_well_id id; intel_wakeref_t wakeref; bool enabled = false; /* * On HSW and BDW, Display HW loads the Key as soon as Display resumes. * On all BXT+, SW can load the keys only when the PW#1 is turned on. */ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) id = HSW_DISP_PW_GLOBAL; else id = SKL_DISP_PW_1; /* PG1 (power well #1) needs to be enabled */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) enabled = intel_display_power_well_is_enabled(i915, id); /* * Another req for hdcp key loadability is enabled state of pll for * cdclk. Without active crtc we wont land here. So we are assuming that * cdclk is already on. */ return enabled; } static void intel_hdcp_clear_keys(struct drm_i915_private *i915) { intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); intel_de_write(i915, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); } static int intel_hdcp_load_keys(struct drm_i915_private *i915) { int ret; u32 val; val = intel_de_read(i915, HDCP_KEY_STATUS); if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) return 0; /* * On HSW and BDW HW loads the HDCP1.4 Key when Display comes * out of reset. So if Key is not already loaded, its an error state. */ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) return -ENXIO; /* * Initiate loading the HDCP key from fuses. * * BXT+ platforms, HDCP key needs to be loaded by SW. Only display * version 9 platforms (minus BXT) differ in the key load trigger * process from other platforms. These platforms use the GT Driver * Mailbox interface. 
*/ if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) { ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { drm_err(&i915->drm, "Failed to initiate HDCP key load (%d)\n", ret); return ret; } } else { intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); } /* Wait for the keys to load (500us) */ ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) return ret; else if (!(val & HDCP_KEY_LOAD_STATUS)) return -ENXIO; /* Send Aksv over to PCH display for use in authentication */ intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); return 0; } /* Returns updated SHA-1 index */ static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text) { intel_de_write(i915, HDCP_SHA_TEXT, sha_text); if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } return 0; } static u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { if (DISPLAY_VER(i915) >= 12) { switch (cpu_transcoder) { case TRANSCODER_A: return HDCP_TRANSA_REP_PRESENT | HDCP_TRANSA_SHA1_M0; case TRANSCODER_B: return HDCP_TRANSB_REP_PRESENT | HDCP_TRANSB_SHA1_M0; case TRANSCODER_C: return HDCP_TRANSC_REP_PRESENT | HDCP_TRANSC_SHA1_M0; case TRANSCODER_D: return HDCP_TRANSD_REP_PRESENT | HDCP_TRANSD_SHA1_M0; default: drm_err(&i915->drm, "Unknown transcoder %d\n", cpu_transcoder); return -EINVAL; } } switch (port) { case PORT_A: return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; case PORT_B: return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; case PORT_C: return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; case PORT_D: return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: drm_err(&i915->drm, "Unknown port %d\n", port); return -EINVAL; } } static int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; int ret, i, j, sha_idx; /* Process V' values from the receiver */ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { ret = shim->read_v_prime_part(dig_port, i, &vprime); if (ret) return ret; intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); } /* * We need to write the concatenation of all device KSVs, BINFO (DP) || * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte * stream is written via the HDCP_SHA_TEXT register in 32-bit * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This * index will keep track of our progress through the 64 bytes as well as * helping us work the 40-bit KSVs through our 32-bit register. 
* * NOTE: data passed via HDCP_SHA_TEXT should be big-endian */ sha_idx = 0; sha_text = 0; sha_leftovers = 0; rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; /* Fill up the empty slots in sha_text and write it out */ sha_empty = sizeof(sha_text) - sha_leftovers; for (j = 0; j < sha_empty; j++) { u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); sha_text |= ksv[j] << off; } ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; /* Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); if (!(sha_idx % 64)) intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Store the leftover bytes from the ksv in sha_text */ sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; sha_text = 0; for (j = 0; j < sha_leftovers; j++) sha_text |= ksv[sha_empty + j] << ((sizeof(sha_text) - j - 1) * 8); /* * If we still have room in sha_text for more data, continue. * Otherwise, write it out immediately. */ if (sizeof(sha_text) > sha_leftovers) continue; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_leftovers = 0; sha_text = 0; sha_idx += sizeof(sha_text); } /* * We need to write BINFO/BSTATUS, and M0 now. Depending on how many * bytes are leftover from the last ksv, we might be able to fit them * all in sha_text (first 2 cases), or we might need to split them up * into 2 writes (last 2 cases). */ if (sha_leftovers == 0) { /* Write 16 bits of text, 16 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); ret = intel_write_sha_text(i915, bstatus[0] << 8 | bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 1) { /* Write 24 bits of text, 8 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); sha_text |= bstatus[0] << 16 | bstatus[1] << 8; /* Only 24-bits of data, must be in the LSB */ sha_text = (sha_text & 0xffffff00) >> 8; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 2) { /* Write 32 bits of text */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 8 | bstatus[1]; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); for (i = 0; i < 2; i++) { ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } /* * Terminate the SHA-1 stream by hand. For the other leftover * cases this is appended by the hardware. 
*/ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { /* Write 32 bits of text (filled from LSB) */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0]; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(i915, bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else { drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", sha_leftovers); return -EINVAL; } intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ while ((sha_idx % 64) < (64 - sizeof(sha_text))) { ret = intel_write_sha_text(i915, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } /* * Last write gets the length of the concatenation in bits. That is: * - 5 bytes per device * - 10 bytes for BINFO/BSTATUS(2), M0(8) */ sha_text = (num_downstream * 5 + 10) * 8; ret = intel_write_sha_text(i915, sha_text); if (ret < 0) return ret; /* Tell the HW we're done with the hash and wait for it to ACK */ intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, 1)) { drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); return -ENXIO; } return 0; } /* Implements Part 2 of the HDCP authorization procedure */ static int intel_hdcp_auth_downstream(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; u8 bstatus[2], num_downstream, *ksv_fifo; int ret, i, tries = 3; ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); if (ret) { drm_dbg_kms(&i915->drm, "KSV list failed to become ready (%d)\n", ret); return ret; } ret = shim->read_bstatus(dig_port, bstatus); if (ret) return ret; if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); return -EPERM; } /* * When repeater reports 0 device count, HDCP1.4 spec allows disabling * the HDCP encryption. That implies that repeater can't have its own * display. As there is no consumption of encrypted content in the * repeater with 0 downstream devices, we are failing the * authentication. 
*/ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); if (num_downstream == 0) { drm_dbg_kms(&i915->drm, "Repeater with zero downstream devices\n"); return -EINVAL; } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); if (!ksv_fifo) { drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); return -ENOMEM; } ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); if (ret) goto err; if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, num_downstream) > 0) { drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; } /* * When V prime mismatches, DP Spec mandates re-read of * V prime atleast twice. */ for (i = 0; i < tries; i++) { ret = intel_hdcp_validate_v_prime(connector, shim, ksv_fifo, num_downstream, bstatus); if (!ret) break; } if (i == tries) { drm_dbg_kms(&i915->drm, "V Prime validation failed.(%d)\n", ret); goto err; } drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", num_downstream); ret = 0; err: kfree(ksv_fifo); return ret; } /* Implements Part 1 of the HDCP authorization procedure */ static int intel_hdcp_auth(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = dig_port->base.port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; union { u32 reg[2]; u8 shim[DRM_HDCP_AN_LEN]; } an; union { u32 reg[2]; u8 shim[DRM_HDCP_KSV_LEN]; } bksv; union { u32 reg; u8 shim[DRM_HDCP_RI_LEN]; } ri; bool repeater_present, hdcp_capable; /* * Detects whether the display is HDCP capable. Although we check for * valid Bksv below, the HDCP over DP spec requires that we check * whether the display supports HDCP before we write An. For HDMI * displays, this is not necessary. 
*/ if (shim->hdcp_capable) { ret = shim->hdcp_capable(dig_port, &hdcp_capable); if (ret) return ret; if (!hdcp_capable) { drm_dbg_kms(&i915->drm, "Panel is not HDCP capable\n"); return -EINVAL; } } /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) intel_de_write(i915, HDCP_ANINIT(i915, cpu_transcoder, port), get_random_u32()); intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ if (intel_de_wait_for_set(i915, HDCP_STATUS(i915, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { drm_err(&i915->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } an.reg[0] = intel_de_read(i915, HDCP_ANLO(i915, cpu_transcoder, port)); an.reg[1] = intel_de_read(i915, HDCP_ANHI(i915, cpu_transcoder, port)); ret = shim->write_an_aksv(dig_port, an.shim); if (ret) return ret; r0_prime_gen_start = jiffies; memset(&bksv, 0, sizeof(bksv)); ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); if (ret < 0) return ret; if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { drm_err(&i915->drm, "BKSV is revoked\n"); return -EPERM; } intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), bksv.reg[0]); intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(dig_port, &repeater_present); if (ret) return ret; if (repeater_present) intel_de_write(i915, HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port)); ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } /* * Wait for R0' to become available. The spec says 100ms from Aksv, but * some monitors can take longer than this. We'll set the timeout at * 300ms just to be sure. * * On DP, there's an R0_READY bit available but no such bit * exists on HDMI. Since the upper-bound is the same, we'll just do * the stupid thing instead of polling on one and not the other. */ wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); tries = 3; /* * DP HDCP Spec mandates the two more reattempt to read R0, incase * of R0 mismatch. 
*/ for (i = 0; i < tries; i++) { ri.reg = 0; ret = shim->read_ri_prime(dig_port, ri.shim); if (ret) return ret; intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { drm_dbg_kms(&i915->drm, "Timed out waiting for Ri prime match (%x)\n", intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ if (intel_de_wait_for_set(i915, HDCP_STATUS(i915, cpu_transcoder, port), HDCP_STATUS_ENC, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } /* DP MST Auth Part 1 Step 2.a and Step 2.b */ if (shim->stream_encryption) { ret = shim->stream_encryption(connector, true); if (ret) { drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n", connector->base.name, connector->base.base.id); return ret; } drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } if (repeater_present) return intel_hdcp_auth_downstream(connector); drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); return 0; } static int _intel_hdcp_disable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; u32 repeater_ctl; int ret; drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); if (hdcp->shim->stream_encryption) { ret = hdcp->shim->stream_encryption(connector, false); if (ret) { drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", connector->base.name, connector->base.base.id); return ret; } drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); /* * If there are other connectors on this port using HDCP, * don't disable it until it disabled HDCP encryption for * all connectors in MST topology. 
*/ if (dig_port->num_hdcp_streams > 0) return 0; } hdcp->hdcp_encrypted = false; intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); if (intel_de_wait_for_clear(i915, HDCP_STATUS(i915, cpu_transcoder, port), ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { drm_err(&i915->drm, "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); return ret; } drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); return 0; } static int _intel_hdcp_enable(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int i, ret, tries = 3; drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n", connector->base.name, connector->base.base.id); if (!hdcp_key_loadable(i915)) { drm_err(&i915->drm, "HDCP key Load is not possible\n"); return -ENXIO; } for (i = 0; i < KEY_LOAD_TRIES; i++) { ret = intel_hdcp_load_keys(i915); if (!ret) break; intel_hdcp_clear_keys(i915); } if (ret) { drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n", ret); return ret; } /* Incase of authentication failures, HDCP spec expects reauth. */ for (i = 0; i < tries; i++) { ret = intel_hdcp_auth(connector); if (!ret) { hdcp->hdcp_encrypted = true; return 0; } drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret); /* Ensuring HDCP encryption and signalling are stopped. */ _intel_hdcp_disable(connector); } drm_dbg_kms(&i915->drm, "HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) { return container_of(hdcp, struct intel_connector, hdcp); } static void intel_hdcp_update_value(struct intel_connector *connector, u64 value, bool update_property) { struct drm_device *dev = connector->base.dev; struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); if (hdcp->value == value) return; drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) dig_port->num_hdcp_streams--; } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { dig_port->num_hdcp_streams++; } hdcp->value = value; if (update_property) { drm_connector_get(&connector->base); queue_work(i915->unordered_wq, &hdcp->prop_work); } } /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); cpu_transcoder = hdcp->cpu_transcoder; /* Check_link valid only when HDCP1.4 is enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || !hdcp->hdcp_encrypted) { ret = -EINVAL; goto out; } if (drm_WARN_ON(&i915->drm, !intel_hdcp_in_use(i915, cpu_transcoder, port))) { drm_err(&i915->drm, "%s:%d HDCP link stopped encryption,%x\n", 
connector->base.name, connector->base.base.id, intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); ret = -ENXIO; intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } if (hdcp->shim->check_link(dig_port, connector)) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED, true); } goto out; } drm_dbg_kms(&i915->drm, "[%s:%d] HDCP link failed, retrying authentication\n", connector->base.name, connector->base.base.id); ret = _intel_hdcp_disable(connector); if (ret) { drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } ret = _intel_hdcp_enable(connector); if (ret) { drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } out: mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } static void intel_hdcp_prop_work(struct work_struct *work) { struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, prop_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); mutex_lock(&hdcp->mutex); /* * This worker is only used to flip between ENABLED/DESIRED. Either of * those to UNDESIRED is handled by core. If value == UNDESIRED, * we're running just after hdcp has been disabled, so just exit */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) drm_hdcp_update_content_protection(&connector->base, hdcp->value); mutex_unlock(&hdcp->mutex); drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); drm_connector_put(&connector->base); } bool is_hdcp_supported(struct drm_i915_private *i915, enum port port) { return DISPLAY_RUNTIME_INFO(i915)->has_hdcp && (DISPLAY_VER(i915) >= 12 || port < PORT_E); } static int hdcp2_prepare_ake_init(struct intel_connector *connector, struct hdcp2_ake_init *ake_data) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); if (ret) drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct hdcp2_ake_send_cert *rx_cert, bool *paired, struct hdcp2_ake_no_stored_km *ek_pub_km, size_t *msg_sz) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) drm_dbg_kms(&i915->drm, "Verify rx_cert failed. 
%d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_verify_hprime(struct intel_connector *connector, struct hdcp2_ake_send_hprime *rx_hprime) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); if (ret < 0) drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_store_pairing_info(struct intel_connector *connector, struct hdcp2_ake_send_pairing_info *pairing_info) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); if (ret < 0) drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_prepare_lc_init(struct intel_connector *connector, struct hdcp2_lc_init *lc_init) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); if (ret < 0) drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_verify_lprime(struct intel_connector *connector, struct hdcp2_lc_send_lprime *rx_lprime) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); if (ret < 0) drm_dbg_kms(&i915->drm, "Verify L_Prime failed. 
%d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_prepare_skey(struct intel_connector *connector, struct hdcp2_ske_send_eks *ske_data) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); if (ret < 0) drm_dbg_kms(&i915->drm, "Get session key failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, struct hdcp2_rep_send_receiverid_list *rep_topology, struct hdcp2_rep_send_ack *rep_send_ack) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, data, rep_topology, rep_send_ack); if (ret < 0) drm_dbg_kms(&i915->drm, "Verify rep topology failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_verify_mprime(struct intel_connector *connector, struct hdcp2_rep_stream_ready *stream_ready) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); if (ret < 0) drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_authenticate_port(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); if (ret < 0) drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. 
%d\n", ret); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_close_session(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct i915_hdcp_arbiter *arbiter; int ret; mutex_lock(&i915->display.hdcp.hdcp_mutex); arbiter = i915->display.hdcp.arbiter; if (!arbiter || !arbiter->ops) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return -EINVAL; } ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, &dig_port->hdcp_port_data); mutex_unlock(&i915->display.hdcp.hdcp_mutex); return ret; } static int hdcp2_deauthenticate_port(struct intel_connector *connector) { return hdcp2_close_session(connector); } /* Authentication flow starts from here */ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_ake_init ake_init; struct hdcp2_ake_send_cert send_cert; struct hdcp2_ake_no_stored_km no_stored_km; struct hdcp2_ake_send_hprime send_hprime; struct hdcp2_ake_send_pairing_info pairing_info; } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; size_t size; int ret; /* Init for seq_num */ hdcp->seq_num_v = 0; hdcp->seq_num_m = 0; ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); if (ret < 0) return ret; ret = shim->write_2_2_msg(dig_port, &msgs.ake_init, sizeof(msgs.ake_init)); if (ret < 0) return ret; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT, &msgs.send_cert, sizeof(msgs.send_cert)); if (ret < 0) return ret; if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); if (drm_hdcp_check_ksvs_revoked(&i915->drm, msgs.send_cert.cert_rx.receiver_id, 1) > 0) { drm_err(&i915->drm, "Receiver ID is revoked\n"); return -EPERM; } /* * Here msgs.no_stored_km will hold msgs corresponding to the km * stored also. 
*/ ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, &hdcp->is_paired, &msgs.no_stored_km, &size); if (ret < 0) return ret; ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size); if (ret < 0) return ret; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME, &msgs.send_hprime, sizeof(msgs.send_hprime)); if (ret < 0) return ret; ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); if (ret < 0) return ret; if (!hdcp->is_paired) { /* Pairing is required */ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_PAIRING_INFO, &msgs.pairing_info, sizeof(msgs.pairing_info)); if (ret < 0) return ret; ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); if (ret < 0) return ret; hdcp->is_paired = true; } return 0; } static int hdcp2_locality_check(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_lc_init lc_init; struct hdcp2_lc_send_lprime send_lprime; } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; int tries = HDCP2_LC_RETRY_CNT, ret, i; for (i = 0; i < tries; i++) { ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); if (ret < 0) continue; ret = shim->write_2_2_msg(dig_port, &msgs.lc_init, sizeof(msgs.lc_init)); if (ret < 0) continue; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_LC_SEND_LPRIME, &msgs.send_lprime, sizeof(msgs.send_lprime)); if (ret < 0) continue; ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); if (!ret) break; } return ret; } static int hdcp2_session_key_exchange(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp2_ske_send_eks send_eks; int ret; ret = hdcp2_prepare_skey(connector, &send_eks); if (ret < 0) return ret; ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks, sizeof(send_eks)); if (ret < 0) return ret; return 0; } static int _hdcp2_propagate_stream_management_info(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_stream_manage stream_manage; struct hdcp2_rep_stream_ready stream_ready; } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; int ret, streams_size_delta, i; if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) return -ERANGE; /* Prepare RepeaterAuth_Stream_Manage msg */ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); msgs.stream_manage.k = cpu_to_be16(data->k); for (i = 0; i < data->k; i++) { msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id; msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type; } streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) * sizeof(struct hdcp2_streamid_type); /* Send it to Repeater */ ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage, sizeof(msgs.stream_manage) - streams_size_delta); if (ret < 0) goto out; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY, &msgs.stream_ready, sizeof(msgs.stream_ready)); if (ret < 0) goto out; data->seq_num_m = hdcp->seq_num_m; ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); out: hdcp->seq_num_m++; return ret; } static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { struct intel_digital_port *dig_port = 
intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_send_receiverid_list recvid_list; struct hdcp2_rep_send_ack rep_ack; } msgs; const struct intel_hdcp_shim *shim = hdcp->shim; u32 seq_num_v, device_cnt; u8 *rx_info; int ret; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, &msgs.recvid_list, sizeof(msgs.recvid_list)); if (ret < 0) return ret; rx_info = msgs.recvid_list.rx_info; if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n"); return -EINVAL; } /* * MST topology is not Type 1 capable if it contains a downstream * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant. */ dig_port->hdcp_mst_type1_capable = !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) && !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); /* Converting and Storing the seq_num_v to local variable as DWORD */ seq_num_v = drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); if (!hdcp->hdcp2_encrypted && seq_num_v) { drm_dbg_kms(&i915->drm, "Non zero Seq_num_v at first RecvId_List msg\n"); return -EINVAL; } if (seq_num_v < hdcp->seq_num_v) { /* Roll over of the seq_num_v from repeater. Reauthenticate. */ drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n"); return -EINVAL; } device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); if (drm_hdcp_check_ksvs_revoked(&i915->drm, msgs.recvid_list.receiver_ids, device_cnt) > 0) { drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } ret = hdcp2_verify_rep_topology_prepare_ack(connector, &msgs.recvid_list, &msgs.rep_ack); if (ret < 0) return ret; hdcp->seq_num_v = seq_num_v; ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack, sizeof(msgs.rep_ack)); if (ret < 0) return ret; return 0; } static int hdcp2_authenticate_sink(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; int ret; ret = hdcp2_authentication_key_exchange(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret); return ret; } ret = hdcp2_locality_check(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "Locality Check failed. Err : %d\n", ret); return ret; } ret = hdcp2_session_key_exchange(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); return ret; } if (shim->config_stream_type) { ret = shim->config_stream_type(dig_port, hdcp->is_repeater, hdcp->content_type); if (ret < 0) return ret; } if (hdcp->is_repeater) { ret = hdcp2_authenticate_repeater_topology(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "Repeater Auth Failed. 
Err: %d\n", ret); return ret; } } return ret; } static int hdcp2_enable_stream_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; enum port port = dig_port->base.port; int ret = 0; if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)) { drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n", connector->base.name, connector->base.base.id); ret = -EPERM; goto link_recover; } if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, true); if (ret) { drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n", connector->base.name, connector->base.base.id); return ret; } drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", transcoder_name(hdcp->stream_transcoder)); } return 0; link_recover: if (hdcp2_deauthenticate_port(connector) < 0) drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); dig_port->hdcp_auth_status = false; data->k = 0; return ret; } static int hdcp2_enable_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; drm_WARN_ON(&i915->drm, intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) { drm_err(&i915->drm, "Failed to enable HDCP signalling. %d\n", ret); return ret; } } if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_AUTH_STATUS) /* Link is Authenticated. Now set for Encryption */ intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), 0, CTL_LINK_ENCRYPTION_REQ); ret = intel_de_wait_for_set(i915, HDCP2_STATUS(i915, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); dig_port->hdcp_auth_status = true; return ret; } static int hdcp2_disable_encryption(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)); intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), CTL_LINK_ENCRYPTION_REQ, 0); ret = intel_de_wait_for_clear(i915, HDCP2_STATUS(i915, cpu_transcoder, port), LINK_ENCRYPTION_STATUS, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) drm_dbg_kms(&i915->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { drm_err(&i915->drm, "Failed to disable HDCP signalling. 
%d\n", ret); return ret; } } return ret; } static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); int i, tries = 3, ret; if (!connector->hdcp.is_repeater) return 0; for (i = 0; i < tries; i++) { ret = _hdcp2_propagate_stream_management_info(connector); if (!ret) break; /* Lets restart the auth incase of seq_num_m roll over */ if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { drm_dbg_kms(&i915->drm, "seq_num_m roll over.(%d)\n", ret); break; } drm_dbg_kms(&i915->drm, "HDCP2 stream management %d of %d Failed.(%d)\n", i + 1, tries, ret); } return ret; } static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret = 0, i, tries = 3; for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { ret = hdcp2_authenticate_sink(connector); if (!ret) { intel_hdcp_prepare_streams(connector); ret = hdcp2_propagate_stream_management_info(connector); if (ret) { drm_dbg_kms(&i915->drm, "Stream management failed.(%d)\n", ret); break; } ret = hdcp2_authenticate_port(connector); if (!ret) break; drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n", ret); } /* Clearing the mei hdcp session */ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", i + 1, tries, ret); if (hdcp2_deauthenticate_port(connector) < 0) drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); } if (!ret && !dig_port->hdcp_auth_status) { /* * Ensuring the required 200mSec min time interval between * Session Key Exchange and encryption. */ msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); ret = hdcp2_enable_encryption(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "Encryption Enable Failed.(%d)\n", ret); if (hdcp2_deauthenticate_port(connector) < 0) drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); } } if (!ret) ret = hdcp2_enable_stream_encryption(connector); return ret; } static int _intel_hdcp2_enable(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n", connector->base.name, connector->base.base.id, hdcp->content_type); ret = hdcp2_authenticate_and_encrypt(connector); if (ret) { drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", hdcp->content_type, ret); return ret; } drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. 
Type %d\n", connector->base.name, connector->base.base.id, hdcp->content_type); hdcp->hdcp2_encrypted = true; return 0; } static int _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; int ret; drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n", connector->base.name, connector->base.base.id); if (hdcp->shim->stream_2_2_encryption) { ret = hdcp->shim->stream_2_2_encryption(connector, false); if (ret) { drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n", connector->base.name, connector->base.base.id); return ret; } drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", transcoder_name(hdcp->stream_transcoder)); if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery) return 0; } ret = hdcp2_disable_encryption(connector); if (hdcp2_deauthenticate_port(connector) < 0) drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); connector->hdcp.hdcp2_encrypted = false; dig_port->hdcp_auth_status = false; data->k = 0; return ret; } /* Implements the Link Integrity Check for HDCP2.2 */ static int intel_hdcp2_check_link(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); cpu_transcoder = hdcp->cpu_transcoder; /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || !hdcp->hdcp2_encrypted) { ret = -EINVAL; goto out; } if (drm_WARN_ON(&i915->drm, !intel_hdcp2_in_use(i915, cpu_transcoder, port))) { drm_err(&i915->drm, "HDCP2.2 link stopped the encryption, %x\n", intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port))); ret = -ENXIO; _intel_hdcp2_disable(connector, true); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } ret = hdcp->shim->check_2_2_link(dig_port, connector); if (ret == HDCP_LINK_PROTECTED) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED, true); } goto out; } if (ret == HDCP_TOPOLOGY_CHANGE) { if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; drm_dbg_kms(&i915->drm, "HDCP2.2 Downstream topology change\n"); ret = hdcp2_authenticate_repeater_topology(connector); if (!ret) { intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED, true); goto out; } drm_dbg_kms(&i915->drm, "[%s:%d] Repeater topology auth failed.(%d)\n", connector->base.name, connector->base.base.id, ret); } else { drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 link failed, retrying auth\n", connector->base.name, connector->base.base.id); } ret = _intel_hdcp2_disable(connector, true); if (ret) { drm_err(&i915->drm, "[%s:%d] Failed to disable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } ret = _intel_hdcp2_enable(connector); if (ret) { drm_dbg_kms(&i915->drm, "[%s:%d] Failed to enable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); 
intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } out: mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } static void intel_hdcp_check_work(struct work_struct *work) { struct intel_hdcp *hdcp = container_of(to_delayed_work(work), struct intel_hdcp, check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); struct drm_i915_private *i915 = to_i915(connector->base.dev); if (drm_connector_is_unregistered(&connector->base)) return; if (!intel_hdcp2_check_link(connector)) queue_delayed_work(i915->unordered_wq, &hdcp->check_work, DRM_HDCP2_CHECK_PERIOD_MS); else if (!intel_hdcp_check_link(connector)) queue_delayed_work(i915->unordered_wq, &hdcp->check_work, DRM_HDCP_CHECK_PERIOD_MS); } static int i915_hdcp_component_bind(struct device *i915_kdev, struct device *mei_kdev, void *data) { struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); mutex_lock(&i915->display.hdcp.hdcp_mutex); i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data; i915->display.hdcp.arbiter->hdcp_dev = mei_kdev; mutex_unlock(&i915->display.hdcp.hdcp_mutex); return 0; } static void i915_hdcp_component_unbind(struct device *i915_kdev, struct device *mei_kdev, void *data) { struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); mutex_lock(&i915->display.hdcp.hdcp_mutex); i915->display.hdcp.arbiter = NULL; mutex_unlock(&i915->display.hdcp.hdcp_mutex); } static const struct component_ops i915_hdcp_ops = { .bind = i915_hdcp_component_bind, .unbind = i915_hdcp_component_unbind, }; static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) { switch (port) { case PORT_A: return HDCP_DDI_A; case PORT_B ... PORT_F: return (enum hdcp_ddi)port; default: return HDCP_DDI_INVALID_PORT; } } static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) { switch (cpu_transcoder) { case TRANSCODER_A ... TRANSCODER_D: return (enum hdcp_transcoder)(cpu_transcoder | 0x10); default: /* eDP, DSI TRANSCODERS are non HDCP capable */ return HDCP_INVALID_TRANSCODER; } } static int initialize_hdcp_port_data(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; enum port port = dig_port->base.port; if (DISPLAY_VER(i915) < 12) data->hdcp_ddi = intel_get_hdcp_ddi_index(port); else /* * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled * with zero(INVALID PORT index). */ data->hdcp_ddi = HDCP_DDI_INVALID_PORT; /* * As associated transcoder is set and modified at modeset, here hdcp_transcoder * is initialized to zero (invalid transcoder index). This will be * retained for <Gen12 forever. 
*/ data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; data->protocol = (u8)shim->protocol; if (!data->streams) data->streams = kcalloc(INTEL_NUM_PIPES(i915), sizeof(struct hdcp2_streamid_type), GFP_KERNEL); if (!data->streams) { drm_err(&i915->drm, "Out of Memory\n"); return -ENOMEM; } return 0; } static bool is_hdcp2_supported(struct drm_i915_private *i915) { if (intel_hdcp_gsc_cs_required(i915)) return true; if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; return (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)); } void intel_hdcp_component_init(struct drm_i915_private *i915) { int ret; if (!is_hdcp2_supported(i915)) return; mutex_lock(&i915->display.hdcp.hdcp_mutex); drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); i915->display.hdcp.comp_added = true; mutex_unlock(&i915->display.hdcp.hdcp_mutex); if (intel_hdcp_gsc_cs_required(i915)) ret = intel_hdcp_gsc_init(i915); else ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, I915_COMPONENT_HDCP); if (ret < 0) { drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", ret); mutex_lock(&i915->display.hdcp.hdcp_mutex); i915->display.hdcp.comp_added = false; mutex_unlock(&i915->display.hdcp.hdcp_mutex); return; } } static void intel_hdcp2_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = initialize_hdcp_port_data(connector, dig_port, shim); if (ret) { drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); return; } hdcp->hdcp2_supported = true; } int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; if (!shim) return -EINVAL; if (is_hdcp2_supported(i915)) intel_hdcp2_init(connector, dig_port, shim); ret = drm_connector_attach_content_protection_property(&connector->base, hdcp->hdcp2_supported); if (ret) { hdcp->hdcp2_supported = false; kfree(dig_port->hdcp_port_data.streams); return ret; } hdcp->shim = shim; mutex_init(&hdcp->mutex); INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); init_waitqueue_head(&hdcp->cp_irq_queue); return 0; } static int intel_hdcp_set_streams(struct intel_digital_port *dig_port, struct intel_atomic_state *state) { struct drm_connector_list_iter conn_iter; struct intel_digital_port *conn_dig_port; struct intel_connector *connector; struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; if (!intel_encoder_is_mst(&dig_port->base)) { data->k = 1; data->streams[0].stream_id = 0; return 0; } data->k = 0; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.status == connector_status_disconnected) continue; if (!intel_encoder_is_mst(intel_attached_encoder(connector))) continue; conn_dig_port = intel_attached_dig_port(connector); if (conn_dig_port != dig_port) continue; data->streams[data->k].stream_id = intel_conn_to_vcpi(&state->base, connector); data->k++; /* if there is only one active stream */ if (dig_port->dp.active_mst_links <= 1) break; } drm_connector_list_iter_end(&conn_iter); if (drm_WARN_ON(&i915->drm, data->k > 
INTEL_NUM_PIPES(i915) || data->k == 0)) return -EINVAL; return 0; } int intel_hdcp_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; int ret = -EINVAL; if (!hdcp->shim) return -ENOENT; if (!connector->encoder) { drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n", connector->base.name, connector->base.base.id); return -ENODEV; } mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); drm_WARN_ON(&i915->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = (u8)conn_state->hdcp_content_type; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; hdcp->stream_transcoder = pipe_config->cpu_transcoder; } else { hdcp->cpu_transcoder = pipe_config->cpu_transcoder; hdcp->stream_transcoder = INVALID_TRANSCODER; } if (DISPLAY_VER(i915) >= 12) dig_port->hdcp_port_data.hdcp_transcoder = intel_get_hdcp_transcoder(hdcp->cpu_transcoder); /* * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup * is capable of HDCP2.2, it is preferred to use HDCP2.2. */ if (intel_hdcp2_capable(connector)) { ret = intel_hdcp_set_streams(dig_port, state); if (!ret) { ret = _intel_hdcp2_enable(connector); if (!ret) check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; } else { drm_dbg_kms(&i915->drm, "Set content streams failed: (%d)\n", ret); } } /* * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will * be attempted. 
*/ if (ret && intel_hdcp_capable(connector) && hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { ret = _intel_hdcp_enable(connector); } if (!ret) { queue_delayed_work(i915->unordered_wq, &hdcp->check_work, check_link_interval); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED, true); } mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } int intel_hdcp_disable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret = 0; if (!hdcp->shim) return -ENOENT; mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp_mutex); if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); if (hdcp->hdcp2_encrypted) ret = _intel_hdcp2_disable(connector, false); else if (hdcp->hdcp_encrypted) ret = _intel_hdcp_disable(connector); out: mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); cancel_delayed_work_sync(&hdcp->check_work); return ret; } void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_hdcp *hdcp = &connector->hdcp; bool content_protection_type_changed, desired_and_not_enabled = false; struct drm_i915_private *i915 = to_i915(connector->base.dev); if (!connector->hdcp.shim) return; content_protection_type_changed = (conn_state->hdcp_content_type != hdcp->content_type && conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED); /* * During the HDCP encryption session if Type change is requested, * disable the HDCP and reenable it with new TYPE value. */ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED || content_protection_type_changed) intel_hdcp_disable(connector); /* * Mark the hdcp state as DESIRED after the hdcp disable of type * change procedure. */ if (content_protection_type_changed) { mutex_lock(&hdcp->mutex); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; drm_connector_get(&connector->base); queue_work(i915->unordered_wq, &hdcp->prop_work); mutex_unlock(&hdcp->mutex); } if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { mutex_lock(&hdcp->mutex); /* Avoid enabling hdcp, if it already ENABLED */ desired_and_not_enabled = hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; mutex_unlock(&hdcp->mutex); /* * If HDCP already ENABLED and CP property is DESIRED, schedule * prop_work to update correct CP property to user space. 
*/ if (!desired_and_not_enabled && !content_protection_type_changed) { drm_connector_get(&connector->base); queue_work(i915->unordered_wq, &hdcp->prop_work); } } if (desired_and_not_enabled || content_protection_type_changed) intel_hdcp_enable(state, encoder, crtc_state, conn_state); } void intel_hdcp_component_fini(struct drm_i915_private *i915) { mutex_lock(&i915->display.hdcp.hdcp_mutex); if (!i915->display.hdcp.comp_added) { mutex_unlock(&i915->display.hdcp.hdcp_mutex); return; } i915->display.hdcp.comp_added = false; mutex_unlock(&i915->display.hdcp.hdcp_mutex); if (intel_hdcp_gsc_cs_required(i915)) intel_hdcp_gsc_fini(i915); else component_del(i915->drm.dev, &i915_hdcp_ops); } void intel_hdcp_cleanup(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; if (!hdcp->shim) return; /* * If the connector is registered, it's possible userspace could kick * off another HDCP enable, which would re-spawn the workers. */ drm_WARN_ON(connector->base.dev, connector->base.registration_state == DRM_CONNECTOR_REGISTERED); /* * Now that the connector is not registered, check_work won't be run, * but cancel any outstanding instances of it */ cancel_delayed_work_sync(&hdcp->check_work); /* * We don't cancel prop_work in the same way as check_work since it * requires connection_mutex which could be held while calling this * function. Instead, we rely on the connector references grabbed before * scheduling prop_work to ensure the connector is alive when prop_work * is run. So if we're in the destroy path (which is where this * function should be called), we're "guaranteed" that prop_work is not * active (tl;dr This Should Never Happen). */ drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); mutex_lock(&hdcp->mutex); hdcp->shim = NULL; mutex_unlock(&hdcp->mutex); } void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state) { u64 old_cp = old_state->content_protection; u64 new_cp = new_state->content_protection; struct drm_crtc_state *crtc_state; if (!new_state->crtc) { /* * If the connector is being disabled with CP enabled, mark it * desired so it's re-enabled when the connector is brought back */ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; return; } crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc); /* * Fix the HDCP uapi content protection state in case of modeset. * FIXME: As per HDCP content protection property uapi doc, an uevent() * need to be sent if there is transition from ENABLED->DESIRED. */ if (drm_atomic_crtc_needs_modeset(crtc_state) && (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* * Nothing to do if the state didn't change, or HDCP was activated since * the last commit. And also no change in hdcp content type. 
*/ if (old_cp == new_cp || (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { if (old_state->hdcp_content_type == new_state->hdcp_content_type) return; } crtc_state->mode_changed = true; } /* Handles the CP_IRQ raised from the DP HDCP sink */ void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; struct drm_i915_private *i915 = to_i915(connector->base.dev); if (!hdcp->shim) return; atomic_inc(&connector->hdcp.cp_irq_count); wake_up_all(&connector->hdcp.cp_irq_queue); queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); }
linux-master
drivers/gpu/drm/i915/display/intel_hdcp.c
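The repeater-topology handler in the file above converts the 3-byte, big-endian seq_num_v from the RecvId_List message and rejects both a non-zero value on the very first message (before encryption is up) and a value smaller than the last one seen, which indicates a counter roll-over. The standalone C sketch below illustrates only that arithmetic; be24_to_host() and check_seq_num_v() are made-up names for illustration, not the kernel helpers, and the real driver returns -EINVAL where this sketch returns -1.

/*
 * Standalone sketch (plain C, no kernel headers) of the seq_num_v handling
 * visible in hdcp2_authenticate_repeater_topology() above. be24_to_host()
 * is a hypothetical stand-in for drm_hdcp_be24_to_cpu().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t be24_to_host(const uint8_t seq[3])
{
	/* seq_num_v is transmitted big-endian, most significant byte first */
	return ((uint32_t)seq[0] << 16) | ((uint32_t)seq[1] << 8) | seq[2];
}

/* 0 when the new value is acceptable, -1 when reauthentication is needed */
static int check_seq_num_v(uint32_t new_v, uint32_t last_v, int already_encrypted)
{
	/* the first RecvId_List before encryption must carry seq_num_v == 0 */
	if (!already_encrypted && new_v != 0)
		return -1;
	/* a value smaller than the stored one means the 24-bit counter rolled over */
	if (new_v < last_v)
		return -1;
	return 0;
}

int main(void)
{
	const uint8_t wire[3] = { 0x00, 0x00, 0x02 };	/* example value 2 from the repeater */
	uint32_t v = be24_to_host(wire);

	printf("seq_num_v = %u, ok = %d\n", (unsigned)v, check_seq_num_v(v, 1, 1) == 0);
	return 0;
}
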
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/string_helpers.h> #include "i915_reg.h" #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" struct intel_fdi_funcs { void (*fdi_link_train)(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state); }; static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { bool cur_state; if (HAS_DDI(dev_priv)) { /* * DDI does not have a specific FDI_TX register. * * FDI is never fed from EDP transcoder * so pipe->transcoder cast is fine here. */ enum transcoder cpu_transcoder = (enum transcoder)pipe; cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; } else { cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; } I915_STATE_WARN(dev_priv, cur_state != state, "FDI TX state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_tx(i915, pipe, true); } void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_tx(i915, pipe, false); } static void assert_fdi_rx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { bool cur_state; cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; I915_STATE_WARN(dev_priv, cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_rx(i915, pipe, true); } void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_rx(i915, pipe, false); } void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) { bool cur_state; /* ILK FDI PLL is always enabled */ if (IS_IRONLAKE(i915)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ if (HAS_DDI(i915)) return; cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; I915_STATE_WARN(i915, !cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll(struct drm_i915_private *i915, enum pipe pipe, bool state) { bool cur_state; cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; I915_STATE_WARN(i915, cur_state != state, "FDI RX PLL assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_rx_pll(i915, pipe, true); } void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) { assert_fdi_rx_pll(i915, pipe, false); } void intel_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state); } /* units of 100MHz */ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) { if (crtc_state->hw.enable && crtc_state->has_pch_encoder) return crtc_state->fdi_lanes; return 0; } static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->uapi.state; struct intel_crtc *other_crtc; struct intel_crtc_state 
*other_crtc_state; drm_dbg_kms(&dev_priv->drm, "checking fdi config on pipe %c, lanes %i\n", pipe_name(pipe), pipe_config->fdi_lanes); if (pipe_config->fdi_lanes > 4) { drm_dbg_kms(&dev_priv->drm, "invalid fdi lane config on pipe %c: %i lanes\n", pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { if (pipe_config->fdi_lanes > 2) { drm_dbg_kms(&dev_priv->drm, "only 2 lanes on haswell, required: %i lanes\n", pipe_config->fdi_lanes); return -EINVAL; } else { return 0; } } if (INTEL_NUM_PIPES(dev_priv) == 2) return 0; /* Ivybridge 3 pipe is really complicated */ switch (pipe) { case PIPE_A: return 0; case PIPE_B: if (pipe_config->fdi_lanes <= 2) return 0; other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) return PTR_ERR(other_crtc_state); if (pipe_required_fdi_lanes(other_crtc_state) > 0) { drm_dbg_kms(&dev_priv->drm, "invalid shared fdi lane config on pipe %c: %i lanes\n", pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } return 0; case PIPE_C: if (pipe_config->fdi_lanes > 2) { drm_dbg_kms(&dev_priv->drm, "only 2 lanes on pipe %c: required %i lanes\n", pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) return PTR_ERR(other_crtc_state); if (pipe_required_fdi_lanes(other_crtc_state) > 2) { drm_dbg_kms(&dev_priv->drm, "fdi link B uses too many lanes to enable link C\n"); return -EINVAL; } return 0; default: MISSING_CASE(pipe); return 0; } } void intel_fdi_pll_freq_update(struct drm_i915_private *i915) { if (IS_IRONLAKE(i915)) { u32 fdi_pll_clk = intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000; } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) { i915->display.fdi.pll_freq = 270000; } else { return; } drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq); } int intel_fdi_link_freq(struct drm_i915_private *i915, const struct intel_crtc_state *pipe_config) { if (HAS_DDI(i915)) return pipe_config->port_clock; /* SPLL */ else return i915->display.fdi.pll_freq; } int ilk_fdi_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *i915 = to_i915(dev); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int lane, link_bw, fdi_dotclock, ret; bool needs_recompute = false; retry: /* FDI is a binary signal running at ~2.7GHz, encoding * each output octet as 10 bits. The actual frequency * is stored as a divider into a 100MHz clock, and the * mode pixel clock is stored in units of 1KHz. 
* Hence the bw of each lane in terms of the mode signal * is: */ link_bw = intel_fdi_link_freq(i915, pipe_config); fdi_dotclock = adjusted_mode->crtc_clock; lane = ilk_get_lanes_required(fdi_dotclock, link_bw, pipe_config->pipe_bpp); pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n, false); ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config); if (ret == -EDEADLK) return ret; if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { pipe_config->pipe_bpp -= 2*3; drm_dbg_kms(&i915->drm, "fdi link bw constraint, reducing pipe bpp to %i\n", pipe_config->pipe_bpp); needs_recompute = true; pipe_config->bw_constrained = true; goto retry; } if (needs_recompute) return -EAGAIN; return ret; } static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) { u32 temp; temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) return; drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); temp &= ~FDI_BC_BIFURCATION_SELECT; if (enable) temp |= FDI_BC_BIFURCATION_SELECT; drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", enable ? "en" : "dis"); intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); } static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); switch (crtc->pipe) { case PIPE_A: break; case PIPE_B: if (crtc_state->fdi_lanes > 2) cpt_set_fdi_bc_bifurcation(dev_priv, false); else cpt_set_fdi_bc_bifurcation(dev_priv, true); break; case PIPE_C: cpt_set_fdi_bc_bifurcation(dev_priv, true); break; default: MISSING_CASE(crtc->pipe); } } void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; /* enable normal train */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if (IS_IVYBRIDGE(dev_priv)) { temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; } intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_NORMAL_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE; } intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); /* wait one idle pattern time */ intel_de_posting_read(dev_priv, reg); udelay(1000); /* IVB wants error correction enabled */ if (IS_IVYBRIDGE(dev_priv)) intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } /* The FDI link training functions for ILK/Ibexpeak. */ static void ilk_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, tries; /* * Write the TU size bits before fdi link training, so that error * detection works. 
*/ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* FDI needs bits from pipe first */ assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; intel_de_write(dev_priv, reg, temp); intel_de_read(dev_priv, reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN); reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if ((temp & FDI_RX_BIT_LOCK)) { drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n"); intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK); break; } } if (tries == 5) drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(150); reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { intel_de_write(dev_priv, reg, temp | FDI_RX_SYMBOL_LOCK); drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n"); break; } } if (tries == 5) drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); drm_dbg_kms(&dev_priv->drm, "FDI train done\n"); } static const int snb_b_fdi_train_param[] = { FDI_LINK_TRAIN_400MV_0DB_SNB_B, FDI_LINK_TRAIN_400MV_6DB_SNB_B, FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, FDI_LINK_TRAIN_800MV_0DB_SNB_B, }; /* The FDI link training functions for SNB/Cougarpoint. */ static void gen6_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, retry; /* * Write the TU size bits before fdi link training, so that error * detection works. 
*/ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); intel_de_write(dev_priv, FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(150); for (i = 0; i < 4; i++) { intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { reg = FDI_RX_IIR(pipe); temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK) { intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK); drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n"); break; } udelay(50); } if (retry < 5) break; } if (i == 4) drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; if (IS_SANDYBRIDGE(dev_priv)) { temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; } intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; } intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); udelay(150); for (i = 0; i < 4; i++) { intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { reg = FDI_RX_IIR(pipe); temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { intel_de_write(dev_priv, reg, temp | FDI_RX_SYMBOL_LOCK); drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n"); break; } udelay(50); } if (retry < 5) break; } if (i == 4) drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); } /* Manual link training for Ivy Bridge A0 parts */ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, j; ivb_update_fdi_bc_bifurcation(crtc_state); /* * Write the TU size bits before fdi link training, so that error * detection works. 
*/ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); udelay(150); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n", intel_de_read(dev_priv, FDI_RX_IIR(pipe))); /* Try each vswing and preemphasis setting twice before moving on */ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { /* disable first in case we need to retry */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); temp &= ~FDI_TX_ENABLE; intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_AUTO; temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp &= ~FDI_RX_ENABLE; intel_de_write(dev_priv, reg, temp); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[j/2]; temp |= FDI_COMPOSITE_SYNC; intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); intel_de_write(dev_priv, FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; temp |= FDI_COMPOSITE_SYNC; intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(1); /* should be 0.5us */ for (i = 0; i < 4; i++) { reg = FDI_RX_IIR(pipe); temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK || (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) { intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK); drm_dbg_kms(&dev_priv->drm, "FDI train 1 done, level %i.\n", i); break; } udelay(1); /* should be 0.5us */ } if (i == 4) { drm_dbg_kms(&dev_priv->drm, "FDI train 1 fail on vswing %d\n", j / 2); continue; } /* Train 2 */ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_NONE_IVB, FDI_LINK_TRAIN_PATTERN_2_IVB); intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_LINK_TRAIN_PATTERN_MASK_CPT, FDI_LINK_TRAIN_PATTERN_2_CPT); intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(2); /* should be 1.5us */ for (i = 0; i < 4; i++) { reg = FDI_RX_IIR(pipe); temp = intel_de_read(dev_priv, reg); drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK || (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) { intel_de_write(dev_priv, reg, temp | FDI_RX_SYMBOL_LOCK); drm_dbg_kms(&dev_priv->drm, "FDI train 2 done, level %i.\n", i); goto train_done; } udelay(2); /* should be 1.5us */ } if (i == 4) drm_dbg_kms(&dev_priv->drm, "FDI train 2 fail on vswing %d\n", j / 2); } train_done: drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); } /* Starting with Haswell, different DDI ports can work in FDI mode for * connection to the PCH-located connectors. For this, it is necessary to train * both the DDI port and PCH receiver for the desired DDI buffer settings. * * The recommended port to work in FDI mode is DDI E, which we use here. 
Also, * please note that when FDI mode is active on DDI E, it shares 2 lines with * DDI A (which is used for eDP) */ void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 temp, i, rx_ctl_val; int n_entries; encoder->get_buf_trans(encoder, crtc_state, &n_entries); hsw_prepare_dp_ddi_buffers(encoder, crtc_state); /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the * mode set "sequence for CRT port" document: * - TP1 to TP2 time with the default value * - FDI delay to 90h * * WaFDIAutoLinkSetTimingOverrride:hsw */ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); /* Enable the PCH Receiver FDI PLL */ rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | FDI_RX_PLL_ENABLE | FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); udelay(220); /* Switch from Rawclk to PCDclk */ rx_ctl_val |= FDI_PCDCLK; intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); /* Configure Port Clock Select */ drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL); intel_ddi_enable_clock(encoder, crtc_state); /* Start the training iterating through available voltages and emphasis, * testing each value twice. */ for (i = 0; i < n_entries * 2; i++) { /* Configure DP_TP_CTL with auto-training */ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE); /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. * DDI E does not support port reversal, the functionality is * achieved on the PCH side in FDI_RX_CTL, so no need to set the * port reversal bit */ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2)); intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); udelay(600); /* Program PCH FDI Receiver TU */ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); /* Enable PCH FDI Receiver with auto-training */ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); /* Wait for FDI receiver lane calibration */ udelay(30); /* Unset FDI_RX_MISC pwrdn lanes */ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); /* Wait for FDI auto training time */ udelay(5); temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { drm_dbg_kms(&dev_priv->drm, "FDI link training done on step %d\n", i); break; } /* * Leave things enabled even if we failed to train FDI. * Results in less fireworks from the state checker. 
*/ if (i == n_entries * 2 - 1) { drm_err(&dev_priv->drm, "FDI link training failed!\n"); break; } rx_ctl_val &= ~FDI_RX_ENABLE; intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0); intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); intel_wait_ddi_buf_idle(dev_priv, PORT_E); /* Reset FDI_RX_MISC pwrdn lanes */ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); } /* Enable normal pixel sending for FDI */ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE); } void hsw_fdi_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); /* * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, * step 13 is the correct place for it. Step 18 is where it was * originally before the BUN. */ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0); intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_wait_ddi_buf_idle(dev_priv, PORT_E); intel_ddi_disable_clock(encoder); intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0); intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0); } void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(200); /* Switch from Rawclk to PCDclk */ intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK); intel_de_posting_read(dev_priv, reg); udelay(200); /* Enable CPU FDI TX PLL, always on for Ironlake */ reg = FDI_TX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if ((temp & FDI_TX_PLL_ENABLE) == 0) { intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(100); } } void ilk_fdi_pll_disable(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; /* Switch from PCDclk to Rawclk */ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0); /* Disable CPU FDI TX PLL */ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(100); /* Wait for the clocks to turn off. 
*/ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(100); } void ilk_fdi_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; /* disable CPU FDI tx and PCH FDI rx */ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0); intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~(0x7 << 16); temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(100); /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev_priv)) intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); /* still set train pattern 1 */ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } /* BPC in FDI rx is consistent with that in TRANSCONF */ temp &= ~(0x07 << 16); temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); udelay(100); } static const struct intel_fdi_funcs ilk_funcs = { .fdi_link_train = ilk_fdi_link_train, }; static const struct intel_fdi_funcs gen6_funcs = { .fdi_link_train = gen6_fdi_link_train, }; static const struct intel_fdi_funcs ivb_funcs = { .fdi_link_train = ivb_manual_fdi_link_train, }; void intel_fdi_init_hook(struct drm_i915_private *dev_priv) { if (IS_IRONLAKE(dev_priv)) { dev_priv->display.funcs.fdi = &ilk_funcs; } else if (IS_SANDYBRIDGE(dev_priv)) { dev_priv->display.funcs.fdi = &gen6_funcs; } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.funcs.fdi = &ivb_funcs; } }
linux-master
drivers/gpu/drm/i915/display/intel_fdi.c
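ilk_fdi_compute_config() in the file above sizes the FDI link from the math its comment describes: each lane runs at the ~2.7 GHz link rate (a 270 MHz symbol clock) and, with 8b/10b coding, carries 8 payload bits per symbol. The sketch below is only an approximation of that arithmetic; ilk_get_lanes_required() itself is not shown in this file, and the real helper may apply a spread-spectrum margin that this simplified version ignores.

/*
 * Back-of-the-envelope sketch of the FDI lane calculation suggested by the
 * comment in ilk_fdi_compute_config(). Not the kernel helper; an illustrative
 * approximation only.
 */
#include <stdio.h>

/* dotclock in kHz, link symbol clock in kHz (~270000 for the 2.7 GHz FDI link), bpp in bits */
static int fdi_lanes_required(int dotclock_khz, int link_bw_khz, int bpp)
{
	/* 8b/10b coding: each lane carries 8 payload bits per symbol clock */
	long long payload_per_lane = (long long)link_bw_khz * 8;
	long long needed = (long long)dotclock_khz * bpp;

	/* round up: a partially used lane still has to be enabled */
	return (int)((needed + payload_per_lane - 1) / payload_per_lane);
}

int main(void)
{
	/* 1920x1080@60 (148.5 MHz dotclock) at 24 bpp fits in 2 of the 4 FDI lanes */
	printf("lanes = %d\n", fdi_lanes_required(148500, 270000, 24));
	return 0;
}
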
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_hdmi.h" #include "intel_psr.h" #include "intel_psr_regs.h" #include "intel_snps_phy.h" #include "skl_universal_plane.h" /** * DOC: Panel Self Refresh (PSR/SRD) * * Since Haswell Display controller supports Panel Self-Refresh on display * panels witch have a remote frame buffer (RFB) implemented according to PSR * spec in eDP1.3. PSR feature allows the display to go to lower standby states * when system is idle but display is on as it eliminates display refresh * request to DDR memory completely as long as the frame buffer for that * display is unchanged. * * Panel Self Refresh must be supported by both Hardware (source) and * Panel (sink). * * PSR saves power by caching the framebuffer in the panel RFB, which allows us * to power down the link and memory controller. For DSI panels the same idea * is called "manual mode". * * The implementation uses the hardware-based PSR support which automatically * enters/exits self-refresh mode. The hardware takes care of sending the * required DP aux message and could even retrain the link (that part isn't * enabled yet though). The hardware also keeps track of any frontbuffer * changes to know when to exit self-refresh mode again. Unfortunately that * part doesn't work too well, hence why the i915 PSR support uses the * software frontbuffer tracking to make sure it doesn't miss a screen * update. For this integration intel_psr_invalidate() and intel_psr_flush() * get called by the frontbuffer tracking code. Note that because of locking * issues the self-refresh re-enable code is done from a work queue, which * must be correctly synchronized/cancelled when shutting down the pipe." * * DC3CO (DC3 clock off) * * On top of PSR2, GEN12 adds a intermediate power savings state that turns * clock off automatically during PSR2 idle state. * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep * entry/exit allows the HW to enter a low-power state even when page flipping * periodically (for instance a 30fps video playback scenario). 
* * Every time a flip occurs PSR2 will get out of deep sleep state (if it was), * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6 * frames, if no other flip occurs and the function above is executed, DC3CO is * disabled and PSR2 is configured to enter deep sleep, resetting again in case * of another flip. * Front buffer modifications do not trigger DC3CO activation on purpose as it * would bring a lot of complexity and most of the modern systems will only * use page flips. */ /* * Description of PSR mask bits: * * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl): * * When unmasked (nearly) all display register writes (eg. even * SWF) trigger a PSR exit. Some registers are excluded from this * and they have a more specific mask (described below). On icl+ * this bit no longer exists and is effectively always set. * * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+): * * When unmasked (nearly) all pipe/plane register writes * trigger a PSR exit. Some plane registers are excluded from this * and they have a more specific mask (described below). * * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+): * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw): * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw): * * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit. * SPR_SURF/CURBASE are not included in this and instead are * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw). * * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw): * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw): * * When unmasked PSR is blocked as long as the sprite * plane is enabled. skl+ with their universal planes no * longer have a mask bit like this, and no plane being * enabled blocks PSR. * * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw): * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw): * * When unmasked CURPOS writes trigger a PSR exit. On skl+ * this doesn't exist but CURPOS is included in the * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask. * * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+): * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw): * * When unmasked PSR is blocked as long as vblank and/or vsync * interrupt is unmasked in IMR *and* enabled in IER. * * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+): * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw): * * Selects whether PSR exit generates an extra vblank before * the first frame is transmitted. Also note the opposite polarity * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank, * unmasked==do not generate the extra vblank). * * With DC states enabled the extra vblank happens after link training, * with DC states disabled it happens immediately upon PSR exit trigger. * No idea as of now why there is a difference. HSW/BDW (which don't * even have DMC) always generate it after link training. Go figure. * * Unfortunately CHICKEN_TRANS itself seems to be double buffered * and thus won't latch until the first vblank. So with DC states * enabled the register effectively uses the reset value during DC5 * exit+PSR exit sequence, and thus the bit does nothing until * latched by the vblank that it was trying to prevent from being * generated in the first place. So we should probably call this * one a chicken/egg bit instead on skl+. 
* * In standby mode (as opposed to link-off) this makes no difference * as the timing generator keeps running the whole time generating * normal periodic vblanks. * * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw, * and doing so makes the behaviour match the skl+ reset value. * * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw): * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw): * * On BDW without this bit is no vblanks whatsoever are * generated after PSR exit. On HSW this has no apparant effect. * WaPsrDPRSUnmaskVBlankInSRD says to set this. * * The rest of the bits are more self-explanatory and/or * irrelevant for normal operation. */ static bool psr_global_enabled(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: if (i915->params.enable_psr == -1) return connector->panel.vbt.psr.enable; return i915->params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; default: return true; } } static bool psr2_global_enabled(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; default: if (i915->params.enable_psr == 1) return false; return true; } } static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR : EDP_PSR_ERROR(intel_dp->psr.transcoder); } static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT : EDP_PSR_POST_EXIT(intel_dp->psr.transcoder); } static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY : EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder); } static u32 psr_irq_mask_get(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); return DISPLAY_VER(dev_priv) >= 12 ? 
TGL_PSR_MASK : EDP_PSR_MASK(intel_dp->psr.transcoder); } static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_CTL(cpu_transcoder); else return HSW_SRD_CTL; } static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_DEBUG(cpu_transcoder); else return HSW_SRD_DEBUG; } static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_PERF_CNT(cpu_transcoder); else return HSW_SRD_PERF_CNT; } static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_STATUS(cpu_transcoder); else return HSW_SRD_STATUS; } static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 12) return TRANS_PSR_IMR(cpu_transcoder); else return EDP_PSR_IMR; } static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 12) return TRANS_PSR_IIR(cpu_transcoder); else return EDP_PSR_IIR; } static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_AUX_CTL(cpu_transcoder); else return HSW_SRD_AUX_CTL; } static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, int i) { if (DISPLAY_VER(dev_priv) >= 8) return EDP_PSR_AUX_DATA(cpu_transcoder, i); else return HSW_SRD_AUX_DATA(i); } static void psr_irq_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; mask = psr_irq_psr_error_bit_get(intel_dp); if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) mask |= psr_irq_post_exit_bit_get(intel_dp) | psr_irq_pre_entry_bit_get(intel_dp); intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), psr_irq_mask_get(intel_dp), ~mask); } static void psr_event_print(struct drm_i915_private *i915, u32 val, bool psr2_enabled) { drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val); if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n"); if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled) drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n"); if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n"); if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n"); if (val & PSR_EVENT_GRAPHICS_RESET) drm_dbg_kms(&i915->drm, "\tGraphics reset\n"); if (val & PSR_EVENT_PCH_INTERRUPT) drm_dbg_kms(&i915->drm, "\tPCH interrupt\n"); if (val & PSR_EVENT_MEMORY_UP) drm_dbg_kms(&i915->drm, "\tMemory up\n"); if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) drm_dbg_kms(&i915->drm, "\tFront buffer modification\n"); if (val & PSR_EVENT_WD_TIMER_EXPIRE) drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n"); if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n"); if (val & PSR_EVENT_REGISTER_UPDATE) drm_dbg_kms(&i915->drm, "\tRegister updated\n"); if (val & PSR_EVENT_HDCP_ENABLE) drm_dbg_kms(&i915->drm, "\tHDCP enabled\n"); if (val & PSR_EVENT_KVMR_SESSION_ENABLE) drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); if (val & PSR_EVENT_VBI_ENABLE) drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); if (val 
& PSR_EVENT_LPSP_MODE_EXIT) drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); } void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; ktime_t time_ns = ktime_get(); if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) { intel_dp->psr.last_entry_attempt = time_ns; drm_dbg_kms(&dev_priv->drm, "[transcoder %s] PSR entry attempt in 2 vblanks\n", transcoder_name(cpu_transcoder)); } if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) { intel_dp->psr.last_exit = time_ns; drm_dbg_kms(&dev_priv->drm, "[transcoder %s] PSR exit completed\n", transcoder_name(cpu_transcoder)); if (DISPLAY_VER(dev_priv) >= 9) { u32 val; val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0); psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled); } } if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); intel_dp->psr.irq_aux_error = true; /* * If this interruption is not masked it will keep * interrupting so fast that it prevents the scheduled * work to run. * Also after a PSR error, we don't want to arm PSR * again so we don't care about unmask the interruption * or unset irq_aux_error. */ intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder), 0, psr_irq_psr_error_bit_get(intel_dp)); queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); } } static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) { u8 alpm_caps = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps) != 1) return false; return alpm_caps & DP_ALPM_CAP; } static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 val = 8; /* assume the worst if we can't read the value */ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; else drm_dbg_kms(&i915->drm, "Unable to get sink synchronization latency, assuming 8 frames\n"); return val; } static void intel_dp_get_su_granularity(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); ssize_t r; u16 w; u8 y; /* If sink don't have specific granularity requirements set legacy ones */ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) { /* As PSR2 HW sends full lines, we do not care about x granularity */ w = 4; y = 4; goto exit; } r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2); if (r != 2) drm_dbg_kms(&i915->drm, "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); /* * Spec says that if the value read is 0 the default granularity should * be used instead. 
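	 *
	 * For illustration, assuming a hypothetical sink that does set
	 * DP_PSR2_SU_GRANULARITY_REQUIRED, the fallbacks below behave as:
	 *
	 *	X granularity read OK but reports 0  ->  w = 4 (default)
	 *	Y granularity read fails             ->  y = 4 (default)
	 *	Y granularity read OK but reports 0  ->  y = 1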
*/ if (r != 2 || w == 0) w = 4; r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1); if (r != 1) { drm_dbg_kms(&i915->drm, "Unable to read DP_PSR2_SU_Y_GRANULARITY\n"); y = 4; } if (y == 0) y = 1; exit: intel_dp->psr.su_w_granularity = w; intel_dp->psr.su_y_granularity = y; } void intel_psr_init_dpcd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, sizeof(intel_dp->psr_dpcd)); if (!intel_dp->psr_dpcd[0]) return; drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { drm_dbg_kms(&dev_priv->drm, "PSR support not currently available for this panel\n"); return; } if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { drm_dbg_kms(&dev_priv->drm, "Panel lacks power state control, PSR cannot be enabled\n"); return; } intel_dp->psr.sink_support = true; intel_dp->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); if (DISPLAY_VER(dev_priv) >= 9 && (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { bool y_req = intel_dp->psr_dpcd[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; bool alpm = intel_dp_get_alpm_status(intel_dp); /* * All panels that supports PSR version 03h (PSR2 + * Y-coordinate) can handle Y-coordinates in VSC but we are * only sure that it is going to be used when required by the * panel. This way panel is capable to do selective update * without a aux frame sync. * * To support PSR version 02h and PSR version 03h without * Y-coordinate requirement panels we would need to enable * GTC first. */ intel_dp->psr.sink_psr2_support = y_req && alpm; drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", intel_dp->psr.sink_psr2_support ? 
"" : "not "); if (intel_dp->psr.sink_psr2_support) { intel_dp->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); intel_dp_get_su_granularity(intel_dp); } } } static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 aux_clock_divider, aux_ctl; /* write DP_SET_POWER=D0 */ static const u8 aux_msg[] = { [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf), [1] = (DP_SET_POWER >> 8) & 0xff, [2] = DP_SET_POWER & 0xff, [3] = 1 - 1, [4] = DP_SET_POWER_D0, }; int i; BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) intel_de_write(dev_priv, psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2), intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); /* Start with bits set for DDI_AUX_CTL register */ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), aux_clock_divider); /* Select only valid bits for SRD_AUX_CTL */ aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK | EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder), aux_ctl); } static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; /* Enable ALPM at sink for psr2 */ if (intel_dp->psr.psr2_enabled) { drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; } else { if (intel_dp->psr.link_standby) dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; if (DISPLAY_VER(dev_priv) >= 8) dpcd_val |= DP_PSR_CRC_VERIFICATION; } if (intel_dp->psr.req_psr2_sdp_prior_scanline) dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val = 0; if (DISPLAY_VER(dev_priv) >= 11) val |= EDP_PSR_TP4_TIME_0us; if (dev_priv->params.psr_safest_params) { val |= EDP_PSR_TP1_TIME_2500us; val |= EDP_PSR_TP2_TP3_TIME_2500us; goto check_tp3_sel; } if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0) val |= EDP_PSR_TP1_TIME_0us; else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100) val |= EDP_PSR_TP1_TIME_100us; else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500) val |= EDP_PSR_TP1_TIME_500us; else val |= EDP_PSR_TP1_TIME_2500us; if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) val |= EDP_PSR_TP2_TP3_TIME_0us; else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR_TP2_TP3_TIME_100us; else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500) val |= EDP_PSR_TP2_TP3_TIME_500us; else val |= EDP_PSR_TP2_TP3_TIME_2500us; /* * WA 0479: hsw,bdw * "Do not skip both TP1 and TP2/TP3" */ if (DISPLAY_VER(dev_priv) < 9 && connector->panel.vbt.psr.tp1_wakeup_time_us == 0 && connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) val |= EDP_PSR_TP2_TP3_TIME_100us; check_tp3_sel: if (intel_dp_source_supports_tps3(dev_priv) && drm_dp_tps3_supported(intel_dp->dpcd)) val |= EDP_PSR_TP_TP1_TP3; else val |= EDP_PSR_TP_TP1_TP2; return val; } static u8 psr_compute_idle_frames(struct 
intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int idle_frames; /* Let's use 6 as the minimum to cover all known cases including the * off-by-one issue that HW has in some cases. */ idle_frames = max(6, connector->panel.vbt.psr.idle_frames); idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1); if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) idle_frames = 0xf; return idle_frames; } static void hsw_activate_psr1(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); if (IS_HASWELL(dev_priv)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; if (intel_dp->psr.link_standby) val |= EDP_PSR_LINK_STANDBY; val |= intel_psr1_get_tp_time(intel_dp); if (DISPLAY_VER(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); } static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val = 0; if (dev_priv->params.psr_safest_params) return EDP_PSR2_TP2_TIME_2500us; if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) val |= EDP_PSR2_TP2_TIME_50us; else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR2_TP2_TIME_100us; else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) val |= EDP_PSR2_TP2_TIME_500us; else val |= EDP_PSR2_TP2_TIME_2500us; return val; } static int psr2_block_count_lines(struct intel_dp *intel_dp) { return intel_dp->psr.io_wake_lines < 9 && intel_dp->psr.fast_wake_lines < 9 ? 
8 : 12; } static int psr2_block_count(struct intel_dp *intel_dp) { return psr2_block_count_lines(intel_dp) / 4; } static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv)) val |= EDP_SU_TRACK_ENABLE; if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12) val |= EDP_Y_COORDINATE_ENABLE; val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); val |= intel_psr2_get_tp_time(intel_dp); if (DISPLAY_VER(dev_priv) >= 12) { if (psr2_block_count(intel_dp) > 2) val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; else val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; } /* Wa_22012278275:adl-p */ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { static const u8 map[] = { 2, /* 5 lines */ 1, /* 6 lines */ 0, /* 7 lines */ 3, /* 8 lines */ 6, /* 9 lines */ 5, /* 10 lines */ 4, /* 11 lines */ 7, /* 12 lines */ }; /* * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see * comments bellow for more information */ int tmp; tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES); tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES); } else if (DISPLAY_VER(dev_priv) >= 12) { val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } else if (DISPLAY_VER(dev_priv) >= 9) { val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) val |= EDP_PSR2_SU_SDP_SCANLINE; if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0); } /* * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. 
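	 *
	 * Writing 0 to the PSR1 control register below clears that field
	 * (along with every other PSR1 bit) before EDP_PSR2_CTL is armed;
	 * e.g. on an assumed DISPLAY_VER() >= 8 platform the sequence is
	 * roughly:
	 *
	 *	EDP_PSR_CTL(cpu_transcoder)  <- 0
	 *	EDP_PSR2_CTL(cpu_transcoder) <- EDP_PSR2_ENABLE | ...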
*/ intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0); intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val); } static bool transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; else if (DISPLAY_VER(dev_priv) >= 12) return cpu_transcoder == TRANSCODER_A; else if (DISPLAY_VER(dev_priv) >= 9) return cpu_transcoder == TRANSCODER_EDP; else return false; } static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) { if (!cstate || !cstate->hw.active) return 0; return DIV_ROUND_UP(1000 * 1000, drm_mode_vrefresh(&cstate->hw.adjusted_mode)); } static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), EDP_PSR2_IDLE_FRAMES_MASK, EDP_PSR2_IDLE_FRAMES(idle_frames)); } static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); psr2_program_idle_frames(intel_dp, 0); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); } static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp)); } static void tgl_dc3co_disable_work(struct work_struct *work) { struct intel_dp *intel_dp = container_of(work, typeof(*intel_dp), psr.dc3co_work.work); mutex_lock(&intel_dp->psr.lock); /* If delayed work is pending, it is not idle */ if (delayed_work_pending(&intel_dp->psr.dc3co_work)) goto unlock; tgl_psr2_disable_dc3co(intel_dp); unlock: mutex_unlock(&intel_dp->psr.lock); } static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp) { if (!intel_dp->psr.dc3co_exitline) return; cancel_delayed_work(&intel_dp->psr.dc3co_work); /* Before PSR2 exit disallow dc3co*/ tgl_psr2_disable_dc3co(intel_dp); } static bool dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum port port = dig_port->base.port; if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) return pipe <= PIPE_B && port <= PORT_B; else return pipe == PIPE_A && port == PORT_A; } static void tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 exit_scanlines; /* * FIXME: Due to the changed sequence of activating/deactivating DC3CO, * disable DC3CO until the changed dc3co activating/deactivating sequence * is applied. B.Specs:49196 */ return; /* * DMC's DC3CO exit mechanism has an issue with Selective Fecth * TODO: when the issue is addressed, this restriction should be removed. 
*/ if (crtc_state->enable_psr2_sel_fetch) return; if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO)) return; if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) return; /* Wa_16011303918:adl-p */ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) return; /* * DC3CO Exit time 200us B.Spec 49196 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 */ exit_scanlines = intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) return; crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; } static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!dev_priv->params.enable_psr2_sel_fetch && intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { drm_dbg_kms(&dev_priv->drm, "PSR2 sel fetch not enabled, disabled by parameter\n"); return false; } if (crtc_state->uapi.async_flip) { drm_dbg_kms(&dev_priv->drm, "PSR2 sel fetch not enabled, async flip enabled\n"); return false; } return crtc_state->enable_psr2_sel_fetch = true; } static bool psr2_granularity_check(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; u16 y_granularity = 0; /* PSR2 HW only send full lines so we only need to validate the width */ if (crtc_hdisplay % intel_dp->psr.su_w_granularity) return false; if (crtc_vdisplay % intel_dp->psr.su_y_granularity) return false; /* HW tracking is only aligned to 4 lines */ if (!crtc_state->enable_psr2_sel_fetch) return intel_dp->psr.su_y_granularity == 4; /* * adl_p and mtl platforms have 1 line granularity. * For other platforms with SW tracking we can adjust the y coordinates * to match sink requirement if multiple of 4. 
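	 *
	 * A few illustrative cases (hypothetical sink values on a
	 * non-adl_p/mtl platform), matching the checks below and assuming
	 * crtc_vdisplay divides evenly:
	 *
	 *	su_y_granularity == 8  ->  y_granularity = 8
	 *	su_y_granularity == 2  ->  y_granularity = 4
	 *	su_y_granularity == 6  ->  rejected (not a multiple of 4)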
*/ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) y_granularity = intel_dp->psr.su_y_granularity; else if (intel_dp->psr.su_y_granularity <= 2) y_granularity = 4; else if ((intel_dp->psr.su_y_granularity % 4) == 0) y_granularity = intel_dp->psr.su_y_granularity; if (y_granularity == 0 || crtc_vdisplay % y_granularity) return false; if (crtc_state->dsc.compression_enable && vdsc_cfg->slice_height % y_granularity) return false; crtc_state->su_y_granularity = y_granularity; return true; } static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 hblank_total, hblank_ns, req_ns; hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock); /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */ req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000); if ((hblank_ns - req_ns) > 100) return true; /* Not supported <13 / Wa_22012279113:adl-p */ if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) return false; crtc_state->req_psr2_sdp_prior_scanline = true; return true; } static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; u8 max_wake_lines; if (DISPLAY_VER(i915) >= 12) { io_wake_time = 42; /* * According to Bspec it's 42us, but based on testing * it is not enough -> use 45 us. */ fast_wake_time = 45; max_wake_lines = 12; } else { io_wake_time = 50; fast_wake_time = 32; max_wake_lines = 8; } io_wake_lines = intel_usecs_to_scanlines( &crtc_state->hw.adjusted_mode, io_wake_time); fast_wake_lines = intel_usecs_to_scanlines( &crtc_state->hw.adjusted_mode, fast_wake_time); if (io_wake_lines > max_wake_lines || fast_wake_lines > max_wake_lines) return false; if (i915->params.psr_safest_params) io_wake_lines = fast_wake_lines = max_wake_lines; /* According to Bspec lower limit should be set as 7 lines. 
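	 *
	 * As a rough example, assuming a hypothetical mode with a ~15 us
	 * line time on a DISPLAY_VER() >= 12 platform:
	 *
	 *	io_wake_lines   = DIV_ROUND_UP(42, 15) = 3  ->  raised to 7 below
	 *	fast_wake_lines = DIV_ROUND_UP(45, 15) = 3  ->  raised to 7 below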
*/ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); return true; } static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; if (!intel_dp->psr.sink_psr2_support) return false; /* JSL and EHL only supports eDP 1.3 */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n"); return false; } /* Wa_16011181250 */ if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG2(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n"); return false; } if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n"); return false; } if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not supported in transcoder %s\n", transcoder_name(crtc_state->cpu_transcoder)); return false; } if (!psr2_global_enabled(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); return false; } /* * DSC and PSR2 cannot be enabled simultaneously. If a requested * resolution requires DSC to be enabled, priority is given to DSC * over PSR2. */ if (crtc_state->dsc.compression_enable && (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) { drm_dbg_kms(&dev_priv->drm, "PSR2 cannot be enabled since DSC is enabled\n"); return false; } if (crtc_state->crc_enabled) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); return false; } if (DISPLAY_VER(dev_priv) >= 12) { psr_max_h = 5120; psr_max_v = 3200; max_bpp = 30; } else if (DISPLAY_VER(dev_priv) >= 10) { psr_max_h = 4096; psr_max_v = 2304; max_bpp = 24; } else if (DISPLAY_VER(dev_priv) == 9) { psr_max_h = 3640; psr_max_v = 2304; max_bpp = 24; } if (crtc_state->pipe_bpp > max_bpp) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, pipe bpp %d > max supported %d\n", crtc_state->pipe_bpp, max_bpp); return false; } /* Wa_16011303918:adl-p */ if (crtc_state->vrr.enable && IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, not compatible with HW stepping + VRR\n"); return false; } if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n"); return false; } if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, Unable to use long enough wake times\n"); return false; } /* Vblank >= PSR2_CTL Block Count Number maximum line count */ if (crtc_state->hw.adjusted_mode.crtc_vblank_end - crtc_state->hw.adjusted_mode.crtc_vblank_start < psr2_block_count_lines(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, too short vblank time\n"); return false; } if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); return false; } } if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); 
goto unsupported; } if (!crtc_state->enable_psr2_sel_fetch && (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, psr_max_h, psr_max_v); goto unsupported; } tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; unsupported: crtc_state->enable_psr2_sel_fetch = false; return false; } void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int psr_setup_time; /* * Current PSR panels don't work reliably with VRR enabled * So if VRR is enabled, do not enable PSR. */ if (crtc_state->vrr.enable) return; if (!CAN_PSR(intel_dp)) return; if (!psr_global_enabled(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); return; } if (intel_dp->psr.sink_not_reliable) { drm_dbg_kms(&dev_priv->drm, "PSR sink implementation is not reliable\n"); return; } if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { drm_dbg_kms(&dev_priv->drm, "PSR condition failed: Interlaced mode enabled\n"); return; } psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); if (psr_setup_time < 0) { drm_dbg_kms(&dev_priv->drm, "PSR condition failed: Invalid PSR setup time (0x%02x)\n", intel_dp->psr_dpcd[1]); return; } if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { drm_dbg_kms(&dev_priv->drm, "PSR condition failed: PSR setup time (%d us) too long\n", psr_setup_time); return; } crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, &crtc_state->psr_vsc); } void intel_psr_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_dp *intel_dp; u32 val; if (!dig_port) return; intel_dp = &dig_port->dp; if (!CAN_PSR(intel_dp)) return; mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) goto unlock; /* * Not possible to read EDP_PSR/PSR2_CTL registers as it is * enabled/disabled because of frontbuffer tracking and others. 
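	 * The PSR enable state below is therefore reported from the software
	 * state tracked in intel_dp->psr rather than read back from those
	 * registers.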
*/ pipe_config->has_psr = true; pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); if (!intel_dp->psr.psr2_enabled) goto unlock; if (HAS_PSR2_SEL_FETCH(dev_priv)) { val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder)); if (val & PSR2_MAN_TRK_CTL_ENABLE) pipe_config->enable_psr2_sel_fetch = true; } if (DISPLAY_VER(dev_priv) >= 12) { val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder)); pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val); } unlock: mutex_unlock(&intel_dp->psr.lock); } static void intel_psr_activate(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; drm_WARN_ON(&dev_priv->drm, transcoder_has_psr2(dev_priv, cpu_transcoder) && intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); lockdep_assert_held(&intel_dp->psr.lock); /* psr1 and psr2 are mutually exclusive.*/ if (intel_dp->psr.psr2_enabled) hsw_activate_psr2(intel_dp); else hsw_activate_psr1(intel_dp); intel_dp->psr.active = true; } static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp) { switch (intel_dp->psr.pipe) { case PIPE_A: return LATENCY_REPORTING_REMOVED_PIPE_A; case PIPE_B: return LATENCY_REPORTING_REMOVED_PIPE_B; case PIPE_C: return LATENCY_REPORTING_REMOVED_PIPE_C; case PIPE_D: return LATENCY_REPORTING_REMOVED_PIPE_D; default: MISSING_CASE(intel_dp->psr.pipe); return 0; } } /* * Wa_16013835468 * Wa_14015648006 */ static void wm_optimization_wa(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); bool set_wa_bit = false; /* Wa_14015648006 */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || IS_DISPLAY_VER(dev_priv, 11, 13)) set_wa_bit |= crtc_state->wm_level_disabled; /* Wa_16013835468 */ if (DISPLAY_VER(dev_priv) == 12) set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start != crtc_state->hw.adjusted_mode.crtc_vdisplay; if (set_wa_bit) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, wa_16013835468_bit_get(intel_dp)); else intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, wa_16013835468_bit_get(intel_dp), 0); } static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; /* * Only HSW and BDW have PSR AUX registers that need to be setup. * SKL+ use hardcoded values PSR AUX transactions */ if (DISPLAY_VER(dev_priv) < 9) hsw_psr_setup_aux(intel_dp); /* * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also * mask LPSP to avoid dependency on other drivers that might block * runtime_pm besides preventing other hw tracking issues now we * can rely on frontbuffer tracking. 
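	 *
	 * i.e. with these bits masked a hot plug, memory-up or LPSP event by
	 * itself no longer forces a PSR exit; exits are driven by frontbuffer
	 * tracking and by the register-write/flip events that remain
	 * unmasked.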
*/ mask = EDP_PSR_DEBUG_MASK_MEMUP | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP | EDP_PSR_DEBUG_MASK_MAX_SLEEP; /* * No separate pipe reg write mask on hsw/bdw, so have to unmask all * registers in order to keep the CURSURFLIVE tricks working :( */ if (IS_DISPLAY_VER(dev_priv, 9, 10)) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; /* allow PSR with sprite enabled */ if (IS_HASWELL(dev_priv)) mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask); psr_irq_control(intel_dp); /* * TODO: if future platforms supports DC3CO in more than one * transcoder, EXITLINE will need to be unset when disabling PSR */ if (intel_dp->psr.dc3co_exitline) intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK, intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, intel_dp->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); /* * Wa_16013835468 * Wa_14015648006 */ wm_optimization_wa(intel_dp, crtc_state); if (intel_dp->psr.psr2_enabled) { if (DISPLAY_VER(dev_priv) == 9) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, PSR2_VSC_ENABLE_PROG_HEADER | PSR2_ADD_VERTICAL_LINE_COUNT); /* * Wa_16014451276:adlp,mtl[a0,b0] * All supported adlp panels have 1-based X granularity, this may * cause issues if non-supported panels are used. */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); else if (IS_ALDERLAKE_P(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0, MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS); else if (IS_ALDERLAKE_P(dev_priv)) intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0, CLKGATE_DIS_MISC_DMASC_GATING_DIS); } } static bool psr_interrupt_error_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; /* * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR * will still keep the error set even after the reset done in the * irq_preinstall and irq_uninstall hooks. * And enabling in this situation cause the screen to freeze in the * first time that PSR HW tries to activate so lets keep PSR disabled * to avoid any rendering problems. 
*/ val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder)); val &= psr_irq_psr_error_bit_get(intel_dp); if (val) { intel_dp->psr.sink_not_reliable = true; drm_dbg_kms(&dev_priv->drm, "PSR interruption error set, not enabling PSR\n"); return false; } return true; } static void intel_psr_enable_locked(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); struct intel_encoder *encoder = &dig_port->base; u32 val; drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); intel_dp->psr.psr2_enabled = crtc_state->has_psr2; intel_dp->psr.busy_frontbuffer_bits = 0; intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; intel_dp->psr.transcoder = crtc_state->cpu_transcoder; /* DC5/DC6 requires at least 6 idle frames */ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); intel_dp->psr.dc3co_exit_delay = val; intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline; intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; intel_dp->psr.psr2_sel_fetch_cff_enabled = false; intel_dp->psr.req_psr2_sdp_prior_scanline = crtc_state->req_psr2_sdp_prior_scanline; if (!psr_interrupt_error_check(intel_dp)) return; drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", intel_dp->psr.psr2_enabled ? "2" : "1"); intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); intel_snps_phy_update_psr_power_state(dev_priv, phy, true); intel_psr_enable_sink(intel_dp); intel_psr_enable_source(intel_dp, crtc_state); intel_dp->psr.enabled = true; intel_dp->psr.paused = false; intel_psr_activate(intel_dp); } static void intel_psr_exit(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val; if (!intel_dp->psr.active) { if (transcoder_has_psr2(dev_priv, cpu_transcoder)) { val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); } val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); return; } if (intel_dp->psr.psr2_enabled) { tgl_disallow_dc3co_on_psr2_exit(intel_dp); val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), EDP_PSR2_ENABLE, 0); drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); } else { val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), EDP_PSR_ENABLE, 0); drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); } intel_dp->psr.active = false; } static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t psr_status; u32 psr_status_mask; if (intel_dp->psr.psr2_enabled) { psr_status = EDP_PSR2_STATUS(cpu_transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { psr_status = psr_status_reg(dev_priv, cpu_transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } /* Wait till PSR is idle */ if (intel_de_wait_for_clear(dev_priv, psr_status, psr_status_mask, 2000)) drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); } static void intel_psr_disable_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; enum phy phy = intel_port_to_phy(dev_priv, dp_to_dig_port(intel_dp)->base.port); 
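	/*
	 * The teardown below mirrors the enable path: kick the hardware out
	 * of PSR and wait for the status to go idle, undo the workarounds
	 * applied at enable time, and only then disable PSR (and ALPM for
	 * PSR2) on the sink via DPCD.
	 */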
lockdep_assert_held(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) return; drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", intel_dp->psr.psr2_enabled ? "2" : "1"); intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); /* * Wa_16013835468 * Wa_14015648006 */ if (DISPLAY_VER(dev_priv) >= 11) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, wa_16013835468_bit_get(intel_dp), 0); if (intel_dp->psr.psr2_enabled) { /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, MTL_CLKGATE_DIS_TRANS(cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); else if (IS_ALDERLAKE_P(dev_priv)) intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); } intel_snps_phy_update_psr_power_state(dev_priv, phy, false); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); if (intel_dp->psr.psr2_enabled) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); intel_dp->psr.enabled = false; intel_dp->psr.psr2_enabled = false; intel_dp->psr.psr2_sel_fetch_enabled = false; intel_dp->psr.psr2_sel_fetch_cff_enabled = false; } /** * intel_psr_disable - Disable PSR * @intel_dp: Intel DP * @old_crtc_state: old CRTC state * * This function needs to be called before disabling pipe. */ void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!old_crtc_state->has_psr) return; if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp))) return; mutex_lock(&intel_dp->psr.lock); intel_psr_disable_locked(intel_dp); mutex_unlock(&intel_dp->psr.lock); cancel_work_sync(&intel_dp->psr.work); cancel_delayed_work_sync(&intel_dp->psr.dc3co_work); } /** * intel_psr_pause - Pause PSR * @intel_dp: Intel DP * * This function need to be called after enabling psr. */ void intel_psr_pause(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp)) return; mutex_lock(&psr->lock); if (!psr->enabled) { mutex_unlock(&psr->lock); return; } /* If we ever hit this, we will need to add refcount to pause/resume */ drm_WARN_ON(&dev_priv->drm, psr->paused); intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); psr->paused = true; mutex_unlock(&psr->lock); cancel_work_sync(&psr->work); cancel_delayed_work_sync(&psr->dc3co_work); } /** * intel_psr_resume - Resume PSR * @intel_dp: Intel DP * * This function need to be called after pausing psr. */ void intel_psr_resume(struct intel_dp *intel_dp) { struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp)) return; mutex_lock(&psr->lock); if (!psr->paused) goto unlock; psr->paused = false; intel_psr_activate(intel_dp); unlock: mutex_unlock(&psr->lock); } static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv) { return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 : PSR2_MAN_TRK_CTL_ENABLE; } static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv) { return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; } static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv) { return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE : PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; } static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv) { return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME; } static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | man_trk_ctl_single_full_frame_bit_get(dev_priv) | man_trk_ctl_continuos_full_frame(dev_priv)); /* * Display WA #0884: skl+ * This documented WA for bxt can be safely applied * broadly so we can force HW tracking to exit PSR * instead of disabling and re-enabling. * Workaround tells us to write 0 to CUR_SURFLIVE_A, * but it makes more sense write to the current active * pipe. * * This workaround do not exist for platforms with display 10 or newer * but testing proved that it works for up display 13, for newer * than that testing will be needed. */ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); } void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); } void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; if (plane->id == PLANE_CURSOR) intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), plane_state->ctl); else intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), PLANE_SEL_FETCH_CTL_ENABLE); } void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_rect *clip; u32 val; int x, y; if (!crtc_state->enable_psr2_sel_fetch) return; if (plane->id == PLANE_CURSOR) return; clip = &plane_state->psr2_sel_fetch_area; val = (clip->y1 + plane_state->uapi.dst.y1) << 16; val |= plane_state->uapi.dst.x1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); x = plane_state->view.color_plane[color_plane].x; /* * From Bspec: UV surface Start Y Position = half of Y plane Y * start position. 
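	/*
	 * For example, for a hypothetical NV12 plane with clip->y1 == 64 the
	 * Y plane (color_plane 0) adds the full 64 lines to its base offset
	 * below, while the UV plane (color_plane 1) adds 64 / 2 = 32.
	 */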
*/ if (!color_plane) y = plane_state->view.color_plane[color_plane].y + clip->y1; else y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; val = y << 16 | x; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), val); /* Sizes are 0 based */ val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); } void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_encoder *encoder; if (!crtc_state->enable_psr2_sel_fetch) return; for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); lockdep_assert_held(&intel_dp->psr.lock); if (intel_dp->psr.psr2_sel_fetch_cff_enabled) return; break; } intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), crtc_state->psr2_man_track_ctl); } static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, struct drm_rect *clip, bool full_update) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = man_trk_ctl_enable_bit_get(dev_priv); /* SF partial frame enable has to be set even on full update */ val |= man_trk_ctl_partial_frame_bit_get(dev_priv); if (full_update) { val |= man_trk_ctl_single_full_frame_bit_get(dev_priv); val |= man_trk_ctl_continuos_full_frame(dev_priv); goto exit; } if (clip->y1 == -1) goto exit; if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) { val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1); val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1); } else { drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4); val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1); } exit: crtc_state->psr2_man_track_ctl = val; } static void clip_area_update(struct drm_rect *overlap_damage_area, struct drm_rect *damage_area, struct drm_rect *pipe_src) { if (!drm_rect_intersect(damage_area, pipe_src)) return; if (overlap_damage_area->y1 == -1) { overlap_damage_area->y1 = damage_area->y1; overlap_damage_area->y2 = damage_area->y2; return; } if (damage_area->y1 < overlap_damage_area->y1) overlap_damage_area->y1 = damage_area->y1; if (damage_area->y2 > overlap_damage_area->y2) overlap_damage_area->y2 = damage_area->y2; } static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state, struct drm_rect *pipe_clip) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u16 y_alignment; /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ if (crtc_state->dsc.compression_enable && (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)) y_alignment = vdsc_cfg->slice_height; else y_alignment = crtc_state->su_y_granularity; pipe_clip->y1 -= pipe_clip->y1 % y_alignment; if (pipe_clip->y2 % y_alignment) pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment; } /* * TODO: Not clear how to handle planes with negative position, * also planes are not updated if they have a negative X * position so for now doing a full update in this cases * * Plane scaling and rotation is not supported by selective fetch and both * 
properties can change without a modeset, so need to be check at every * atomic commit. */ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state) { if (plane_state->uapi.dst.y1 < 0 || plane_state->uapi.dst.x1 < 0 || plane_state->scaler_id >= 0 || plane_state->uapi.rotation != DRM_MODE_ROTATE_0) return false; return true; } /* * Check for pipe properties that is not supported by selective fetch. * * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch * enabled and going to the full update path. */ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state) { if (crtc_state->scaler_state.scaler_id >= 0) return false; return true; } int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 }; struct intel_plane_state *new_plane_state, *old_plane_state; struct intel_plane *plane; bool full_update = false; int i, ret; if (!crtc_state->enable_psr2_sel_fetch) return 0; if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) { full_update = true; goto skip_sel_fetch_set_loop; } /* * Calculate minimal selective fetch area of each plane and calculate * the pipe damaged area. * In the next loop the plane selective fetch area will actually be set * using whole pipe damaged area. */ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1, .x2 = INT_MAX }; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) continue; if (!new_plane_state->uapi.visible && !old_plane_state->uapi.visible) continue; if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { full_update = true; break; } /* * If visibility or plane moved, mark the whole plane area as * damaged as it needs to be complete redraw in the new and old * position. 
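	/*
	 * For example, if a hypothetical plane moves from dst y = [0, 100)
	 * to dst y = [200, 300), both the old and the new destination are
	 * merged into the pipe damage below; since the pipe clip is kept as
	 * a single y span, lines 0-299 end up being refetched.
	 */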
*/ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible || !drm_rect_equals(&new_plane_state->uapi.dst, &old_plane_state->uapi.dst)) { if (old_plane_state->uapi.visible) { damaged_area.y1 = old_plane_state->uapi.dst.y1; damaged_area.y2 = old_plane_state->uapi.dst.y2; clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } if (new_plane_state->uapi.visible) { damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } continue; } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) { /* If alpha changed mark the whole plane area as damaged */ damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); continue; } src = drm_plane_state_src(&new_plane_state->uapi); drm_rect_fp_to_int(&src, &src); if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi, &new_plane_state->uapi, &damaged_area)) continue; damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1; damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1; clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } /* * TODO: For now we are just using full update in case * selective fetch area calculation fails. To optimize this we * should identify cases where this happens and fix the area * calculation for those. */ if (pipe_clip.y1 == -1) { drm_info_once(&dev_priv->drm, "Selective fetch area calculation failed in pipe %c\n", pipe_name(crtc->pipe)); full_update = true; } if (full_update) goto skip_sel_fetch_set_loop; /* Wa_14014971492 */ if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) && crtc_state->splitter.enable) pipe_clip.y1 = 0; ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) return ret; intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip); /* * Now that we have the pipe damaged area check if it intersect with * every plane, if it does set the plane selective fetch area. */ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_rect *sel_fetch_area, inter; struct intel_plane *linked = new_plane_state->planar_linked_plane; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || !new_plane_state->uapi.visible) continue; inter = pipe_clip; if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) continue; if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { full_update = true; break; } sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; crtc_state->update_planes |= BIT(plane->id); /* * Sel_fetch_area is calculated for UV plane. Use * same area for Y plane as well. 
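	/*
	 * For a hypothetical NV12 framebuffer this means the area computed
	 * above for the UV plane is copied verbatim to its linked Y plane,
	 * and both planes are flagged in crtc_state->update_planes.
	 */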
*/ if (linked) { struct intel_plane_state *linked_new_plane_state; struct drm_rect *linked_sel_fetch_area; linked_new_plane_state = intel_atomic_get_plane_state(state, linked); if (IS_ERR(linked_new_plane_state)) return PTR_ERR(linked_new_plane_state); linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; linked_sel_fetch_area->y1 = sel_fetch_area->y1; linked_sel_fetch_area->y2 = sel_fetch_area->y2; crtc_state->update_planes |= BIT(linked->id); } } skip_sel_fetch_set_loop: psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); return 0; } void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; if (!HAS_PSR(i915)) return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, old_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_psr *psr = &intel_dp->psr; bool needs_to_disable = false; mutex_lock(&psr->lock); /* * Reasons to disable: * - PSR disabled in new state * - All planes will go inactive * - Changing between PSR versions * - Display WA #1136: skl, bxt */ needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); needs_to_disable |= !new_crtc_state->has_psr; needs_to_disable |= !new_crtc_state->active_planes; needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; needs_to_disable |= DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled; if (psr->enabled && needs_to_disable) intel_psr_disable_locked(intel_dp); else if (psr->enabled && new_crtc_state->wm_level_disabled) /* Wa_14015648006 */ wm_optimization_wa(intel_dp, new_crtc_state); mutex_unlock(&psr->lock); } } static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_encoder *encoder; if (!crtc_state->has_psr) return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_psr *psr = &intel_dp->psr; bool keep_disabled = false; mutex_lock(&psr->lock); drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes); keep_disabled |= psr->sink_not_reliable; keep_disabled |= !crtc_state->active_planes; /* Display WA #1136: skl, bxt */ keep_disabled |= DISPLAY_VER(dev_priv) < 11 && crtc_state->wm_level_disabled; if (!psr->enabled && !keep_disabled) intel_psr_enable_locked(intel_dp, crtc_state); else if (psr->enabled && !crtc_state->wm_level_disabled) /* Wa_14015648006 */ wm_optimization_wa(intel_dp, crtc_state); /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ if (crtc_state->crc_enabled && psr->enabled) psr_force_hw_tracking_exit(intel_dp); mutex_unlock(&psr->lock); } } void intel_psr_post_plane_update(const struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; if (!HAS_PSR(dev_priv)) return; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) _intel_psr_post_plane_update(state, crtc_state); } static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder 
cpu_transcoder = intel_dp->psr.transcoder; /* * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. * As all higher states has bit 4 of PSR2 state set we can just wait for * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. */ return intel_de_wait_for_clear(dev_priv, EDP_PSR2_STATUS(cpu_transcoder), EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); } static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; /* * From bspec: Panel Self Refresh (BDW+) * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of * exit training time + 1.5 ms of aux channel handshake. 50 ms is * defensive enough to cover everything. */ return intel_de_wait_for_clear(dev_priv, psr_status_reg(dev_priv, cpu_transcoder), EDP_PSR_STATUS_STATE_MASK, 50); } /** * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update * @new_crtc_state: new CRTC state * * This function is expected to be called from pipe_update_start() where it is * not expected to race with PSR enable or disable. */ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); struct intel_encoder *encoder; if (!new_crtc_state->has_psr) return; for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, new_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int ret; lockdep_assert_held(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) continue; if (intel_dp->psr.psr2_enabled) ret = _psr2_ready_for_pipe_update_locked(intel_dp); else ret = _psr1_ready_for_pipe_update_locked(intel_dp); if (ret) drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n"); } } static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; i915_reg_t reg; u32 mask; int err; if (!intel_dp->psr.enabled) return false; if (intel_dp->psr.psr2_enabled) { reg = EDP_PSR2_STATUS(cpu_transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { reg = psr_status_reg(dev_priv, cpu_transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } mutex_unlock(&intel_dp->psr.lock); err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); if (err) drm_err(&dev_priv->drm, "Timed out waiting for PSR Idle for re-enable\n"); /* After the unlocked wait, verify that PSR is still wanted! 
*/ mutex_lock(&intel_dp->psr.lock); return err == 0 && intel_dp->psr.enabled; } static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) { struct drm_connector_list_iter conn_iter; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct drm_connector *conn; int err = 0; state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) return -ENOMEM; drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; to_intel_atomic_state(state)->internal = true; retry: drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); drm_for_each_connector_iter(conn, &conn_iter) { struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) continue; conn_state = drm_atomic_get_connector_state(state, conn); if (IS_ERR(conn_state)) { err = PTR_ERR(conn_state); break; } if (!conn_state->crtc) continue; crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); if (IS_ERR(crtc_state)) { err = PTR_ERR(crtc_state); break; } /* Mark mode as changed to trigger a pipe->update() */ crtc_state->mode_changed = true; } drm_connector_list_iter_end(&conn_iter); if (err == 0) err = drm_atomic_commit(state); if (err == -EDEADLK) { drm_atomic_state_clear(state); err = drm_modeset_backoff(&ctx); if (!err) goto retry; } drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); drm_atomic_state_put(state); return err; } int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; u32 old_mode; int ret; if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) { drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); return -EINVAL; } ret = mutex_lock_interruptible(&intel_dp->psr.lock); if (ret) return ret; old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK; intel_dp->psr.debug = val; /* * Do it right away if it's already enabled, otherwise it will be done * when enabling the source. */ if (intel_dp->psr.enabled) psr_irq_control(intel_dp); mutex_unlock(&intel_dp->psr.lock); if (old_mode != mode) ret = intel_psr_fastset_force(dev_priv); return ret; } static void intel_psr_handle_irq(struct intel_dp *intel_dp) { struct intel_psr *psr = &intel_dp->psr; intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; /* let's make sure that sink is awaken */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } static void intel_psr_work(struct work_struct *work) { struct intel_dp *intel_dp = container_of(work, typeof(*intel_dp), psr.work); mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) goto unlock; if (READ_ONCE(intel_dp->psr.irq_aux_error)) intel_psr_handle_irq(intel_dp); /* * We have to make sure PSR is ready for re-enable * otherwise it keeps disabled until next full enable/disable cycle. * PSR might take some time to get fully disabled * and be ready for re-enable. */ if (!__psr_wait_for_idle_locked(intel_dp)) goto unlock; /* * The delayed work can race with an invalidate hence we need to * recheck. Since psr_flush first clears this and then reschedules we * won't ever miss a flush when bailing out here. 
*/ if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active) goto unlock; intel_psr_activate(intel_dp); unlock: mutex_unlock(&intel_dp->psr.lock); } static void _psr_invalidate_handle(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 val; if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* Send one update otherwise lag is observed in screen */ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); return; } val = man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | man_trk_ctl_continuos_full_frame(dev_priv); intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val); intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = true; } else { intel_psr_exit(intel_dp); } } /** * intel_psr_invalidate - Invalidate PSR * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the invalidate * * Since the hardware frontbuffer tracking has gaps we need to integrate * with the software frontbuffer tracking. This function gets called every * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be * disabled if the frontbuffer mask contains a buffer relevant to PSR. * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." */ void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { struct intel_encoder *encoder; if (origin == ORIGIN_FLIP) return; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { unsigned int pipe_frontbuffer_bits = frontbuffer_bits; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) { mutex_unlock(&intel_dp->psr.lock); continue; } pipe_frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits; if (pipe_frontbuffer_bits) _psr_invalidate_handle(intel_dp); mutex_unlock(&intel_dp->psr.lock); } } /* * When we will be completely rely on PSR2 S/W tracking in future, * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP * event also therefore tgl_dc3co_flush_locked() require to be changed * accordingly in future. */ static void tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, enum fb_op_origin origin) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled || !intel_dp->psr.active) return; /* * At every frontbuffer flush flip event modified delay of delayed work, * when delayed work schedules that means display has been idle. */ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe))) return; tgl_psr2_enable_dc3co(intel_dp); mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work, intel_dp->psr.dc3co_exit_delay); } static void _psr_flush_handle(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; if (intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? 
*/ if (intel_dp->psr.busy_frontbuffer_bits == 0) { u32 val = man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | man_trk_ctl_single_full_frame_bit_get(dev_priv) | man_trk_ctl_continuos_full_frame(dev_priv); /* * Set psr2_sel_fetch_cff_enabled as false to allow selective * updates. Still keep cff bit enabled as we don't have proper * SU configuration in case update is sent for any reason after * sff bit gets cleared by the HW on next vblank. */ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val); intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); intel_dp->psr.psr2_sel_fetch_cff_enabled = false; } } else { /* * continuous full frame is disabled, only a single full * frame is required */ psr_force_hw_tracking_exit(intel_dp); } } else { psr_force_hw_tracking_exit(intel_dp); if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits) queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); } } /** * intel_psr_flush - Flush PSR * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the flush * * Since the hardware frontbuffer tracking has gaps we need to integrate * with the software frontbuffer tracking. This function gets called every * time frontbuffer rendering has completed and flushed out to memory. PSR * can be enabled again if no other frontbuffer relevant to PSR is dirty. * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. */ void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { struct intel_encoder *encoder; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { unsigned int pipe_frontbuffer_bits = frontbuffer_bits; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) { mutex_unlock(&intel_dp->psr.lock); continue; } pipe_frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits; /* * If the PSR is paused by an explicit intel_psr_paused() call, * we have to ensure that the PSR is not activated until * intel_psr_resume() is called. */ if (intel_dp->psr.paused) goto unlock; if (origin == ORIGIN_FLIP || (origin == ORIGIN_CURSOR_UPDATE && !intel_dp->psr.psr2_sel_fetch_enabled)) { tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin); goto unlock; } if (pipe_frontbuffer_bits == 0) goto unlock; /* By definition flush = invalidate + flush */ _psr_flush_handle(intel_dp); unlock: mutex_unlock(&intel_dp->psr.lock); } } /** * intel_psr_init - Init basic PSR work and mutex. * @intel_dp: Intel DP * * This function is called after the initializing connector. * (the initializing of connector treats the handling of connector capabilities) * And it initializes basic PSR stuff for each DP Encoder. */ void intel_psr_init(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!HAS_PSR(dev_priv)) return; /* * HSW spec explicitly says PSR is tied to port A. * BDW+ platforms have a instance of PSR registers per transcoder but * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder * than eDP one. * For now it only supports one instance of PSR for BDW, GEN9 and GEN11. * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11. * But GEN12 supports a instance of PSR registers per transcoder. 
*/ if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) { drm_dbg_kms(&dev_priv->drm, "PSR condition failed: Port not supported\n"); return; } intel_dp->psr.source_support = true; /* Set link_standby x link_off defaults */ if (DISPLAY_VER(dev_priv) < 12) /* For new platforms up to TGL let's respect VBT back again */ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link; INIT_WORK(&intel_dp->psr.work, intel_psr_work); INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work); mutex_init(&intel_dp->psr.lock); } static int psr_get_status_and_error_status(struct intel_dp *intel_dp, u8 *status, u8 *error_status) { struct drm_dp_aux *aux = &intel_dp->aux; int ret; ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); if (ret != 1) return ret; ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); if (ret != 1) return ret; *status = *status & DP_PSR_SINK_STATE_MASK; return 0; } static void psr_alpm_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct drm_dp_aux *aux = &intel_dp->aux; struct intel_psr *psr = &intel_dp->psr; u8 val; int r; if (!psr->psr2_enabled) return; r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); if (r != 1) { drm_err(&dev_priv->drm, "Error reading ALPM status\n"); return; } if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; drm_dbg_kms(&dev_priv->drm, "ALPM lock timeout error, disabling PSR\n"); /* Clearing error */ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); } } static void psr_capability_changed_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; u8 val; int r; r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); if (r != 1) { drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n"); return; } if (val & DP_PSR_CAPS_CHANGE) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; drm_dbg_kms(&dev_priv->drm, "Sink PSR capability changed, disabling PSR\n"); /* Clearing it */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); } } void intel_psr_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; u8 status, error_status; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | DP_PSR_LINK_CRC_ERROR; if (!CAN_PSR(intel_dp)) return; mutex_lock(&psr->lock); if (!psr->enabled) goto exit; if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { drm_err(&dev_priv->drm, "Error reading PSR status or error status\n"); goto exit; } if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; } if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) drm_dbg_kms(&dev_priv->drm, "PSR sink internal error, disabling PSR\n"); if (error_status & DP_PSR_RFB_STORAGE_ERROR) drm_dbg_kms(&dev_priv->drm, "PSR RFB storage error, disabling PSR\n"); if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) drm_dbg_kms(&dev_priv->drm, "PSR VSC SDP uncorrectable error, disabling PSR\n"); if (error_status & DP_PSR_LINK_CRC_ERROR) drm_dbg_kms(&dev_priv->drm, "PSR Link CRC error, disabling PSR\n"); if (error_status & ~errors) drm_err(&dev_priv->drm, "PSR_ERROR_STATUS unhandled errors %x\n", error_status & ~errors); /* clear status register */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); psr_alpm_check(intel_dp); 
psr_capability_changed_check(intel_dp); exit: mutex_unlock(&psr->lock); } bool intel_psr_enabled(struct intel_dp *intel_dp) { bool ret; if (!CAN_PSR(intel_dp)) return false; mutex_lock(&intel_dp->psr.lock); ret = intel_dp->psr.enabled; mutex_unlock(&intel_dp->psr.lock); return ret; } /** * intel_psr_lock - grab PSR lock * @crtc_state: the crtc state * * This is initially meant to be used by around CRTC update, when * vblank sensitive registers are updated and we need grab the lock * before it to avoid vblank evasion. */ void intel_psr_lock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_encoder *encoder; if (!crtc_state->has_psr) return; for_each_intel_encoder_mask_with_psr(&i915->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock(&intel_dp->psr.lock); break; } } /** * intel_psr_unlock - release PSR lock * @crtc_state: the crtc state * * Release the PSR lock that was held during pipe update. */ void intel_psr_unlock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_encoder *encoder; if (!crtc_state->has_psr) return; for_each_intel_encoder_mask_with_psr(&i915->drm, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_unlock(&intel_dp->psr.lock); break; } } static void psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; const char *status = "unknown"; u32 val, status_val; if (intel_dp->psr.psr2_enabled) { static const char * const live_status[] = { "IDLE", "CAPTURE", "CAPTURE_FS", "SLEEP", "BUFON_FW", "ML_UP", "SU_STANDBY", "FAST_SLEEP", "DEEP_SLEEP", "BUF_ON", "TG_ON" }; val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } else { static const char * const live_status[] = { "IDLE", "SRDONACK", "SRDENT", "BUFOFF", "BUFON", "AUXACK", "SRDOFFACK", "SRDENT_ON", }; val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder)); status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); } static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; intel_wakeref_t wakeref; const char *status; bool enabled; u32 val; seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); if (psr->sink_support) seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); seq_puts(m, "\n"); if (!psr->sink_support) return 0; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&psr->lock); if (psr->enabled) status = psr->psr2_enabled ? 
"PSR2 enabled" : "PSR1 enabled"; else status = "disabled"; seq_printf(m, "PSR mode: %s\n", status); if (!psr->enabled) { seq_printf(m, "PSR sink not reliable: %s\n", str_yes_no(psr->sink_not_reliable)); goto unlock; } if (psr->psr2_enabled) { val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", str_enabled_disabled(enabled), val); psr_source_status(intel_dp, m); seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", psr->busy_frontbuffer_bits); /* * SKL+ Perf counter is reset to 0 everytime DC state is entered */ val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder)); seq_printf(m, "Performance counter: %u\n", REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val)); if (psr->debug & I915_PSR_DEBUG_IRQ) { seq_printf(m, "Last attempted entry at: %lld\n", psr->last_entry_attempt); seq_printf(m, "Last exit at: %lld\n", psr->last_exit); } if (psr->psr2_enabled) { u32 su_frames_val[3]; int frame; /* * Reading all 3 registers before hand to minimize crossing a * frame boundary between register reads */ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame)); su_frames_val[frame / 3] = val; } seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { u32 su_blocks; su_blocks = su_frames_val[frame / 3] & PSR2_SU_STATUS_MASK(frame); su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); seq_printf(m, "%d\t%d\n", frame, su_blocks); } seq_printf(m, "PSR2 selective fetch: %s\n", str_enabled_disabled(psr->psr2_sel_fetch_enabled)); } unlock: mutex_unlock(&psr->lock); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } static int i915_edp_psr_status_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; struct intel_dp *intel_dp = NULL; struct intel_encoder *encoder; if (!HAS_PSR(dev_priv)) return -ENODEV; /* Find the first EDP which supports PSR */ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { intel_dp = enc_to_intel_dp(encoder); break; } if (!intel_dp) return -ENODEV; return intel_psr_status(m, intel_dp); } DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status); static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; struct intel_encoder *encoder; intel_wakeref_t wakeref; int ret = -ENODEV; if (!HAS_PSR(dev_priv)) return ret; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); // TODO: split to each transcoder's PSR debug state ret = intel_psr_debug_set(intel_dp, val); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } return ret; } static int i915_edp_psr_debug_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; struct intel_encoder *encoder; if (!HAS_PSR(dev_priv)) return -ENODEV; for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); // TODO: split to each transcoder's PSR debug state *val = READ_ONCE(intel_dp->psr.debug); return 0; } return -ENODEV; } DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, i915_edp_psr_debug_get, i915_edp_psr_debug_set, "%llu\n"); void intel_psr_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor 
= i915->drm.primary; debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root, i915, &i915_edp_psr_debug_fops); debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root, i915, &i915_edp_psr_status_fops); } static int i915_psr_sink_status_show(struct seq_file *m, void *data) { struct intel_connector *connector = m->private; struct intel_dp *intel_dp = intel_attached_dp(connector); static const char * const sink_status[] = { "inactive", "transition to active, capture and display", "active, display from RFB", "active, capture and display on sink device timings", "transition to inactive, capture and display, timing re-sync", "reserved", "reserved", "sink internal error", }; const char *str; int ret; u8 val; if (!CAN_PSR(intel_dp)) { seq_puts(m, "PSR Unsupported\n"); return -ENODEV; } if (connector->base.status != connector_status_connected) return -ENODEV; ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); if (ret != 1) return ret < 0 ? ret : -EIO; val &= DP_PSR_SINK_STATE_MASK; if (val < ARRAY_SIZE(sink_status)) str = sink_status[val]; else str = "unknown"; seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); return 0; } DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); static int i915_psr_status_show(struct seq_file *m, void *data) { struct intel_connector *connector = m->private; struct intel_dp *intel_dp = intel_attached_dp(connector); return intel_psr_status(m, intel_dp); } DEFINE_SHOW_ATTRIBUTE(i915_psr_status); void intel_psr_connector_debugfs_add(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct dentry *root = connector->base.debugfs_entry; if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) return; debugfs_create_file("i915_psr_sink_status", 0444, root, connector, &i915_psr_sink_status_fops); if (HAS_PSR(i915)) debugfs_create_file("i915_psr_status", 0444, root, connector, &i915_psr_status_fops); }
linux-master
drivers/gpu/drm/i915/display/intel_psr.c
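A minimal stand-alone sketch of the damage-accumulation step used by the PSR2 selective fetch update above: per-plane damage is translated from framebuffer (src) coordinates into plane (dst) coordinates and unioned into a single pipe clip, with y1 == -1 marking "no damage yet". The struct and helper names here (rect, clip_union) are illustrative stand-ins, not the kernel's struct drm_rect / clip_area_update() API.

/*
 * Illustrative userspace sketch only; mirrors the clip accumulation in
 * intel_psr2_sel_fetch_update() with plain structs. Builds with any C compiler.
 */
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Grow @clip so it also covers @area, clamped to the pipe source size. */
static void clip_union(struct rect *clip, const struct rect *area,
		       const struct rect *pipe_src)
{
	if (clip->y1 == -1) {		/* empty clip, same convention as pipe_clip */
		*clip = *area;
	} else {
		if (area->y1 < clip->y1)
			clip->y1 = area->y1;
		if (area->y2 > clip->y2)
			clip->y2 = area->y2;
	}
	if (clip->y1 < pipe_src->y1)
		clip->y1 = pipe_src->y1;
	if (clip->y2 > pipe_src->y2)
		clip->y2 = pipe_src->y2;
}

int main(void)
{
	struct rect pipe_src = { 0, 0, 1920, 1080 };
	struct rect pipe_clip = { 0, -1, 0, -1 };	/* y1 == -1: no damage yet */
	struct rect damage = { 0, 40, 1920, 80 };	/* damage in fb coordinates */
	int dst_y1 = 100, src_y1 = 0;			/* plane placed at pipe line 100 */

	/* Translate fb-relative damage to pipe coordinates, as the update loop does. */
	damage.y1 += dst_y1 - src_y1;
	damage.y2 += dst_y1 - src_y1;

	clip_union(&pipe_clip, &damage, &pipe_src);
	printf("pipe clip: y1=%d y2=%d\n", pipe_clip.y1, pipe_clip.y2);	/* 140..180 */
	return 0;
}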
// SPDX-License-Identifier: MIT /* * Copyright © 2018 Intel Corporation * * Author: Gaurav K Singh <[email protected]> * Manasi Navare <[email protected]> */ #include <linux/limits.h> #include <drm/display/drm_dsc_helper.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_qp_tables.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) { const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!HAS_DSC(i915)) return false; if (DISPLAY_VER(i915) == 11 && cpu_transcoder == TRANSCODER_A) return false; return true; } static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (DISPLAY_VER(i915) >= 12) return true; if (cpu_transcoder == TRANSCODER_EDP || cpu_transcoder == TRANSCODER_DSI_0 || cpu_transcoder == TRANSCODER_DSI_1) return false; /* There's no pipe A DSC engine on ICL */ drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A); return true; } static void intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf, int bpp) { int bpc = vdsc_cfg->bits_per_component; /* Read range_minqp and range_max_qp from qp tables */ vdsc_cfg->rc_range_params[buf].range_min_qp = intel_lookup_range_min_qp(bpc, buf, bpp, vdsc_cfg->native_420); vdsc_cfg->rc_range_params[buf].range_max_qp = intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420); } /* * We are using the method provided in DSC 1.2a C-Model in codec_main.c * Above method use a common formula to derive values for any combination of DSC * variables. The formula approach may yield slight differences in the derived PPS * parameters from the original parameter sets. These differences are not consequential * to the coding performance because all parameter sets have been shown to produce * visually lossless quality (provides the same PPS values as * DSCParameterValuesVESA V1-2 spreadsheet). */ static void calculate_rc_params(struct drm_dsc_config *vdsc_cfg) { int bpc = vdsc_cfg->bits_per_component; int bpp = vdsc_cfg->bits_per_pixel >> 4; int qp_bpc_modifier = (bpc - 8) * 2; u32 res, buf_i, bpp_i; if (vdsc_cfg->slice_height >= 8) vdsc_cfg->first_line_bpg_offset = 12 + DIV_ROUND_UP((9 * min(34, vdsc_cfg->slice_height - 8)), 100); else vdsc_cfg->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1); /* * According to DSC 1.2 spec in Section 4.1 if native_420 is set: * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice * height < 8. * -second_line_offset_adj is 512 as shown by emperical values to yield best chroma * preservation in second line. * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11 * fractional bits. 
*/ if (vdsc_cfg->native_420) { if (vdsc_cfg->slice_height >= 8) vdsc_cfg->second_line_bpg_offset = 12; else vdsc_cfg->second_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1); vdsc_cfg->second_line_offset_adj = 512; vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11, vdsc_cfg->slice_height - 1); } /* Our hw supports only 444 modes as of today */ if (bpp >= 12) vdsc_cfg->initial_offset = 2048; else if (bpp >= 10) vdsc_cfg->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2); else if (bpp >= 8) vdsc_cfg->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2); else vdsc_cfg->initial_offset = 6144; /* initial_xmit_delay = rc_model_size/2/compression_bpp */ vdsc_cfg->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp); vdsc_cfg->flatness_min_qp = 3 + qp_bpc_modifier; vdsc_cfg->flatness_max_qp = 12 + qp_bpc_modifier; vdsc_cfg->rc_quant_incr_limit0 = 11 + qp_bpc_modifier; vdsc_cfg->rc_quant_incr_limit1 = 11 + qp_bpc_modifier; if (vdsc_cfg->native_420) { static const s8 ofs_und4[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; static const s8 ofs_und5[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; static const s8 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; static const s8 ofs_und8[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; bpp_i = bpp - 8; for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { u8 range_bpg_offset; intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i); /* Calculate range_bpg_offset */ if (bpp <= 8) { range_bpg_offset = ofs_und4[buf_i]; } else if (bpp <= 10) { res = DIV_ROUND_UP(((bpp - 8) * (ofs_und5[buf_i] - ofs_und4[buf_i])), 2); range_bpg_offset = ofs_und4[buf_i] + res; } else if (bpp <= 12) { res = DIV_ROUND_UP(((bpp - 10) * (ofs_und6[buf_i] - ofs_und5[buf_i])), 2); range_bpg_offset = ofs_und5[buf_i] + res; } else if (bpp <= 16) { res = DIV_ROUND_UP(((bpp - 12) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 4); range_bpg_offset = ofs_und6[buf_i] + res; } else { range_bpg_offset = ofs_und8[buf_i]; } vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } } else { static const s8 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; static const s8 ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; static const s8 ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; static const s8 ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; bpp_i = (2 * (bpp - 6)); for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { u8 range_bpg_offset; intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i); /* Calculate range_bpg_offset */ if (bpp <= 6) { range_bpg_offset = ofs_und6[buf_i]; } else if (bpp <= 8) { res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2); range_bpg_offset = ofs_und6[buf_i] + res; } else if (bpp <= 12) { range_bpg_offset = ofs_und8[buf_i]; } else if (bpp <= 15) { res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3); range_bpg_offset = ofs_und12[buf_i] + res; } else { range_bpg_offset = ofs_und15[buf_i]; } vdsc_cfg->rc_range_params[buf_i].range_bpg_offset = range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } } } static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config, struct drm_dsc_config *vdsc_cfg) { if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_RGB || pipe_config->output_format == 
INTEL_OUTPUT_FORMAT_YCBCR444) { if (vdsc_cfg->slice_height > 4095) return -EINVAL; if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 15000) return -EINVAL; } else if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { if (vdsc_cfg->slice_width % 2) return -EINVAL; if (vdsc_cfg->slice_height % 2) return -EINVAL; if (vdsc_cfg->slice_height > 4094) return -EINVAL; if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 30000) return -EINVAL; } return 0; } int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; u16 compressed_bpp = pipe_config->dsc.compressed_bpp; int err; int ret; vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, pipe_config->dsc.slice_count); err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg); if (err) { drm_dbg_kms(&dev_priv->drm, "Slice dimension requirements not met\n"); return err; } /* * According to DSC 1.2 specs if colorspace is YCbCr then convert_rgb is 0 * else 1 */ vdsc_cfg->convert_rgb = pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR444; if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) vdsc_cfg->native_420 = true; /* We do not support YcBCr422 as of now */ vdsc_cfg->native_422 = false; vdsc_cfg->simple_422 = false; /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; /* Gen 11 only supports integral values of bpp */ vdsc_cfg->bits_per_pixel = compressed_bpp << 4; /* * According to DSC 1.2 specs in Section 4.1 if native_420 is set * we need to double the current bpp. */ if (vdsc_cfg->native_420) vdsc_cfg->bits_per_pixel <<= 1; vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; drm_dsc_set_rc_buf_thresh(vdsc_cfg); /* * From XE_LPD onwards we supports compression bpps in steps of 1 * upto uncompressed bpp-1, hence add calculations for all the rc * parameters */ if (DISPLAY_VER(dev_priv) >= 13) { calculate_rc_params(vdsc_cfg); } else { if ((compressed_bpp == 8 || compressed_bpp == 12) && (vdsc_cfg->bits_per_component == 8 || vdsc_cfg->bits_per_component == 10 || vdsc_cfg->bits_per_component == 12)) ret = drm_dsc_setup_rc_params(vdsc_cfg, DRM_DSC_1_1_PRE_SCR); else ret = drm_dsc_setup_rc_params(vdsc_cfg, DRM_DSC_1_2_444); if (ret) return ret; } /* * BitsPerComponent value determines mux_word_size: * When BitsPerComponent is less than or 10bpc, muxWordSize will be equal to * 48 bits otherwise 64 */ if (vdsc_cfg->bits_per_component <= 10) vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; else vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC; /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); return 0; } enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * VDSC/joining uses a separate power well, PW2, and requires * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases: * * - ICL eDP/DSI transcoder * - Display version 12 (except RKL) pipe A * * For any other pipe, VDSC/joining uses the power well associated with * the pipe in use. 
Hence another reference on the pipe power domain * will suffice. (Except no VDSC/joining on ICL pipe A.) */ if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; else if (is_pipe_dsc(crtc, cpu_transcoder)) return POWER_DOMAIN_PIPE(pipe); else return POWER_DOMAIN_TRANSCODER_VDSC_PW2; } int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state) { int num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1; if (crtc_state->bigjoiner_pipes) num_vdsc_instances *= 2; return num_vdsc_instances; } static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; u32 pps_val = 0; u32 rc_buf_thresh_dword[4]; u32 rc_range_params_dword[8]; int i = 0; int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state); /* Populate PICTURE_PARAMETER_SET_0 registers */ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor << DSC_VER_MIN_SHIFT | vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; if (vdsc_cfg->dsc_version_minor == 2) { pps_val |= DSC_ALT_ICH_SEL; if (vdsc_cfg->native_420) pps_val |= DSC_NATIVE_420_ENABLE; if (vdsc_cfg->native_422) pps_val |= DSC_NATIVE_422_ENABLE; } if (vdsc_cfg->block_pred_enable) pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) pps_val |= DSC_COLOR_SPACE_CONVERSION; if (vdsc_cfg->simple_422) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_1 registers */ pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_2 registers */ pps_val = 0; pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, 
ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_3 registers */ pps_val = 0; pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_4 registers */ pps_val = 0; pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_5 registers */ pps_val = 0; pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_6 registers */ pps_val = 0; pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) | DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_7 registers */ pps_val = 0; pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); if 
(crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_8 registers */ pps_val = 0; pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_9 registers */ pps_val = 0; pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), pps_val); } /* Populate PICTURE_PARAMETER_SET_10 registers */ pps_val = 0; pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) | DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_10, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), pps_val); } /* Populate Picture parameter set 16 */ pps_val = 0; pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) | DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) / vdsc_cfg->slice_width) | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_16, pps_val); } else { intel_de_write(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), pps_val); } if (DISPLAY_VER(dev_priv) >= 14) { /* Populate PICTURE_PARAMETER_SET_17 registers */ pps_val = 0; pps_val |= DSC_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset); drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val); intel_de_write(dev_priv, MTL_DSC0_PICTURE_PARAMETER_SET_17(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, MTL_DSC1_PICTURE_PARAMETER_SET_17(pipe), pps_val); /* Populate PICTURE_PARAMETER_SET_18 
registers */ pps_val = 0; pps_val |= DSC_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) | DSC_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj); drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val); intel_de_write(dev_priv, MTL_DSC0_PICTURE_PARAMETER_SET_18(pipe), pps_val); if (crtc_state->dsc.dsc_split) intel_de_write(dev_priv, MTL_DSC1_PICTURE_PARAMETER_SET_18(pipe), pps_val); } /* Populate the RC_BUF_THRESH registers */ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword)); for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * (i % 4)); drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, rc_buf_thresh_dword[i / 4]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); if (crtc_state->dsc.dsc_split) { intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); } } else { intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe), rc_buf_thresh_dword[0]); intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), rc_buf_thresh_dword[1]); intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe), rc_buf_thresh_dword[2]); intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), rc_buf_thresh_dword[3]); if (crtc_state->dsc.dsc_split) { intel_de_write(dev_priv, ICL_DSC1_RC_BUF_THRESH_0(pipe), rc_buf_thresh_dword[0]); intel_de_write(dev_priv, ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), rc_buf_thresh_dword[1]); intel_de_write(dev_priv, ICL_DSC1_RC_BUF_THRESH_1(pipe), rc_buf_thresh_dword[2]); intel_de_write(dev_priv, ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe), rc_buf_thresh_dword[3]); } } /* Populate the RC_RANGE_PARAMETERS registers */ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword)); for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { rc_range_params_dword[i / 2] |= (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset << RC_BPG_OFFSET_SHIFT) | (vdsc_cfg->rc_range_params[i].range_max_qp << RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, rc_range_params_dword[i / 2]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW, rc_range_params_dword[1]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1, rc_range_params_dword[2]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW, rc_range_params_dword[3]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2, rc_range_params_dword[4]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW, rc_range_params_dword[5]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3, rc_range_params_dword[6]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW, rc_range_params_dword[7]); if (crtc_state->dsc.dsc_split) { intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0_UDW, rc_range_params_dword[1]); intel_de_write(dev_priv, 
DSCC_RC_RANGE_PARAMETERS_1, rc_range_params_dword[2]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1_UDW, rc_range_params_dword[3]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2, rc_range_params_dword[4]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2_UDW, rc_range_params_dword[5]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3, rc_range_params_dword[6]); intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3_UDW, rc_range_params_dword[7]); } } else { intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), rc_range_params_dword[0]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe), rc_range_params_dword[1]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe), rc_range_params_dword[2]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe), rc_range_params_dword[3]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe), rc_range_params_dword[4]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe), rc_range_params_dword[5]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe), rc_range_params_dword[6]); intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), rc_range_params_dword[7]); if (crtc_state->dsc.dsc_split) { intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), rc_range_params_dword[0]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), rc_range_params_dword[1]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe), rc_range_params_dword[2]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe), rc_range_params_dword[3]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe), rc_range_params_dword[4]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe), rc_range_params_dword[5]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe), rc_range_params_dword[6]); intel_de_write(dev_priv, ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe), rc_range_params_dword[7]); } } } void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; struct drm_dsc_picture_parameter_set pps; enum port port; if (!crtc_state->dsc.compression_enable) return; drm_dsc_pps_payload_pack(&pps, vdsc_cfg); for_each_dsi_port(port, intel_dsi->ports) { dsi = intel_dsi->dsi_hosts[port]->device; mipi_dsi_picture_parameter_set(dsi, &pps); mipi_dsi_compression_mode(dsi, true); } } void intel_dsc_dp_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; if (!crtc_state->dsc.compression_enable) return; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg); dig_port->write_infoframe(encoder, crtc_state, DP_SDP_PPS, &dp_dsc_pps_sdp, sizeof(dp_dsc_pps_sdp)); } static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { return is_pipe_dsc(crtc, cpu_transcoder) ? ICL_PIPE_DSS_CTL1(crtc->pipe) : DSS_CTL1; } static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { return is_pipe_dsc(crtc, cpu_transcoder) ? 
ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2; } void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dss_ctl1_val = 0; if (crtc_state->bigjoiner_pipes && !crtc_state->dsc.compression_enable) { if (intel_crtc_is_bigjoiner_slave(crtc_state)) dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE; else dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER; intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); } } void intel_dsc_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dss_ctl1_val = 0; u32 dss_ctl2_val = 0; if (!crtc_state->dsc.compression_enable) return; intel_dsc_pps_configure(crtc_state); dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; dss_ctl1_val |= JOINER_ENABLE; } if (crtc_state->bigjoiner_pipes) { dss_ctl1_val |= BIG_JOINER_ENABLE; if (!intel_crtc_is_bigjoiner_slave(crtc_state)) dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE; } intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val); } void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* Disable only if either of them is enabled */ if (old_crtc_state->dsc.compression_enable || old_crtc_state->bigjoiner_pipes) { intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0); intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0); } } void intel_dsc_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; u32 dss_ctl1, dss_ctl2, pps0 = 0, pps1 = 0; if (!intel_dsc_source_support(crtc_state)) return; power_domain = intel_dsc_power_domain(crtc, cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return; dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder)); dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder)); crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; if (!crtc_state->dsc.compression_enable) goto out; crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) && (dss_ctl1 & JOINER_ENABLE); /* FIXME: add more state readout as needed */ /* PPS0 & PPS1 */ if (!is_pipe_dsc(crtc, cpu_transcoder)) { pps1 = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); } else { pps0 = intel_de_read(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe)); pps1 = intel_de_read(dev_priv, ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); } vdsc_cfg->bits_per_pixel = pps1; if (pps0 & DSC_NATIVE_420_ENABLE) vdsc_cfg->bits_per_pixel >>= 1; crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; out: intel_display_power_put(dev_priv, power_domain, wakeref); }
linux-master
drivers/gpu/drm/i915/display/intel_vdsc.c
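A small stand-alone sketch of two conventions visible in the intel_vdsc.c code above: bits_per_pixel is carried as U6.4 fixed point (integer bpp << 4, shifted left once more for native 4:2:0, and reversed on readout), and the fourteen 8-bit RC buffer thresholds are packed four per 32-bit register dword. The threshold values below are example numbers for illustration, not taken from a specific panel configuration.

/*
 * Illustrative sketch of the U6.4 bits_per_pixel encoding from
 * intel_dsc_compute_params()/intel_dsc_get_config() and the RC_BUF_THRESH
 * packing loop from intel_dsc_pps_configure(). Example values only.
 */
#include <stdio.h>
#include <stdint.h>

#define DSC_NUM_BUF_RANGES 15

int main(void)
{
	int compressed_bpp = 8;
	int native_420 = 1;

	/* U6.4 fixed point; doubled again when running native 4:2:0. */
	uint32_t pps_bpp = compressed_bpp << 4;
	if (native_420)
		pps_bpp <<= 1;

	/* Readout reverses both steps, as intel_dsc_get_config() does. */
	uint32_t readback = native_420 ? pps_bpp >> 1 : pps_bpp;
	printf("PPS bits_per_pixel field 0x%03x -> compressed bpp %u\n",
	       pps_bpp, readback >> 4);

	/* Pack the 8-bit rc_buf_thresh values four per dword, LSB first. */
	const uint8_t rc_buf_thresh[DSC_NUM_BUF_RANGES - 1] = {
		14, 28, 42, 56, 70, 84, 98, 105, 112, 119, 121, 123, 125, 126
	};
	uint32_t dword[4] = { 0 };
	int i;

	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
		dword[i / 4] |= (uint32_t)rc_buf_thresh[i] << (8 * (i % 4));

	for (i = 0; i < 4; i++)
		printf("RC_BUF_THRESH dword %d = 0x%08x\n", i, dword[i]);

	return 0;
}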
/* * Copyright © 2007 Dave Mueller * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Dave Mueller <[email protected]> * */ #include "intel_display_types.h" #include "intel_dvo_dev.h" /* register definitions according to the TFP410 data sheet */ #define TFP410_VID 0x014C #define TFP410_DID 0x0410 #define TFP410_VID_LO 0x00 #define TFP410_VID_HI 0x01 #define TFP410_DID_LO 0x02 #define TFP410_DID_HI 0x03 #define TFP410_REV 0x04 #define TFP410_CTL_1 0x08 #define TFP410_CTL_1_TDIS (1<<6) #define TFP410_CTL_1_VEN (1<<5) #define TFP410_CTL_1_HEN (1<<4) #define TFP410_CTL_1_DSEL (1<<3) #define TFP410_CTL_1_BSEL (1<<2) #define TFP410_CTL_1_EDGE (1<<1) #define TFP410_CTL_1_PD (1<<0) #define TFP410_CTL_2 0x09 #define TFP410_CTL_2_VLOW (1<<7) #define TFP410_CTL_2_MSEL_MASK (0x7<<4) #define TFP410_CTL_2_MSEL (1<<4) #define TFP410_CTL_2_TSEL (1<<3) #define TFP410_CTL_2_RSEN (1<<2) #define TFP410_CTL_2_HTPLG (1<<1) #define TFP410_CTL_2_MDI (1<<0) #define TFP410_CTL_3 0x0A #define TFP410_CTL_3_DK_MASK (0x7<<5) #define TFP410_CTL_3_DK (1<<5) #define TFP410_CTL_3_DKEN (1<<4) #define TFP410_CTL_3_CTL_MASK (0x7<<1) #define TFP410_CTL_3_CTL (1<<1) #define TFP410_USERCFG 0x0B #define TFP410_DE_DLY 0x32 #define TFP410_DE_CTL 0x33 #define TFP410_DE_CTL_DEGEN (1<<6) #define TFP410_DE_CTL_VSPOL (1<<5) #define TFP410_DE_CTL_HSPOL (1<<4) #define TFP410_DE_CTL_DEDLY8 (1<<0) #define TFP410_DE_TOP 0x34 #define TFP410_DE_CNT_LO 0x36 #define TFP410_DE_CNT_HI 0x37 #define TFP410_DE_LIN_LO 0x38 #define TFP410_DE_LIN_HI 0x39 #define TFP410_H_RES_LO 0x3A #define TFP410_H_RES_HI 0x3B #define TFP410_V_RES_LO 0x3C #define TFP410_V_RES_HI 0x3D struct tfp410_priv { bool quiet; }; static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, 
.flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } static int tfp410_getid(struct intel_dvo_device *dvo, int addr) { u8 ch1, ch2; if (tfp410_readb(dvo, addr+0, &ch1) && tfp410_readb(dvo, addr+1, &ch2)) return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); return -1; } /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; int id; tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); if (tfp == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; return true; out: kfree(tfp); return false; } static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) { enum drm_connector_status ret = connector_status_disconnected; u8 ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { if (ctl2 & TFP410_CTL_2_RSEN) ret = connector_status_connected; else ret = connector_status_disconnected; } return ret; } static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void tfp410_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock dependencies * in the mode setup, we can just leave the registers alone and everything * will work fine. 
*/ /* don't do much */ return; } /* set the tfp410 power state */ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) { u8 ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; if (enable) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) { u8 ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return false; if (ctl1 & TFP410_CTL_1_PD) return true; else return false; } static void tfp410_dump_regs(struct intel_dvo_device *dvo) { u8 val, val2; tfp410_readb(dvo, TFP410_REV, &val); DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_1, &val); DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_2, &val); DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_3, &val); DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val); tfp410_readb(dvo, TFP410_USERCFG, &val); DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_DLY, &val); DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CTL, &val); DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_TOP, &val); DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_H_RES_LO, &val); tfp410_readb(dvo, TFP410_H_RES_HI, &val2); DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_V_RES_LO, &val); tfp410_readb(dvo, TFP410_V_RES_HI, &val2); DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); } static void tfp410_destroy(struct intel_dvo_device *dvo) { struct tfp410_priv *tfp = dvo->dev_priv; if (tfp) { kfree(tfp); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, .get_hw_state = tfp410_get_hw_state, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, };
linux-master
drivers/gpu/drm/i915/display/dvo_tfp410.c
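Aside on dvo_tfp410.c above: tfp410_getid() reads two adjacent 8-bit registers (VID_LO/VID_HI at 0x00/0x01, DID_LO/DID_HI at 0x02/0x03) and combines them low byte first before comparing against TFP410_VID (0x014C) and TFP410_DID (0x0410). The standalone sketch below reproduces only that byte-combining check against a hypothetical in-memory register dump instead of a real I2C transfer; it is an illustration of the ID check, not the driver code.

#include <stdint.h>
#include <stdio.h>

#define TFP410_VID 0x014C
#define TFP410_DID 0x0410

/* hypothetical register dump: VID_LO, VID_HI, DID_LO, DID_HI */
static const uint8_t regs[4] = { 0x4C, 0x01, 0x10, 0x04 };

/* combine two adjacent 8-bit registers the same way tfp410_getid() does */
static int getid(int addr)
{
	uint8_t lo = regs[addr];
	uint8_t hi = regs[addr + 1];

	return ((hi << 8) & 0xFF00) | (lo & 0x00FF);
}

int main(void)
{
	printf("VID 0x%04X (expected 0x%04X)\n", getid(0), TFP410_VID);
	printf("DID 0x%04X (expected 0x%04X)\n", getid(2), TFP410_DID);
	return 0;
}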
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ /** * DOC: display pinning helpers */ #include "gem/i915_gem_domain.h" #include "gem/i915_gem_object.h" #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" #include "intel_fb_pin.h" static struct i915_vma * intel_pin_fb_obj_dpt(struct drm_framebuffer *fb, const struct i915_gtt_view *view, bool uses_fence, unsigned long *out_flags, struct i915_address_space *vm) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_gem_ww_ctx ww; struct i915_vma *vma; u32 alignment; int ret; /* * We are not syncing against the binding (and potential migrations) * below, so this vm must never be async. */ GEM_WARN_ON(vm->bind_async_flags); if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) return ERR_PTR(-EINVAL); alignment = 4096 * 512; atomic_inc(&dev_priv->gpu_error.pending_fb_pin); for_i915_gem_ww(&ww, ret, true) { ret = i915_gem_object_lock(obj, &ww); if (ret) continue; if (HAS_LMEM(dev_priv)) { unsigned int flags = obj->flags; /* * For this type of buffer we need to able to read from the CPU * the clear color value found in the buffer, hence we need to * ensure it is always in the mappable part of lmem, if this is * a small-bar device. */ if (intel_fb_rc_ccs_cc_plane(fb) >= 0) flags &= ~I915_BO_ALLOC_GPU_ONLY; ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0, flags); if (ret) continue; } ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); if (ret) continue; vma = i915_vma_instance(obj, vm, view); if (IS_ERR(vma)) { ret = PTR_ERR(vma); continue; } if (i915_vma_misplaced(vma, 0, alignment, 0)) { ret = i915_vma_unbind(vma); if (ret) continue; } ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL); if (ret) continue; } if (ret) { vma = ERR_PTR(ret); goto err; } vma->display_alignment = max(vma->display_alignment, alignment); i915_gem_object_flush_if_display(obj); i915_vma_get(vma); err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); return vma; } struct i915_vma * intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor, const struct i915_gtt_view *view, bool uses_fence, unsigned long *out_flags) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); intel_wakeref_t wakeref; struct i915_gem_ww_ctx ww; struct i915_vma *vma; unsigned int pinctl; u32 alignment; int ret; if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj))) return ERR_PTR(-EINVAL); if (phys_cursor) alignment = intel_cursor_alignment(dev_priv); else alignment = intel_surf_alignment(fb, 0); if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment))) return ERR_PTR(-EINVAL); /* Note that the w/a also requires 64 PTE of padding following the * bo. We currently fill all unused PTE with the shadow page and so * we should always have valid PTE following the scanout preventing * the VT-d warning. */ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) alignment = 256 * 1024; /* * Global gtt pte registers are special registers which actually forward * writes to a chunk of system memory. Which means that there is no risk * that the register values disappear as soon as we call * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. 
*/ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); /* * Valleyview is definitely limited to scanning out the first * 512MiB. Lets presume this behaviour was inherited from the * g4x display engine and that all earlier gen are similarly * limited. Testing suggests that it is a little more * complicated than this. For example, Cherryview appears quite * happy to scanout from anywhere within its global aperture. */ pinctl = 0; if (HAS_GMCH(dev_priv)) pinctl |= PIN_MAPPABLE; i915_gem_ww_ctx_init(&ww, true); retry: ret = i915_gem_object_lock(obj, &ww); if (!ret && phys_cursor) ret = i915_gem_object_attach_phys(obj, alignment); else if (!ret && HAS_LMEM(dev_priv)) ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0); if (!ret) ret = i915_gem_object_pin_pages(obj); if (ret) goto err; vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, view, pinctl); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_unpin; } if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { /* * Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. For simplicity, we always, when * possible, install a fence as the cost is not that onerous. * * If we fail to fence the tiled scanout, then either the * modeset will reject the change (which is highly unlikely as * the affected systems, all but one, do not have unmappable * space) or we will not be able to enable full powersaving * techniques (also likely not to apply due to various limits * FBC and the like impose on the size of the buffer, which * presumably we violated anyway with this unmappable buffer). * Anyway, it is presumably better to stumble onwards with * something and try to run the system in a "less than optimal" * mode that matches the user configuration. 
*/ ret = i915_vma_pin_fence(vma); if (ret != 0 && DISPLAY_VER(dev_priv) < 4) { i915_vma_unpin(vma); goto err_unpin; } ret = 0; if (vma->fence) *out_flags |= PLANE_HAS_FENCE; } i915_vma_get(vma); err_unpin: i915_gem_object_unpin_pages(obj); err: if (ret == -EDEADLK) { ret = i915_gem_ww_ctx_backoff(&ww); if (!ret) goto retry; } i915_gem_ww_ctx_fini(&ww); if (ret) vma = ERR_PTR(ret); atomic_dec(&dev_priv->gpu_error.pending_fb_pin); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return vma; } void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); i915_vma_unpin(vma); i915_vma_put(vma); } int intel_plane_pin_fb(struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct drm_framebuffer *fb = plane_state->hw.fb; struct i915_vma *vma; bool phys_cursor = plane->id == PLANE_CURSOR && DISPLAY_INFO(dev_priv)->cursor_needs_physical; if (!intel_fb_uses_dpt(fb)) { vma = intel_pin_and_fence_fb_obj(fb, phys_cursor, &plane_state->view.gtt, intel_plane_uses_fence(plane_state), &plane_state->flags); if (IS_ERR(vma)) return PTR_ERR(vma); plane_state->ggtt_vma = vma; } else { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); vma = intel_dpt_pin(intel_fb->dpt_vm); if (IS_ERR(vma)) return PTR_ERR(vma); plane_state->ggtt_vma = vma; vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false, &plane_state->flags, intel_fb->dpt_vm); if (IS_ERR(vma)) { intel_dpt_unpin(intel_fb->dpt_vm); plane_state->ggtt_vma = NULL; return PTR_ERR(vma); } plane_state->dpt_vma = vma; WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma); } return 0; } void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) { struct drm_framebuffer *fb = old_plane_state->hw.fb; struct i915_vma *vma; if (!intel_fb_uses_dpt(fb)) { vma = fetch_and_zero(&old_plane_state->ggtt_vma); if (vma) intel_unpin_fb_vma(vma, old_plane_state->flags); } else { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); vma = fetch_and_zero(&old_plane_state->dpt_vma); if (vma) intel_unpin_fb_vma(vma, old_plane_state->flags); vma = fetch_and_zero(&old_plane_state->ggtt_vma); if (vma) intel_dpt_unpin(intel_fb->dpt_vm); } }
linux-master
drivers/gpu/drm/i915/display/intel_fb_pin.c
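Aside on intel_fb_pin.c above: intel_pin_and_fence_fb_obj() wraps its lock, migrate and pin steps in the i915 ww (wound/wait) context, retrying the whole sequence from the retry: label whenever a step fails with -EDEADLK and i915_gem_ww_ctx_backoff() succeeds. The sketch below models only that retry control flow with hypothetical try_lock_step() and backoff() helpers; it is a simplified illustration of the idiom, not the driver's locking code.

#include <errno.h>
#include <stdio.h>

static int attempts;

/* hypothetical stand-in for the object lock + pin steps under the ww context */
static int try_lock_step(void)
{
	return (attempts < 2) ? -EDEADLK : 0; /* pretend the third attempt wins */
}

/* hypothetical stand-in for i915_gem_ww_ctx_backoff() */
static int backoff(void)
{
	attempts++;
	return 0;
}

int main(void)
{
	int ret;

retry:
	ret = try_lock_step();
	if (ret == -EDEADLK) {
		ret = backoff();
		if (!ret)
			goto retry;
	}

	printf("done after %d backoff(s), ret=%d\n", attempts, ret);
	return 0;
}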
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include "i915_reg.h" #include "i9xx_plane.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" #include "intel_sprite.h" /* Primary plane formats for gen <= 3 */ static const u32 i8xx_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_XRGB1555, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, }; /* Primary plane formats for ivb (no fp16 due to hw issue) */ static const u32 ivb_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, }; /* Primary plane formats for gen >= 4, except ivb */ static const u32 i965_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XBGR16161616F, }; /* Primary plane formats for vlv/chv */ static const u32 vlv_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_XBGR16161616F, }; static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XRGB8888: return modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED; default: return false; } } static bool i965_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XBGR16161616F: return modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED; default: return false; } } static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (!HAS_FBC(dev_priv)) return false; if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return i9xx_plane == PLANE_A; /* tied to pipe A */ else if (IS_IVYBRIDGE(dev_priv)) return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || i9xx_plane == PLANE_C; else if (DISPLAY_VER(dev_priv) >= 4) return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; else return i9xx_plane == PLANE_A; } static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) return dev_priv->display.fbc[INTEL_FBC_A]; else return NULL; } static bool i9xx_plane_has_windowing(struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; if (IS_CHERRYVIEW(dev_priv)) return i9xx_plane == PLANE_B; else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) return false; else if (DISPLAY_VER(dev_priv) == 4) return i9xx_plane == PLANE_C; else return i9xx_plane == PLANE_B || 
i9xx_plane == PLANE_C; } static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; u32 dspcntr; dspcntr = DISP_ENABLE; if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) dspcntr |= DISP_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_C8: dspcntr |= DISP_FORMAT_8BPP; break; case DRM_FORMAT_XRGB1555: dspcntr |= DISP_FORMAT_BGRX555; break; case DRM_FORMAT_ARGB1555: dspcntr |= DISP_FORMAT_BGRA555; break; case DRM_FORMAT_RGB565: dspcntr |= DISP_FORMAT_BGRX565; break; case DRM_FORMAT_XRGB8888: dspcntr |= DISP_FORMAT_BGRX888; break; case DRM_FORMAT_XBGR8888: dspcntr |= DISP_FORMAT_RGBX888; break; case DRM_FORMAT_ARGB8888: dspcntr |= DISP_FORMAT_BGRA888; break; case DRM_FORMAT_ABGR8888: dspcntr |= DISP_FORMAT_RGBA888; break; case DRM_FORMAT_XRGB2101010: dspcntr |= DISP_FORMAT_BGRX101010; break; case DRM_FORMAT_XBGR2101010: dspcntr |= DISP_FORMAT_RGBX101010; break; case DRM_FORMAT_ARGB2101010: dspcntr |= DISP_FORMAT_BGRA101010; break; case DRM_FORMAT_ABGR2101010: dspcntr |= DISP_FORMAT_RGBA101010; break; case DRM_FORMAT_XBGR16161616F: dspcntr |= DISP_FORMAT_RGBX161616; break; default: MISSING_CASE(fb->format->format); return 0; } if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISP_TILED; if (rotation & DRM_MODE_ROTATE_180) dspcntr |= DISP_ROTATE_180; if (rotation & DRM_MODE_REFLECT_X) dspcntr |= DISP_MIRROR; return dspcntr; } int i9xx_check_plane_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; int src_x, src_y, src_w; u32 offset; int ret; ret = intel_plane_compute_gtt(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_x = plane_state->uapi.src.x1 >> 16; src_y = plane_state->uapi.src.y1 >> 16; /* Undocumented hardware limit on i965/g4x/vlv/chv */ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) return -EINVAL; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); if (DISPLAY_VER(dev_priv) >= 4) offset = intel_plane_compute_aligned_offset(&src_x, &src_y, plane_state, 0); else offset = 0; /* * When using an X-tiled surface the plane starts to * misbehave if the x offset + width exceeds the stride. * hsw/bdw: underrun galore * ilk/snb/ivb: wrap to the next tile row mid scanout * i965/g4x: so far appear immune to this * vlv/chv: TODO check * * Linear surfaces seem to work just fine, even on hsw/bdw * despite them not using the linear offset anymore. */ if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) { u32 alignment = intel_surf_alignment(fb, 0); int cpp = fb->format->cpp[0]; while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); return -EINVAL; } offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0, offset, offset - alignment); } } /* * Put the final coordinates back so that the src * coordinate checks will see the right values. 
*/ drm_rect_translate_to(&plane_state->uapi.src, src_x << 16, src_y << 16); /* HSW/BDW do this automagically in hardware */ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { unsigned int rotation = plane_state->hw.rotation; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; if (rotation & DRM_MODE_ROTATE_180) { src_x += src_w - 1; src_y += src_h - 1; } else if (rotation & DRM_MODE_REFLECT_X) { src_x += src_w - 1; } } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095); } else if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) { drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095); } plane_state->view.color_plane[0].offset = offset; plane_state->view.color_plane[0].x = src_x; plane_state->view.color_plane[0].y = src_y; return 0; } static int i9xx_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); int ret; ret = chv_plane_check_rotation(plane_state); if (ret) return ret; ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, i9xx_plane_has_windowing(plane)); if (ret) return ret; ret = i9xx_check_plane_surface(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); if (ret) return ret; plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); return 0; } static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dspcntr = 0; if (crtc_state->gamma_enable) dspcntr |= DISP_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) dspcntr |= DISP_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5) dspcntr |= DISP_PIPE_SEL(crtc->pipe); return dspcntr; } static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, unsigned int *num, unsigned int *den) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp = fb->format->cpp[0]; /* * g4x bspec says 64bpp pixel rate can't exceed 80% * of cdclk when the sprite plane is enabled on the * same pipe. ilk/snb bspec says 64bpp pixel rate is * never allowed to exceed 80% of cdclk. Let's just go * with the ilk/snb limit always. */ if (cpp == 8) { *num = 10; *den = 8; } else { *num = 1; *den = 1; } } static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { unsigned int pixel_rate; unsigned int num, den; /* * Note that crtc_state->pixel_rate accounts for both * horizontal and vertical panel fitter downscaling factors. * Pre-HSW bspec tells us to only consider the horizontal * downscaling factor here. We ignore that and just consider * both for simplicity. 
*/ pixel_rate = crtc_state->pixel_rate; i9xx_plane_ratio(crtc_state, plane_state, &num, &den); /* two pixels per clock with double wide pipe */ if (crtc_state->double_wide) den *= 2; return DIV_ROUND_UP(pixel_rate * num, den); } static void i9xx_plane_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), plane_state->view.color_plane[0].mapping_stride); if (DISPLAY_VER(dev_priv) < 4) { int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; int crtc_w = drm_rect_width(&plane_state->uapi.dst); int crtc_h = drm_rect_height(&plane_state->uapi.dst); /* * PLANE_A doesn't actually have a full window * generator but let's assume we still need to * program whatever is there. */ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1)); } } static void i9xx_plane_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; int x = plane_state->view.color_plane[0].x; int y = plane_state->view.color_plane[0].y; u32 dspcntr, dspaddr_offset, linear_offset; dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (DISPLAY_VER(dev_priv) >= 4) dspaddr_offset = plane_state->view.color_plane[0].offset; else dspaddr_offset = linear_offset; if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; int crtc_w = drm_rect_width(&plane_state->uapi.dst); int crtc_h = drm_rect_height(&plane_state->uapi.dst); intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } else if (DISPLAY_VER(dev_priv) >= 4) { intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), linear_offset); intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); if (DISPLAY_VER(dev_priv) >= 4) intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); else intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } static void i830_plane_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { /* * On i830/i845 all registers are self-arming [ALM040]. * * Additional breakage on i830 causes register reads to return * the last latched value instead of the last written value [ALM026]. 
*/ i9xx_plane_update_noarm(plane, crtc_state, plane_state); i9xx_plane_update_arm(plane, crtc_state, plane_state); } static void i9xx_plane_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 dspcntr; /* * DSPCNTR pipe gamma enable on g4x+ and pipe csc * enable on ilk+ affect the pipe bottom color as * well, so we must configure them even if the plane * is disabled. * * On pre-g4x there is no way to gamma correct the * pipe bottom color but we'll keep on doing this * anyway so that the crtc state readout works correctly. */ dspcntr = i9xx_plane_ctl_crtc(crtc_state); intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); if (DISPLAY_VER(dev_priv) >= 4) intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); else intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); } static void g4x_primary_async_flip(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); u32 dspaddr_offset = plane_state->view.color_plane[0].offset; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; if (async_flip) dspcntr |= DISP_ASYNC_FLIP; intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } static void vlv_primary_async_flip(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); u32 dspaddr_offset = plane_state->view.color_plane[0].offset; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } static void bdw_primary_enable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void bdw_primary_disable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_enable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_disable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_enable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_disable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); 
ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_enable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_disable_flip_done(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } static bool i9xx_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; intel_wakeref_t wakeref; bool ret; u32 val; /* * Not 100% correct for planes that can move between pipes, * but that's only the case for gen2-4 which don't have any * display power wells. */ power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); ret = val & DISP_ENABLE; if (DISPLAY_VER(dev_priv) >= 5) *pipe = plane->pipe; else *pipe = REG_FIELD_GET(DISP_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static unsigned int hsw_primary_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */ return min(8192 * cpp, 32 * 1024); } static unsigned int ilk_primary_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */ if (modifier == I915_FORMAT_MOD_X_TILED) return min(4096 * cpp, 32 * 1024); else return 32 * 1024; } unsigned int i965_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { const struct drm_format_info *info = drm_format_info(pixel_format); int cpp = info->cpp[0]; /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. 
*/ if (modifier == I915_FORMAT_MOD_X_TILED) return min(4096 * cpp, 16 * 1024); else return 32 * 1024; } static unsigned int i9xx_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); if (DISPLAY_VER(dev_priv) >= 3) { if (modifier == I915_FORMAT_MOD_X_TILED) return 8*1024; else return 16*1024; } else { if (plane->i9xx_plane == PLANE_C) return 4*1024; else return 8*1024; } } static const struct drm_plane_funcs i965_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = i965_plane_format_mod_supported, }; static const struct drm_plane_funcs i8xx_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = i8xx_plane_format_mod_supported, }; struct intel_plane * intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; plane = intel_plane_alloc(); if (IS_ERR(plane)) return plane; plane->pipe = pipe; /* * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS * port is hooked to pipe B. Hence we want plane A feeding pipe B. */ if (HAS_FBC(dev_priv) && DISPLAY_VER(dev_priv) < 4 && INTEL_NUM_PIPES(dev_priv) == 2) plane->i9xx_plane = (enum i9xx_plane_id) !pipe; else plane->i9xx_plane = (enum i9xx_plane_id) pipe; plane->id = PLANE_PRIMARY; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { formats = vlv_primary_formats; num_formats = ARRAY_SIZE(vlv_primary_formats); } else if (DISPLAY_VER(dev_priv) >= 4) { /* * WaFP16GammaEnabling:ivb * "Workaround : When using the 64-bit format, the plane * output on each color channel has one quarter amplitude. * It can be brought up to full amplitude by using pipe * gamma correction or pipe color space conversion to * multiply the plane output by four." * * There is no dedicated plane gamma for the primary plane, * and using the pipe gamma/csc could conflict with other * planes, so we choose not to expose fp16 on IVB primary * planes. HSW primary planes no longer have this problem. 
*/ if (IS_IVYBRIDGE(dev_priv)) { formats = ivb_primary_formats; num_formats = ARRAY_SIZE(ivb_primary_formats); } else { formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); } } else { formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); } if (DISPLAY_VER(dev_priv) >= 4) plane_funcs = &i965_plane_funcs; else plane_funcs = &i8xx_plane_funcs; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) plane->min_cdclk = vlv_plane_min_cdclk; else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) plane->min_cdclk = hsw_plane_min_cdclk; else if (IS_IVYBRIDGE(dev_priv)) plane->min_cdclk = ivb_plane_min_cdclk; else plane->min_cdclk = i9xx_plane_min_cdclk; if (HAS_GMCH(dev_priv)) { if (DISPLAY_VER(dev_priv) >= 4) plane->max_stride = i965_plane_max_stride; else plane->max_stride = i9xx_plane_max_stride; } else { if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) plane->max_stride = hsw_primary_max_stride; else plane->max_stride = ilk_primary_max_stride; } if (IS_I830(dev_priv) || IS_I845G(dev_priv)) { plane->update_arm = i830_plane_update_arm; } else { plane->update_noarm = i9xx_plane_update_noarm; plane->update_arm = i9xx_plane_update_arm; } plane->disable_arm = i9xx_plane_disable_arm; plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { plane->async_flip = vlv_primary_async_flip; plane->enable_flip_done = vlv_primary_enable_flip_done; plane->disable_flip_done = vlv_primary_disable_flip_done; } else if (IS_BROADWELL(dev_priv)) { plane->need_async_flip_disable_wa = true; plane->async_flip = g4x_primary_async_flip; plane->enable_flip_done = bdw_primary_enable_flip_done; plane->disable_flip_done = bdw_primary_disable_flip_done; } else if (DISPLAY_VER(dev_priv) >= 7) { plane->async_flip = g4x_primary_async_flip; plane->enable_flip_done = ivb_primary_enable_flip_done; plane->disable_flip_done = ivb_primary_disable_flip_done; } else if (DISPLAY_VER(dev_priv) >= 5) { plane->async_flip = g4x_primary_async_flip; plane->enable_flip_done = ilk_primary_enable_flip_done; plane->disable_flip_done = ilk_primary_disable_flip_done; } modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(plane->i9xx_plane)); kfree(modifiers); if (ret) goto fail; if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X; } else if (DISPLAY_VER(dev_priv) >= 4) { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; } else { supported_rotations = DRM_MODE_ROTATE_0; } if (DISPLAY_VER(dev_priv) >= 4) drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, supported_rotations); zpos = 0; drm_plane_create_zpos_immutable_property(&plane->base, zpos); intel_plane_helper_add(plane); return plane; fail: intel_plane_free(plane); return ERR_PTR(ret); } static int i9xx_format_to_fourcc(int format) { switch (format) { case DISP_FORMAT_8BPP: return DRM_FORMAT_C8; case DISP_FORMAT_BGRA555: return DRM_FORMAT_ARGB1555; case DISP_FORMAT_BGRX555: return DRM_FORMAT_XRGB1555; case DISP_FORMAT_BGRX565: return 
DRM_FORMAT_RGB565; default: case DISP_FORMAT_BGRX888: return DRM_FORMAT_XRGB8888; case DISP_FORMAT_RGBX888: return DRM_FORMAT_XBGR8888; case DISP_FORMAT_BGRA888: return DRM_FORMAT_ARGB8888; case DISP_FORMAT_RGBA888: return DRM_FORMAT_ABGR8888; case DISP_FORMAT_BGRX101010: return DRM_FORMAT_XRGB2101010; case DISP_FORMAT_RGBX101010: return DRM_FORMAT_XBGR2101010; case DISP_FORMAT_BGRA101010: return DRM_FORMAT_ARGB2101010; case DISP_FORMAT_RGBA101010: return DRM_FORMAT_ABGR2101010; case DISP_FORMAT_RGBX161616: return DRM_FORMAT_XBGR16161616F; } } void i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; enum pipe pipe; u32 val, base, offset; int fourcc, pixel_format; unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; if (!plane->get_hw_state(plane, &pipe)) return; drm_WARN_ON(dev, pipe != crtc->pipe); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); return; } fb = &intel_fb->base; fb->dev = dev; val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (DISPLAY_VER(dev_priv) >= 4) { if (val & DISP_TILED) { plane_config->tiling = I915_TILING_X; fb->modifier = I915_FORMAT_MOD_X_TILED; } if (val & DISP_ROTATE_180) plane_config->rotation = DRM_MODE_ROTATE_180; } if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && val & DISP_MIRROR) plane_config->rotation |= DRM_MODE_REFLECT_X; pixel_format = val & DISP_FORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); fb->format = drm_format_info(fourcc); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else if (DISPLAY_VER(dev_priv) >= 4) { if (plane_config->tiling) offset = intel_de_read(dev_priv, DSPTILEOFF(i9xx_plane)); else offset = intel_de_read(dev_priv, DSPLINOFF(i9xx_plane)); base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else { offset = 0; base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); } plane_config->base = base; drm_WARN_ON(&dev_priv->drm, offset != 0); val = intel_de_read(dev_priv, PIPESRC(pipe)); fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1; fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1; val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(fb, 0, fb->height); plane_config->size = fb->pitches[0] * aligned_height; drm_dbg_kms(&dev_priv->drm, "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", crtc->base.name, plane->base.name, fb->width, fb->height, fb->format->cpp[0] * 8, base, fb->pitches[0], plane_config->size); plane_config->fb = intel_fb; }
linux-master
drivers/gpu/drm/i915/display/i9xx_plane.c
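Aside on i9xx_plane.c above: i9xx_plane_ratio() and i9xx_plane_min_cdclk() encode the rule that 64bpp (fp16) pixel rate may not exceed 80% of cdclk by scaling the pixel rate by num/den = 10/8, and a double wide pipe halves the requirement because it pushes two pixels per clock. The sketch below redoes that arithmetic for a made-up 148500 kHz pixel rate so the factors show up as plain numbers; the min_cdclk() helper here is an invention for the example, not the driver function.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* cpp is bytes per pixel: 8 for fp16 (64bpp), 4 for 32bpp formats */
static unsigned int min_cdclk(unsigned int pixel_rate, unsigned int cpp,
			      bool double_wide)
{
	unsigned int num = (cpp == 8) ? 10 : 1;	/* 10/8 == 1/0.8 */
	unsigned int den = (cpp == 8) ? 8 : 1;

	if (double_wide)
		den *= 2;	/* two pixels per clock */

	return DIV_ROUND_UP(pixel_rate * num, den);
}

int main(void)
{
	/* hypothetical 148500 kHz pixel rate */
	printf("fp16, single wide: %u kHz\n", min_cdclk(148500, 8, false));
	printf("fp16, double wide: %u kHz\n", min_cdclk(148500, 8, true));
	printf("32bpp:             %u kHz\n", min_cdclk(148500, 4, false));
	return 0;
}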
/* * Copyright © 2012 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Eugeni Dodonov <[email protected]> * */ #include <linux/string_helpers.h> #include <drm/display/drm_scdc_helper.h> #include <drm/drm_privacy_screen_consumer.h> #include "i915_drv.h" #include "i915_reg.h" #include "icl_dsi.h" #include "intel_audio.h" #include "intel_audio_regs.h" #include "intel_backlight.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_cx0_phy.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_hti.h" #include "intel_lspcon.h" #include "intel_mg_phy_regs.h" #include "intel_modeset_lock.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" #include "intel_snps_phy.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" static const u8 index_to_dp_signal_levels[] = { [0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0, [1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1, [2] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2, [3] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3, [4] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0, [5] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1, [6] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2, [7] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0, [8] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1, [9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0, }; static int intel_ddi_hdmi_level(struct intel_encoder *encoder, const struct intel_ddi_buf_trans *trans) { int level; level = intel_bios_hdmi_level_shift(encoder->devdata); if (level < 0) level = trans->hdmi_default_entry; return level; } static bool has_buf_trans_select(struct drm_i915_private *i915) { return DISPLAY_VER(i915) < 10 && 
!IS_BROXTON(i915); } static bool has_iboost(struct drm_i915_private *i915) { return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915); } /* * Starting with Haswell, DDI port buffers must be programmed with correct * values in advance. This function programs the correct values for * DP/eDP/FDI use cases. */ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; int i, n_entries; enum port port = encoder->port; const struct intel_ddi_buf_trans *trans; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && intel_bios_dp_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i), trans->entries[i].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i), trans->entries[i].hsw.trans2); } } /* * Starting with Haswell, DDI port buffers must be programmed with correct * values in advance. This function programs the correct values for * HDMI/DVI use cases. */ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int level = intel_ddi_level(encoder, crtc_state, 0); u32 iboost_bit = 0; int n_entries; enum port port = encoder->port; const struct intel_ddi_buf_trans *trans; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && intel_bios_hdmi_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9), trans->entries[level].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9), trans->entries[level].hsw.trans2); } static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port) { int ret; /* FIXME: find out why Bspec's 100us timeout is too short */ ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE), 10000); if (ret) drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n", port_name(port)); } void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { if (IS_BROXTON(dev_priv)) { udelay(16); return; } if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n", port_name(port)); } static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, enum port port) { enum phy phy = intel_port_to_phy(dev_priv, port); int timeout_us; int ret; /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */ if (DISPLAY_VER(dev_priv) < 10) { usleep_range(518, 1000); return; } if (DISPLAY_VER(dev_priv) >= 14) { timeout_us = 10000; } else if (IS_DG2(dev_priv)) { timeout_us = 1200; } else if (DISPLAY_VER(dev_priv) >= 12) { if (intel_phy_is_tc(dev_priv, phy)) timeout_us = 3000; else timeout_us = 1000; } else { timeout_us = 500; } if (DISPLAY_VER(dev_priv) >= 14) ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE), timeout_us, 10, 10); else ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & 
DDI_BUF_IS_IDLE), timeout_us, 10, 10); if (ret) drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n", port_name(port)); } static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { switch (pll->info->id) { case DPLL_ID_WRPLL1: return PORT_CLK_SEL_WRPLL1; case DPLL_ID_WRPLL2: return PORT_CLK_SEL_WRPLL2; case DPLL_ID_SPLL: return PORT_CLK_SEL_SPLL; case DPLL_ID_LCPLL_810: return PORT_CLK_SEL_LCPLL_810; case DPLL_ID_LCPLL_1350: return PORT_CLK_SEL_LCPLL_1350; case DPLL_ID_LCPLL_2700: return PORT_CLK_SEL_LCPLL_2700; default: MISSING_CASE(pll->info->id); return PORT_CLK_SEL_NONE; } } static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { const struct intel_shared_dpll *pll = crtc_state->shared_dpll; int clock = crtc_state->port_clock; const enum intel_dpll_id id = pll->info->id; switch (id) { default: /* * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used * here, so do warn if this get passed in */ MISSING_CASE(id); return DDI_CLK_SEL_NONE; case DPLL_ID_ICL_TBTPLL: switch (clock) { case 162000: return DDI_CLK_SEL_TBT_162; case 270000: return DDI_CLK_SEL_TBT_270; case 540000: return DDI_CLK_SEL_TBT_540; case 810000: return DDI_CLK_SEL_TBT_810; default: MISSING_CASE(clock); return DDI_CLK_SEL_NONE; } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: case DPLL_ID_ICL_MGPLL3: case DPLL_ID_ICL_MGPLL4: case DPLL_ID_TGL_MGPLL5: case DPLL_ID_TGL_MGPLL6: return DDI_CLK_SEL_MG; } } static u32 ddi_buf_phy_link_rate(int port_clock) { switch (port_clock) { case 162000: return DDI_BUF_PHY_LINK_RATE(0); case 216000: return DDI_BUF_PHY_LINK_RATE(4); case 243000: return DDI_BUF_PHY_LINK_RATE(5); case 270000: return DDI_BUF_PHY_LINK_RATE(1); case 324000: return DDI_BUF_PHY_LINK_RATE(6); case 432000: return DDI_BUF_PHY_LINK_RATE(7); case 540000: return DDI_BUF_PHY_LINK_RATE(2); case 810000: return DDI_BUF_PHY_LINK_RATE(3); default: MISSING_CASE(port_clock); return DDI_BUF_PHY_LINK_RATE(0); } } static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(i915, encoder->port); /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */ intel_dp->DP = dig_port->saved_port_bits | DDI_PORT_WIDTH(crtc_state->lane_count) | DDI_BUF_TRANS_SELECT(0); if (DISPLAY_VER(i915) >= 14) { if (intel_dp_is_uhbr(crtc_state)) intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT; else intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT; } if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) { intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } } static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, enum port port) { u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; switch (val) { case DDI_CLK_SEL_NONE: return 0; case DDI_CLK_SEL_TBT_162: return 162000; case DDI_CLK_SEL_TBT_270: return 270000; case DDI_CLK_SEL_TBT_540: return 540000; case DDI_CLK_SEL_TBT_810: return 810000; default: MISSING_CASE(val); return 0; } } static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { /* CRT dotclock is determined via other means */ if (pipe_config->has_pch_encoder) return; pipe_config->hw.adjusted_mode.crtc_clock = 
intel_crtc_dotclock(pipe_config); } void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 temp; if (!intel_crtc_has_dp_encoder(crtc_state)) return; drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)); temp = DP_MSA_MISC_SYNC_CLOCK; switch (crtc_state->pipe_bpp) { case 18: temp |= DP_MSA_MISC_6_BPC; break; case 24: temp |= DP_MSA_MISC_8_BPC; break; case 30: temp |= DP_MSA_MISC_10_BPC; break; case 36: temp |= DP_MSA_MISC_12_BPC; break; default: MISSING_CASE(crtc_state->pipe_bpp); break; } /* nonsense combination */ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->limited_color_range) temp |= DP_MSA_MISC_COLOR_CEA_RGB; /* * As per DP 1.2 spec section 2.3.4.3 while sending * YCBCR 444 signals we should program MSA MISC1/0 fields with * colorspace information. */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709; /* * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication * of Color Encoding Format and Content Color Gamut] while sending * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format. */ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) temp |= DP_MSA_MISC_COLOR_VSC_SDP; intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp); } static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder) { if (master_transcoder == TRANSCODER_EDP) return 0; else return master_transcoder + 1; } static void intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; if (intel_dp_is_uhbr(crtc_state)) val = TRANS_DP2_128B132B_CHANNEL_CODING; intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val); } /* * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. * * Only intended to be used by intel_ddi_enable_transcoder_func() and * intel_ddi_config_transcoder_func(). 
*/ static u32 intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; u32 temp; /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; if (DISPLAY_VER(dev_priv) >= 12) temp |= TGL_TRANS_DDI_SELECT_PORT(port); else temp |= TRANS_DDI_SELECT_PORT(port); switch (crtc_state->pipe_bpp) { default: MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: temp |= TRANS_DDI_BPC_6; break; case 24: temp |= TRANS_DDI_BPC_8; break; case 30: temp |= TRANS_DDI_BPC_10; break; case 36: temp |= TRANS_DDI_BPC_12; break; } if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DDI_PVSYNC; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DDI_PHSYNC; if (cpu_transcoder == TRANSCODER_EDP) { switch (pipe) { default: MISSING_CASE(pipe); fallthrough; case PIPE_A: /* On Haswell, can only use the always-on power well for * eDP when not using the panel fitter, and when not * using motion blur mitigation (which we don't * support). */ if (crtc_state->pch_pfit.force_thru) temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; else temp |= TRANS_DDI_EDP_INPUT_A_ON; break; case PIPE_B: temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; break; case PIPE_C: temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; break; } } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { if (crtc_state->has_hdmi_sink) temp |= TRANS_DDI_MODE_SELECT_HDMI; else temp |= TRANS_DDI_MODE_SELECT_DVI; if (crtc_state->hdmi_scrambling) temp |= TRANS_DDI_HDMI_SCRAMBLING; if (crtc_state->hdmi_high_tmds_clock_ratio) temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; if (DISPLAY_VER(dev_priv) >= 14) temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; temp |= (crtc_state->fdi_lanes - 1) << 1; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { if (intel_dp_is_uhbr(crtc_state)) temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; else temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); if (DISPLAY_VER(dev_priv) >= 12) { enum transcoder master; master = crtc_state->mst_master_transcoder; drm_WARN_ON(&dev_priv->drm, master == INVALID_TRANSCODER); temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); } } else { temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); } if (IS_DISPLAY_VER(dev_priv, 8, 10) && crtc_state->master_transcoder != INVALID_TRANSCODER) { u8 master_select = bdw_trans_port_sync_master_select(crtc_state->master_transcoder); temp |= TRANS_DDI_PORT_SYNC_ENABLE | TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select); } return temp; } void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) >= 11) { enum transcoder master_transcoder = crtc_state->master_transcoder; u32 ctl2 = 0; if (master_transcoder != INVALID_TRANSCODER) { u8 master_select = bdw_trans_port_sync_master_select(master_transcoder); ctl2 |= PORT_SYNC_MODE_ENABLE | 
PORT_SYNC_MODE_MASTER_SELECT(master_select); } intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2); } intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state)); } /* * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable * bit. */ static void intel_ddi_config_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 ctl; ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state); ctl &= ~TRANS_DDI_FUNC_ENABLE; intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); } void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 ctl; if (DISPLAY_VER(dev_priv) >= 11) intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0); ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING); ctl &= ~TRANS_DDI_FUNC_ENABLE; if (IS_DISPLAY_VER(dev_priv, 8, 10)) ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE | TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK); if (DISPLAY_VER(dev_priv) >= 12) { if (!intel_dp_mst_is_master_trans(crtc_state)) { ctl &= ~(TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } } else { ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { drm_dbg_kms(&dev_priv->drm, "Quirk Increase DDI disabled time\n"); /* Quirk time at 100ms for reliable operation */ msleep(100); } } int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, enum transcoder cpu_transcoder, bool enable, u32 hdcp_mask) { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; int ret = 0; wakeref = intel_display_power_get_if_enabled(dev_priv, intel_encoder->power_domain); if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), hdcp_mask, enable ? 
hdcp_mask : 0); intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder = intel_attached_encoder(intel_connector); int type = intel_connector->base.connector_type; enum port port = encoder->port; enum transcoder cpu_transcoder; intel_wakeref_t wakeref; enum pipe pipe = 0; u32 tmp; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; if (!encoder->get_hw_state(encoder, &pipe)) { ret = false; goto out; } if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: case TRANS_DDI_MODE_SELECT_DVI: ret = type == DRM_MODE_CONNECTOR_HDMIA; break; case TRANS_DDI_MODE_SELECT_DP_SST: ret = type == DRM_MODE_CONNECTOR_eDP || type == DRM_MODE_CONNECTOR_DisplayPort; break; case TRANS_DDI_MODE_SELECT_DP_MST: /* if the transcoder is in MST state then * connector isn't connected */ ret = false; break; case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B: if (HAS_DP20(dev_priv)) /* 128b/132b */ ret = false; else /* FDI */ ret = type == DRM_MODE_CONNECTOR_VGA; break; default: ret = false; break; } out: intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, u8 *pipe_mask, bool *is_dp_mst) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = encoder->port; intel_wakeref_t wakeref; enum pipe p; u32 tmp; u8 mst_pipe_mask; *pipe_mask = 0; *is_dp_mst = false; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return; tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) { tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: case TRANS_DDI_EDP_INPUT_A_ONOFF: *pipe_mask = BIT(PIPE_A); break; case TRANS_DDI_EDP_INPUT_B_ONOFF: *pipe_mask = BIT(PIPE_B); break; case TRANS_DDI_EDP_INPUT_C_ONOFF: *pipe_mask = BIT(PIPE_C); break; } goto out; } mst_pipe_mask = 0; for_each_pipe(dev_priv, p) { enum transcoder cpu_transcoder = (enum transcoder)p; unsigned int port_mask, ddi_select; intel_wakeref_t trans_wakeref; trans_wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder)); if (!trans_wakeref) continue; if (DISPLAY_VER(dev_priv) >= 12) { port_mask = TGL_TRANS_DDI_PORT_MASK; ddi_select = TGL_TRANS_DDI_SELECT_PORT(port); } else { port_mask = TRANS_DDI_PORT_MASK; ddi_select = TRANS_DDI_SELECT_PORT(port); } tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder), trans_wakeref); if ((tmp & port_mask) != ddi_select) continue; if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST || (HAS_DP20(dev_priv) && (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)) mst_pipe_mask |= BIT(p); *pipe_mask |= BIT(p); } 
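/*
 * Sanity-check the readout below: a non-MST encoder is only expected to
 * drive a single pipe, and the MST vs. non-MST mode read back from the
 * transcoders must be consistent across every pipe found above.
 */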
if (!*pipe_mask) drm_dbg_kms(&dev_priv->drm, "No pipe for [ENCODER:%d:%s] found\n", encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { drm_dbg_kms(&dev_priv->drm, "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", encoder->base.base.id, encoder->base.name, *pipe_mask); *pipe_mask = BIT(ffs(*pipe_mask) - 1); } if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) drm_dbg_kms(&dev_priv->drm, "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n", encoder->base.base.id, encoder->base.name, *pipe_mask, mst_pipe_mask); else *is_dp_mst = mst_pipe_mask; out: if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) { tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port)); if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) drm_err(&dev_priv->drm, "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n", encoder->base.base.id, encoder->base.name, tmp); } intel_display_power_put(dev_priv, encoder->power_domain, wakeref); } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { u8 pipe_mask; bool is_mst; intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); if (is_mst || !pipe_mask) return false; *pipe = ffs(pipe_mask) - 1; return true; } static enum intel_display_power_domain intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); /* * ICL+ HW requires corresponding AUX IOs to be powered up for PSR with * DC states enabled at the same time, while for driver initiated AUX * transfers we need the same AUX IOs to be powered but with DC states * disabled. Accordingly use the AUX_IO_<port> power domain here which * leaves DC states enabled. * * Before MTL TypeC PHYs (in all TypeC modes and both DP/HDMI) also require * AUX IO to be enabled, but all these require DC_OFF to be enabled as * well, so we can acquire a wider AUX_<port> power domain reference * instead of a specific AUX_IO_<port> reference without powering up any * extra wells. 
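 *
 * In short: PSR-capable ports take the AUX_IO_<port> domain, pre-MTL
 * ports driving DP or a TypeC PHY take the wider AUX_<port> domain,
 * and everything else needs no AUX power reference at all.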
*/ if (intel_encoder_can_psr(&dig_port->base)) return intel_display_power_aux_io_domain(i915, dig_port->aux_ch); else if (DISPLAY_VER(i915) < 14 && (intel_crtc_has_dp_encoder(crtc_state) || intel_phy_is_tc(i915, phy))) return intel_aux_power_domain(dig_port); else return POWER_DOMAIN_INVALID; } static void main_link_aux_power_domain_get(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum intel_display_power_domain domain = intel_ddi_main_link_aux_domain(dig_port, crtc_state); drm_WARN_ON(&i915->drm, dig_port->aux_wakeref); if (domain == POWER_DOMAIN_INVALID) return; dig_port->aux_wakeref = intel_display_power_get(i915, domain); } static void main_link_aux_power_domain_put(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum intel_display_power_domain domain = intel_ddi_main_link_aux_domain(dig_port, crtc_state); intel_wakeref_t wf; wf = fetch_and_zero(&dig_port->aux_wakeref); if (!wf) return; intel_display_power_put(i915, domain, wf); } static void intel_ddi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; /* * TODO: Add support for MST encoders. Atm, the following should never * happen since fake-MST encoders don't set their get_power_domains() * hook. */ if (drm_WARN_ON(&dev_priv->drm, intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) return; dig_port = enc_to_dig_port(encoder); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); } main_link_aux_power_domain_get(dig_port, crtc_state); } void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; if (cpu_transcoder == TRANSCODER_EDP) return; if (DISPLAY_VER(dev_priv) >= 13) val = TGL_TRANS_CLK_SEL_PORT(phy); else if (DISPLAY_VER(dev_priv) >= 12) val = TGL_TRANS_CLK_SEL_PORT(encoder->port); else val = TRANS_CLK_SEL_PORT(encoder->port); intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val; if (cpu_transcoder == TRANSCODER_EDP) return; if (DISPLAY_VER(dev_priv) >= 12) val = TGL_TRANS_CLK_SEL_DISABLED; else val = TRANS_CLK_SEL_DISABLED; intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, enum port port, u8 iboost) { u32 tmp; tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0); tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); if (iboost) tmp |= iboost << BALANCE_LEG_SHIFT(port); else tmp |= BALANCE_LEG_DISABLE(port); intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp); } static void skl_ddi_set_iboost(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int level) { struct intel_digital_port *dig_port = 
enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u8 iboost; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) iboost = intel_bios_hdmi_boost_level(encoder->devdata); else iboost = intel_bios_dp_boost_level(encoder->devdata); if (iboost == 0) { const struct intel_ddi_buf_trans *trans; int n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; iboost = trans->entries[level].hsw.i_boost; } /* Make sure that the requested I_boost is valid */ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) { drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost); return; } _skl_ddi_set_iboost(dev_priv, encoder->port, iboost); if (encoder->port == PORT_A && dig_port->max_lanes == 4) _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); } static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries; encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) n_entries = 1; if (drm_WARN_ON(&dev_priv->drm, n_entries > ARRAY_SIZE(index_to_dp_signal_levels))) n_entries = ARRAY_SIZE(index_to_dp_signal_levels); return index_to_dp_signal_levels[n_entries - 1] & DP_TRAIN_VOLTAGE_SWING_MASK; } /* * We assume that the full set of pre-emphasis values can be * used on all DDI platforms. Should that change we need to * rethink this code. */ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp) { return DP_TRAIN_PRE_EMPH_LEVEL_3; } static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state, int lane) { if (crtc_state->port_clock > 600000) return 0; if (crtc_state->lane_count == 4) return lane >= 1 ? LOADGEN_SELECT : 0; else return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0; } static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; u32 val; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED; intel_dp->hobl_active = is_hobl_buf_trans(trans); intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val, intel_dp->hobl_active ? val : 0); } /* Set PORT_TX_DW5 */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); val |= TAP3_DISABLE; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | RCOMP_SCALAR(0x98)); } /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. 
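 * Instead, each lane's PORT_TX_DW4 gets its own read-modify-write in the
 * loop below, which leaves the per-lane LOADGEN_SELECT bits untouched.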
*/ for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff)); } /* Program PORT_TX_DW7 */ for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), N_SCALAR_MASK, N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); } } static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; int ln; /* * 1. If port type is eDP or DP, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); /* 2. Program loadgen select */ /* * Program PORT_TX_DW4 depending on Bit rate and used lanes * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1) * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0) * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln < 4; ln++) { intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), LOADGEN_SELECT, icl_combo_phy_loadgen_select(crtc_state, ln)); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); /* 4. Clear training enable to change swing values */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ icl_ddi_combo_vswing_program(encoder, crtc_state); /* 6. 
Set training enable to trigger update */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); } static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct intel_ddi_buf_trans *trans; int n_entries, ln; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; for (ln = 0; ln < 2; ln++) { intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), CRI_USE_FS32, 0); intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), CRI_USE_FS32, 0); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { int level; level = intel_ddi_level(encoder, crtc_state, 2*ln+0); intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_17_12_MASK, CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_17_12_MASK, CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { int level; level = intel_ddi_level(encoder, crtc_state, 2*ln+0); intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK, CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | CRI_TXDEEMPH_OVERRIDE_EN); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK, CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | CRI_TXDEEMPH_OVERRIDE_EN); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } /* * Program MG_CLKHUB<LN, port being used> with value from frequency table * In case of Legacy mode on MG PHY, both TX1 and TX2 enabled so use the * values from table for which TX1 and TX2 enabled. */ for (ln = 0; ln < 2; ln++) { intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), CFG_LOW_RATE_LKREN_EN, crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | CFG_AMI_CK_DIV_OVERRIDE_EN, crtc_state->port_clock > 500000 ? CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | CFG_AMI_CK_DIV_OVERRIDE_EN : 0); intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | CFG_AMI_CK_DIV_OVERRIDE_EN, crtc_state->port_clock > 500000 ? 
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | CFG_AMI_CK_DIV_OVERRIDE_EN : 0); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), 0, CRI_CALCINIT); intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), 0, CRI_CALCINIT); } } static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct intel_ddi_buf_trans *trans; int n_entries, ln; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; for (ln = 0; ln < 2; ln++) { int level; intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0); level = intel_ddi_level(encoder, crtc_state, 2*ln+0); intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port, ln), DKL_TX_PRESHOOT_COEFF_MASK | DKL_TX_DE_EMPAHSIS_COEFF_MASK | DKL_TX_VSWING_CONTROL_MASK, DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port, ln), DKL_TX_PRESHOOT_COEFF_MASK | DKL_TX_DE_EMPAHSIS_COEFF_MASK | DKL_TX_VSWING_CONTROL_MASK, DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln), DKL_TX_DP20BITMODE, 0); if (IS_ALDERLAKE_P(dev_priv)) { u32 val; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { if (ln == 0) { val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2); } else { val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3); val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3); } } else { val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); } intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln), DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, val); } } } static int translate_signal_level(struct intel_dp *intel_dp, u8 signal_levels) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i; for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) { if (index_to_dp_signal_levels[i] == signal_levels) return i; } drm_WARN(&i915->drm, 1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n", signal_levels); return 0; } static int intel_ddi_dp_level(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int lane) { u8 train_set = intel_dp->train_set[lane]; if (intel_dp_is_uhbr(crtc_state)) { return train_set & DP_TX_FFE_PRESET_VALUE_MASK; } else { u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); return translate_signal_level(intel_dp, signal_levels); } } int intel_ddi_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int lane) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; int level, n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&i915->drm, !trans)) return 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) level = 
intel_ddi_hdmi_level(encoder, trans); else level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state, lane); if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries)) level = n_entries - 1; return level; } static void hsw_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int level = intel_ddi_level(encoder, crtc_state, 0); enum port port = encoder->port; u32 signal_levels; if (has_iboost(dev_priv)) skl_ddi_set_iboost(encoder, crtc_state, level); /* HDMI ignores the rest */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return; signal_levels = DDI_BUF_TRANS_SELECT(level); drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~DDI_BUF_EMP_MASK; intel_dp->DP |= signal_levels; intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_sel_mask, u32 clk_sel, u32 clk_off) { mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, clk_sel_mask, clk_sel); /* * "This step and the step before must be * done with separate register writes." */ intel_de_rmw(i915, reg, clk_off, 0); mutex_unlock(&i915->display.dpll.lock); } static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_off) { mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, 0, clk_off); mutex_unlock(&i915->display.dpll.lock); } static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_off) { return !(intel_de_read(i915, reg) & clk_off); } static struct intel_shared_dpll * _icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_sel_mask, u32 clk_sel_shift) { enum intel_dpll_id id; id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift; return intel_get_shared_dpll_by_id(i915, id); } static void adls_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_port_to_phy(i915, encoder->port); if (drm_WARN_ON(&i915->drm, !pll)) return; _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy), ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static void adls_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy), ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy)); } static void rkl_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct 
drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_port_to_phy(i915, encoder->port); if (drm_WARN_ON(&i915->drm, !pll)) return; _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static void rkl_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)); } static void dg1_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_port_to_phy(i915, encoder->port); if (drm_WARN_ON(&i915->drm, !pll)) return; /* * If we fail this, something went very wrong: first 2 PLLs should be * used by first 2 phys and last 2 PLLs by last phys */ if (drm_WARN_ON(&i915->drm, (pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) || (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C))) return; _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static void dg1_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); enum intel_dpll_id id; u32 val; val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy)); val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); id = val; /* * _DG1_DPCLKA0_CFGCR0 maps between DPLL 0 and 1 with one bit for phy A * and B while _DG1_DPCLKA1_CFGCR0 maps between DPLL 2 and 3 with one * bit for phy C and D. 
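 * A raw selector value of 0 therefore picks DPLL0 on phys A/B but DPLL2
 * on phys C/D once the offset below has been applied.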
*/ if (phy >= PHY_C) id += DPLL_ID_DG1_DPLL2; return intel_get_shared_dpll_by_id(i915, id); } static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_port_to_phy(i915, encoder->port); if (drm_WARN_ON(&i915->drm, !pll)) return; _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)); } static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port = encoder->port; if (drm_WARN_ON(&i915->drm, !pll)) return; /* * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port. * MG does not exist, but the programming is required to ungate DDIC and DDID." 
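 * Once DDI_CLK_SEL is programmed, the actual clock routing is done by
 * the combo PHY helpers, as the call below (and the matching disable
 * path) shows.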
*/ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG); icl_ddi_combo_enable_clock(encoder, crtc_state); } static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; icl_ddi_combo_disable_clock(encoder); intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; u32 tmp; tmp = intel_de_read(i915, DDI_CLK_SEL(port)); if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) return false; return icl_ddi_combo_is_clock_enabled(encoder); } static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; if (drm_WARN_ON(&i915->drm, !pll)) return; intel_de_write(i915, DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0); mutex_unlock(&i915->display.dpll.lock); } static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); mutex_unlock(&i915->display.dpll.lock); intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; u32 tmp; tmp = intel_de_read(i915, DDI_CLK_SEL(port)); if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) return false; tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0); return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); } static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; enum intel_dpll_id id; u32 tmp; tmp = intel_de_read(i915, DDI_CLK_SEL(port)); switch (tmp & DDI_CLK_SEL_MASK) { case DDI_CLK_SEL_TBT_162: case DDI_CLK_SEL_TBT_270: case DDI_CLK_SEL_TBT_540: case DDI_CLK_SEL_TBT_810: id = DPLL_ID_ICL_TBTPLL; break; case DDI_CLK_SEL_MG: id = icl_tc_port_to_pll_id(tc_port); break; default: MISSING_CASE(tmp); fallthrough; case DDI_CLK_SEL_NONE: return NULL; } return intel_get_shared_dpll_by_id(i915, id); } static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum intel_dpll_id id; switch (encoder->port) { case PORT_A: id = DPLL_ID_SKL_DPLL0; break; case PORT_B: id = DPLL_ID_SKL_DPLL1; break; case PORT_C: id = DPLL_ID_SKL_DPLL2; break; default: MISSING_CASE(encoder->port); return NULL; } return intel_get_shared_dpll_by_id(i915, id); } static void skl_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; 
enum port port = encoder->port; if (drm_WARN_ON(&i915->drm, !pll)) return; mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, DPLL_CTRL2_DDI_CLK_OFF(port) | DPLL_CTRL2_DDI_CLK_SEL_MASK(port), DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); mutex_unlock(&i915->display.dpll.lock); } static void skl_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, 0, DPLL_CTRL2_DDI_CLK_OFF(port)); mutex_unlock(&i915->display.dpll.lock); } static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; /* * FIXME Not sure if the override affects both * the PLL selection and the CLK_OFF bit. */ return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)); } static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; enum intel_dpll_id id; u32 tmp; tmp = intel_de_read(i915, DPLL_CTRL2); /* * FIXME Not sure if the override affects both * the PLL selection and the CLK_OFF bit. */ if ((tmp & DPLL_CTRL2_DDI_SEL_OVERRIDE(port)) == 0) return NULL; id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >> DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port); return intel_get_shared_dpll_by_id(i915, id); } void hsw_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port = encoder->port; if (drm_WARN_ON(&i915->drm, !pll)) return; intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); } void hsw_ddi_disable_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); } bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE; } static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; enum intel_dpll_id id; u32 tmp; tmp = intel_de_read(i915, PORT_CLK_SEL(port)); switch (tmp & PORT_CLK_SEL_MASK) { case PORT_CLK_SEL_WRPLL1: id = DPLL_ID_WRPLL1; break; case PORT_CLK_SEL_WRPLL2: id = DPLL_ID_WRPLL2; break; case PORT_CLK_SEL_SPLL: id = DPLL_ID_SPLL; break; case PORT_CLK_SEL_LCPLL_810: id = DPLL_ID_LCPLL_810; break; case PORT_CLK_SEL_LCPLL_1350: id = DPLL_ID_LCPLL_1350; break; case PORT_CLK_SEL_LCPLL_2700: id = DPLL_ID_LCPLL_2700; break; default: MISSING_CASE(tmp); fallthrough; case PORT_CLK_SEL_NONE: return NULL; } return intel_get_shared_dpll_by_id(i915, id); } void intel_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { if (encoder->enable_clock) encoder->enable_clock(encoder, crtc_state); } void intel_ddi_disable_clock(struct intel_encoder *encoder) { if (encoder->disable_clock) encoder->disable_clock(encoder); } void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 port_mask; bool ddi_clk_needed; 
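/*
 * This is a sanitize pass: if the BIOS left a DDI clock ungated for an
 * encoder that is not actually driving a pipe (or for a DSI encoder
 * outside its enable/disable sequence), gate it again below.
 */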
/* * In case of DP MST, we sanitize the primary encoder only, not the * virtual ones. */ if (encoder->type == INTEL_OUTPUT_DP_MST) return; if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { u8 pipe_mask; bool is_mst; intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); /* * In the unlikely case that BIOS enables DP in MST mode, just * warn since our MST HW readout is incomplete. */ if (drm_WARN_ON(&i915->drm, is_mst)) return; } port_mask = BIT(encoder->port); ddi_clk_needed = encoder->base.crtc; if (encoder->type == INTEL_OUTPUT_DSI) { struct intel_encoder *other_encoder; port_mask = intel_dsi_encoder_ports(encoder); /* * Sanity check that we haven't incorrectly registered another * encoder using any of the ports of this DSI encoder. */ for_each_intel_encoder(&i915->drm, other_encoder) { if (other_encoder == encoder) continue; if (drm_WARN_ON(&i915->drm, port_mask & BIT(other_encoder->port))) return; } /* * For DSI we keep the ddi clocks gated * except during enable/disable sequence. */ ddi_clk_needed = false; } if (ddi_clk_needed || !encoder->is_clock_enabled || !encoder->is_clock_enabled(encoder)) return; drm_notice(&i915->drm, "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n", encoder->base.base.id, encoder->base.name); encoder->disable_clock(encoder); } static void icl_program_mg_dp_mode(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); u32 ln0, ln1, pin_assignment; u8 width; if (!intel_phy_is_tc(dev_priv, phy) || intel_tc_port_in_tbt_alt_mode(dig_port)) return; if (DISPLAY_VER(dev_priv) >= 12) { ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 0)); ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 1)); } else { ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port)); ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); } ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); /* DPPATC */ pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port); width = crtc_state->lane_count; switch (pin_assignment) { case 0x0: drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); if (width == 1) { ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; case 0x1: if (width == 4) { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; case 0x2: if (width == 2) { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; case 0x3: case 0x5: if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; case 0x4: case 0x6: if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; default: MISSING_CASE(pin_assignment); } if (DISPLAY_VER(dev_priv) >= 12) { intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0); intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1); } else { intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0); intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1); } } static enum transcoder tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state) { if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) return crtc_state->mst_master_transcoder; else return crtc_state->cpu_transcoder; } i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (DISPLAY_VER(dev_priv) >= 12) return TGL_DP_TP_CTL(tgl_dp_tp_transcoder(crtc_state)); else return DP_TP_CTL(encoder->port); } i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (DISPLAY_VER(dev_priv) >= 12) return TGL_DP_TP_STATUS(tgl_dp_tp_transcoder(crtc_state)); else return DP_TP_STATUS(encoder->port); } static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!crtc_state->vrr.enable) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL, enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0) drm_dbg_kms(&i915->drm, "Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n", str_enable_disable(enable)); } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!crtc_state->fec_enable) return; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) drm_dbg_kms(&i915->drm, "Failed to set FEC_READY in the sink\n"); } static void intel_ddi_enable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (!crtc_state->fec_enable) return; intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 0, DP_TP_CTL_FEC_ENABLE); } static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (!crtc_state->fec_enable) return; intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_FEC_ENABLE, 0); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_phy_is_combo(i915, phy)) { bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; intel_combo_phy_power_up_lanes(i915, phy, false, crtc_state->lane_count, lane_reversal); } } /* Splitter enable for eDP MSO is limited to certain pipes. 
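 * Pipe A and pipe B qualify on Alder Lake-P, only pipe A everywhere
 * else, as encoded in the helper below.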
*/ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) { if (IS_ALDERLAKE_P(i915)) return BIT(PIPE_A) | BIT(PIPE_B); else return BIT(PIPE_A); } static void intel_ddi_mso_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 dss1; if (!HAS_MSO(i915)) return; dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE; if (!pipe_config->splitter.enable) return; if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) { pipe_config->splitter.enable = false; return; } switch (dss1 & SPLITTER_CONFIGURATION_MASK) { default: drm_WARN(&i915->drm, true, "Invalid splitter configuration, dss1=0x%08x\n", dss1); fallthrough; case SPLITTER_CONFIGURATION_2_SEGMENT: pipe_config->splitter.link_count = 2; break; case SPLITTER_CONFIGURATION_4_SEGMENT: pipe_config->splitter.link_count = 4; break; } pipe_config->splitter.pixel_overlap = REG_FIELD_GET(OVERLAP_PIXELS_MASK, dss1); } static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 dss1 = 0; if (!HAS_MSO(i915)) return; if (crtc_state->splitter.enable) { dss1 |= SPLITTER_ENABLE; dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap); if (crtc_state->splitter.link_count == 2) dss1 |= SPLITTER_CONFIGURATION_2_SEGMENT; else dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT; } intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe), SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK | OVERLAP_PIXELS_MASK, dss1); } static u8 mtl_get_port_width(u8 lane_count) { switch (lane_count) { case 1: return 0; case 2: return 1; case 3: return 4; case 4: return 3; default: MISSING_CASE(lane_count); return 4; } } static void mtl_ddi_enable_d2d(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0, XELPDP_PORT_BUF_D2D_LINK_ENABLE); if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) { drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n", port_name(port)); } } static void mtl_port_buf_ctl_program(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum port port = encoder->port; u32 val; val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); val &= ~XELPDP_PORT_WIDTH_MASK; val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count)); val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK; if (intel_dp_is_uhbr(crtc_state)) val |= XELPDP_PORT_BUF_PORT_DATA_40BIT; else val |= XELPDP_PORT_BUF_PORT_DATA_10BIT; if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) val |= XELPDP_PORT_REVERSAL; intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); } static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val; val = intel_tc_port_in_tbt_alt_mode(dig_port) ? 
XELPDP_PORT_BUF_IO_SELECT_TBT : 0; intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, val); } static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); /* * We only configure what the register value will be here. Actual * enabling happens during link training farther down. */ intel_ddi_init_dp_buf_reg(encoder, crtc_state); /* * 1. Enable Power Wells * * This was handled at the beginning of intel_atomic_commit_tail(), * before we called down into this function. */ /* 2. PMdemand was already set */ /* 3. Select Thunderbolt */ mtl_port_buf_ctl_io_selection(encoder); /* 4. Enable Panel Power if PPS is required */ intel_pps_on(intel_dp); /* 5. Enable the port PLL */ intel_ddi_enable_clock(encoder, crtc_state); /* * 6.a Configure Transcoder Clock Select to direct the Port clock to the * Transcoder. */ intel_ddi_enable_transcoder_clock(encoder, crtc_state); /* * 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings. */ intel_ddi_config_transcoder_dp2(encoder, crtc_state); /* * 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST * Transport Select */ intel_ddi_config_transcoder_func(encoder, crtc_state); /* * 6.e Program CoG/MSO configuration bits in DSS_CTL1 if selected. */ intel_ddi_mso_configure(crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit * in the FEC_CONFIGURATION register to 1 before initiating link * training */ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); /* * 6. The rest of the below are substeps under the bspec's "Enable and * Train Display Port" step. Note that steps that are specific to * MST will be handled by intel_mst_pre_enable_dp() before/after it * calls into this function. Also intel_mst_pre_enable_dp() only calls * us when active_mst_links==0, so any steps designated for "single * stream or multi-stream master transcoder" can just be performed * unconditionally here. 
* * mtl_ddi_prepare_link_retrain() that is called by * intel_dp_start_link_train() will execute steps: 6.d, 6.f, 6.g, 6.h, * 6.i and 6.j * * 6.k Follow DisplayPort specification training sequence (see notes for * failure handling) * 6.m If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) * (timeout after 800 us) */ intel_dp_start_link_train(intel_dp, crtc_state); /* 6.n Set DP_TP_CTL link training to Normal */ if (!is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp, crtc_state); /* 6.o Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); /* * We only configure what the register value will be here. Actual * enabling happens during link training farther down. */ intel_ddi_init_dp_buf_reg(encoder, crtc_state); /* * 1. Enable Power Wells * * This was handled at the beginning of intel_atomic_commit_tail(), * before we called down into this function. */ /* 2. Enable Panel Power if PPS is required */ intel_pps_on(intel_dp); /* * 3. For non-TBT Type-C ports, set FIA lane count * (DFLEXDPSP.DPX4TXLATC) * * This was done before tgl_ddi_pre_enable_dp by * hsw_crtc_enable()->intel_encoders_pre_pll_enable(). */ /* * 4. Enable the port PLL. * * The PLL enabling itself was already done before this function by * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only * configure the PLL to port mapping here. */ intel_ddi_enable_clock(encoder, crtc_state); /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); } /* 6. Program DP_MODE */ icl_program_mg_dp_mode(dig_port, crtc_state); /* * 7. The rest of the below are substeps under the bspec's "Enable and * Train Display Port" step. Note that steps that are specific to * MST will be handled by intel_mst_pre_enable_dp() before/after it * calls into this function. Also intel_mst_pre_enable_dp() only calls * us when active_mst_links==0, so any steps designated for "single * stream or multi-stream master transcoder" can just be performed * unconditionally here. */ /* * 7.a Configure Transcoder Clock Select to direct the Port clock to the * Transcoder. */ intel_ddi_enable_transcoder_clock(encoder, crtc_state); if (HAS_DP20(dev_priv)) intel_ddi_config_transcoder_dp2(encoder, crtc_state); /* * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST * Transport Select */ intel_ddi_config_transcoder_func(encoder, crtc_state); /* * 7.c Configure & enable DP_TP_CTL with link training pattern 1 * selected * * This will be handled by the intel_dp_start_link_train() farther * down this function. 
*/ /* 7.e Configure voltage swing and related IO settings */ encoder->set_signal_levels(encoder, crtc_state); /* * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up * the used lanes of the DDI. */ intel_ddi_power_up_lanes(encoder, crtc_state); /* * 7.g Program CoG/MSO configuration bits in DSS_CTL1 if selected. */ intel_ddi_mso_configure(crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit * in the FEC_CONFIGURATION register to 1 before initiating link * training */ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); /* * 7.i Follow DisplayPort specification training sequence (see notes for * failure handling) * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) * (timeout after 800 us) */ intel_dp_start_link_train(intel_dp, crtc_state); /* 7.k Set DP_TP_CTL link training to Normal */ if (!is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp, crtc_state); /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); if (DISPLAY_VER(dev_priv) < 11) drm_WARN_ON(&dev_priv->drm, is_mst && (port == PORT_A || port == PORT_E)); else drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); /* * We only configure what the register value will be here. Actual * enabling happens during link training farther down. 
*/ intel_ddi_init_dp_buf_reg(encoder, crtc_state); intel_pps_on(intel_dp); intel_ddi_enable_clock(encoder, crtc_state); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); } icl_program_mg_dp_mode(dig_port, crtc_state); if (has_buf_trans_select(dev_priv)) hsw_prepare_dp_ddi_buffers(encoder, crtc_state); encoder->set_signal_levels(encoder, crtc_state); intel_ddi_power_up_lanes(encoder, crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp, crtc_state); if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) && !is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp, crtc_state); intel_ddi_enable_fec(encoder, crtc_state); if (!is_mst) intel_ddi_enable_transcoder_clock(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (HAS_DP20(dev_priv)) intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), crtc_state); if (DISPLAY_VER(dev_priv) >= 14) mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); /* MST will call a setting of MSA after an allocating of Virtual Channel * from MST encoder pre_enable callback. */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) intel_ddi_set_dp_msa(crtc_state, conn_state); } static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_enable_clock(encoder, crtc_state); drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); intel_ddi_enable_transcoder_clock(encoder, crtc_state); dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); } static void intel_ddi_pre_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* * When called from DP MST code: * - conn_state will be NULL * - encoder will be the main encoder (ie. 
mst->primary) * - the main connector associated with this port * won't be active or linked to a crtc * - crtc_state will be the state of the first stream to * be activated on this port, and it may not be the same * stream that will be deactivated last, but each stream * should have a state that is identical when it comes to * the DP link parameters */ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { intel_ddi_pre_enable_hdmi(state, encoder, crtc_state, conn_state); } else { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); /* FIXME precompute everything properly */ /* FIXME how do we turn infoframes off again? */ if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp)) dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); } } static void mtl_ddi_disable_d2d_link(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0); if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n", port_name(port)); } static void mtl_disable_ddi_buf(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u32 val; /* 3.b Clear DDI_CTL_DE Enable to 0. */ val = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (val & DDI_BUF_CTL_ENABLE) { val &= ~DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), val); /* 3.c Poll for PORT_BUF_CTL Idle Status == 1, timeout after 100us */ mtl_wait_ddi_buf_idle(dev_priv, port); } /* 3.d Disable D2D Link */ mtl_ddi_disable_d2d_link(encoder); /* 3.e Disable DP_TP_CTL */ if (intel_crtc_has_dp_encoder(crtc_state)) { intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_ENABLE, 0); } } static void disable_ddi_buf(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; bool wait = false; u32 val; val = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (val & DDI_BUF_CTL_ENABLE) { val &= ~DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), val); wait = true; } if (intel_crtc_has_dp_encoder(crtc_state)) intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_ENABLE, 0); /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); } static void intel_disable_ddi_buf(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (DISPLAY_VER(dev_priv) >= 14) { mtl_disable_ddi_buf(encoder, crtc_state); /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */ intel_ddi_disable_fec_state(encoder, crtc_state); } else { disable_ddi_buf(encoder, crtc_state); } } static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct
intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; intel_wakeref_t wakeref; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); if (!is_mst) intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state); /* * Power down sink before disabling the port, otherwise we end * up getting interrupts from the sink on detecting link loss. */ intel_dp_set_power(intel_dp, DP_SET_POWER_D3); if (DISPLAY_VER(dev_priv) >= 12) { if (is_mst) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK, 0); } } else { if (!is_mst) intel_ddi_disable_transcoder_clock(old_crtc_state); } intel_disable_ddi_buf(encoder, old_crtc_state); /* * From TGL spec: "If single stream or multi-stream master transcoder: * Configure Transcoder Clock select to direct no clock to the * transcoder" */ if (DISPLAY_VER(dev_priv) >= 12) intel_ddi_disable_transcoder_clock(old_crtc_state); intel_pps_vdd_on(intel_dp); intel_pps_off(intel_dp); wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref); if (wakeref) intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, wakeref); intel_ddi_disable_clock(encoder); /* De-select Thunderbolt */ if (DISPLAY_VER(dev_priv) >= 14) intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, 0); } static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; intel_wakeref_t wakeref; dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); if (DISPLAY_VER(dev_priv) < 12) intel_ddi_disable_transcoder_clock(old_crtc_state); intel_disable_ddi_buf(encoder, old_crtc_state); if (DISPLAY_VER(dev_priv) >= 12) intel_ddi_disable_transcoder_clock(old_crtc_state); wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref); if (wakeref) intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, wakeref); intel_ddi_disable_clock(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } static void intel_ddi_post_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *slave_crtc; if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { intel_crtc_vblank_off(old_crtc_state); intel_disable_transcoder(old_crtc_state); intel_ddi_disable_transcoder_func(old_crtc_state); intel_dsc_disable(old_crtc_state); if (DISPLAY_VER(dev_priv) >= 9) skl_scaler_disable(old_crtc_state); else ilk_pfit_disable(old_crtc_state); } for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc, intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) { const struct intel_crtc_state *old_slave_crtc_state = intel_atomic_get_old_crtc_state(state, slave_crtc); intel_crtc_vblank_off(old_slave_crtc_state); intel_dsc_disable(old_slave_crtc_state); skl_scaler_disable(old_slave_crtc_state); } /* * When called from DP MST code: * - old_conn_state will be NULL * - encoder will be the main encoder (ie. 
mst->primary) * - the main connector associated with this port * won't be active or linked to a crtc * - old_crtc_state will be the state of the last stream to * be deactivated on this port, and it may not be the same * stream that was activated last, but each stream * should have a state that is identical when it comes to * the DP link parameters */ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); else intel_ddi_post_disable_dp(state, encoder, old_crtc_state, old_conn_state); } static void intel_ddi_post_pll_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(i915, encoder->port); bool is_tc_port = intel_phy_is_tc(i915, phy); main_link_aux_power_domain_put(dig_port, old_crtc_state); if (is_tc_port) intel_tc_port_put_link(dig_port); } static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; if (!crtc_state->sync_mode_slaves_mask) return; for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *slave_encoder = to_intel_encoder(conn_state->best_encoder); struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc); const struct intel_crtc_state *slave_crtc_state; if (!slave_crtc) continue; slave_crtc_state = intel_atomic_get_new_crtc_state(state, slave_crtc); if (slave_crtc_state->master_transcoder != crtc_state->cpu_transcoder) continue; intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder), slave_crtc_state); } usleep_range(200, 400); intel_dp_stop_link_train(enc_to_intel_dp(encoder), crtc_state); } static void intel_enable_ddi_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum port port = encoder->port; if (port == PORT_A && DISPLAY_VER(dev_priv) < 9) intel_dp_stop_link_train(intel_dp, crtc_state); drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp)) intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); intel_audio_codec_enable(encoder, crtc_state, conn_state); trans_port_sync_stop_link_train(state, encoder, crtc_state); } static i915_reg_t gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, enum port port) { static const enum transcoder trans[] = { [PORT_A] = TRANSCODER_EDP, [PORT_B] = TRANSCODER_A, [PORT_C] = TRANSCODER_B, [PORT_D] = TRANSCODER_C, [PORT_E] = TRANSCODER_A, }; drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9); if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E)) port = PORT_A; return CHICKEN_TRANS(trans[port]); } static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv =
to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); u32 buf_ctl; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, crtc_state->hdmi_scrambling)) drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", connector->base.id, connector->name); if (has_buf_trans_select(dev_priv)) hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state); /* e. Enable D2D Link for C10/C20 Phy */ if (DISPLAY_VER(dev_priv) >= 14) mtl_ddi_enable_d2d(encoder); encoder->set_signal_levels(encoder, crtc_state); /* Display WA #1143: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { /* * For some reason these chicken bits have been * stuffed into a transcoder register, even though * the bits affect a specific DDI port rather than * a specific transcoder. */ i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port); u32 val; val = intel_de_read(dev_priv, reg); if (port == PORT_E) val |= DDIE_TRAINING_OVERRIDE_ENABLE | DDIE_TRAINING_OVERRIDE_VALUE; else val |= DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); udelay(1); if (port == PORT_E) val &= ~(DDIE_TRAINING_OVERRIDE_ENABLE | DDIE_TRAINING_OVERRIDE_VALUE); else val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE); intel_de_write(dev_priv, reg, val); } intel_ddi_power_up_lanes(encoder, crtc_state); /* In HDMI/DVI mode, the port width and swing/emphasis values * are ignored, so nothing special needs to be done besides * enabling the port. * * On ADL_P the PHY link rate and lane count must be programmed but * these are both 0 for HDMI. * * From MTL onwards HDMI 2.1 is supported, and in TMDS mode this * field is filled with the lane count already set in the crtc_state. * The same value must also be programmed in PORT_BUF_CTL for the C10/C20 PHY.
*/ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE; if (DISPLAY_VER(dev_priv) >= 14) { u8 lane_count = mtl_get_port_width(crtc_state->lane_count); u32 port_buf = 0; port_buf |= XELPDP_PORT_WIDTH(lane_count); if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL) port_buf |= XELPDP_PORT_REVERSAL; intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); buf_ctl |= DDI_PORT_WIDTH(lane_count); } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); intel_wait_ddi_buf_active(dev_priv, port); intel_audio_codec_enable(encoder, crtc_state, conn_state); } static void intel_enable_ddi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); if (!intel_crtc_is_bigjoiner_slave(crtc_state)) intel_ddi_enable_transcoder_func(encoder, crtc_state); /* Enable/Disable DP2.0 SDP split config before transcoder */ intel_audio_sdp_split_update(encoder, crtc_state); intel_enable_transcoder(crtc_state); intel_crtc_vblank_on(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state); else intel_enable_ddi_dp(state, encoder, crtc_state, conn_state); /* Enable hdcp if it's desired */ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) intel_hdcp_enable(state, encoder, crtc_state, conn_state); } static void intel_disable_ddi_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); /* Disable the decompression in DP Sink */ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, false); /* Disable Ignore_MSA bit in DP Sink */ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state, false); } static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", connector->base.id, connector->name); } static void intel_disable_ddi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_tc_port_link_cancel_reset_work(enc_to_dig_port(encoder)); intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) intel_disable_ddi_hdmi(state, encoder, old_crtc_state, old_conn_state); else intel_disable_ddi_dp(state, encoder, old_crtc_state, old_conn_state); } static void intel_ddi_update_pipe_dp(struct 
intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { intel_ddi_set_dp_msa(crtc_state, conn_state); intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); intel_backlight_update(state, encoder, crtc_state, conn_state); drm_connector_update_privacy_screen(conn_state); } void intel_ddi_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !intel_encoder_is_mst(encoder)) intel_ddi_update_pipe_dp(state, encoder, crtc_state, conn_state); intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state); } void intel_ddi_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_crtc *slave_crtc; enum phy phy = intel_port_to_phy(i915, encoder->port); /* FIXME: Add MTL pll_mgr */ if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy)) return; intel_update_active_dpll(state, crtc, encoder); for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, intel_crtc_bigjoiner_slave_pipes(crtc_state)) intel_update_active_dpll(state, slave_crtc, encoder); } static void intel_ddi_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); if (is_tc_port) { struct intel_crtc *master_crtc = to_intel_crtc(crtc_state->uapi.crtc); intel_tc_port_get_link(dig_port, crtc_state->lane_count); intel_ddi_update_active_dpll(state, encoder, master_crtc); } main_link_aux_power_domain_get(dig_port, crtc_state); if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port)) /* * Program the lane count for static/dynamic connections on * Type-C ports. Skip this step for TBT. 
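* (Only non-TBT, i.e. DP-alt or legacy, Type-C connections program the FIA lane count; compare step 3 (DFLEXDPSP.DPX4TXLATC) in tgl_ddi_pre_enable_dp() above.)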
*/ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) bxt_ddi_phy_set_lane_optim_mask(encoder, crtc_state->lane_lat_optim_mask); } static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); int ln; for (ln = 0; ln < 2; ln++) intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0); } static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u32 dp_tp_ctl; /* * TODO: To train with only a different voltage swing entry is not * necessary disable and enable port */ dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if (dp_tp_ctl & DP_TP_CTL_ENABLE) mtl_disable_ddi_buf(encoder, crtc_state); /* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */ dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { dp_tp_ctl |= DP_TP_CTL_MODE_MST; } else { dp_tp_ctl |= DP_TP_CTL_MODE_SST; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); /* 6.f Enable D2D Link */ mtl_ddi_enable_d2d(encoder); /* 6.g Configure voltage swing and related IO settings */ encoder->set_signal_levels(encoder, crtc_state); /* 6.h Configure PORT_BUF_CTL1 */ mtl_port_buf_ctl_program(encoder, crtc_state); /* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */ intel_dp->DP |= DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */ intel_wait_ddi_buf_active(dev_priv, port); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u32 dp_tp_ctl, ddi_buf_ctl; bool wait = false; dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if (dp_tp_ctl & DP_TP_CTL_ENABLE) { ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) { intel_de_write(dev_priv, DDI_BUF_CTL(port), ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE); wait = true; } dp_tp_ctl &= ~DP_TP_CTL_ENABLE; intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); } dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { dp_tp_ctl |= DP_TP_CTL_MODE_MST; } else { dp_tp_ctl |= DP_TP_CTL_MODE_SST; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if 
(IS_ALDERLAKE_P(dev_priv) && (intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port))) adlp_tbt_to_dp_alt_switch_wa(encoder); intel_dp->DP |= DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); intel_wait_ddi_buf_active(dev_priv, port); } static void intel_ddi_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 temp; temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; break; case DP_TRAINING_PATTERN_1: temp |= DP_TP_CTL_LINK_TRAIN_PAT1; break; case DP_TRAINING_PATTERN_2: temp |= DP_TP_CTL_LINK_TRAIN_PAT2; break; case DP_TRAINING_PATTERN_3: temp |= DP_TP_CTL_LINK_TRAIN_PAT3; break; case DP_TRAINING_PATTERN_4: temp |= DP_TP_CTL_LINK_TRAIN_PAT4; break; } intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp); } static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE); /* * Until TGL on PORT_A we can have only eDP in SST mode. There the only * reason we need to set idle transmission mode is to work around a HW * issue where we enable the pipe while not in idle link-training mode. * In this case there is requirement to wait for a minimum number of * idle patterns to be sent. 
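* (That is also why the DP_TP_STATUS_IDLE_DONE wait below is skipped for PORT_A on display versions before 12.)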
*/ if (port == PORT_A && DISPLAY_VER(dev_priv) < 12) return; if (intel_de_wait_for_set(dev_priv, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_IDLE_DONE, 1)) drm_err(&dev_priv->drm, "Timed out waiting for DP idle patterns\n"); } static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { if (cpu_transcoder == TRANSCODER_EDP) return false; if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO)) return false; return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) & AUDIO_OUTPUT_ENABLE(cpu_transcoder); } void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 3; else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; } static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { u32 master_select; if (DISPLAY_VER(dev_priv) >= 11) { u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder)); if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0) return INVALID_TRANSCODER; master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2); } else { u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0) return INVALID_TRANSCODER; master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl); } if (master_select == 0) return TRANSCODER_EDP; else return master_select - 1; } static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); enum transcoder cpu_transcoder; crtc_state->master_transcoder = bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder); for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { enum intel_display_power_domain power_domain; intel_wakeref_t trans_wakeref; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); trans_wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!trans_wakeref) continue; if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) == crtc_state->cpu_transcoder) crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); intel_display_power_put(dev_priv, power_domain, trans_wakeref); } drm_WARN_ON(&dev_priv->drm, crtc_state->master_transcoder != INVALID_TRANSCODER && crtc_state->sync_mode_slaves_mask); } static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 temp, flags = 0; temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (temp & TRANS_DDI_PVSYNC) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->hw.adjusted_mode.flags |= flags; switch (temp & TRANS_DDI_BPC_MASK) { case TRANS_DDI_BPC_6: pipe_config->pipe_bpp = 18; break; 
case TRANS_DDI_BPC_8: pipe_config->pipe_bpp = 24; break; case TRANS_DDI_BPC_10: pipe_config->pipe_bpp = 30; break; case TRANS_DDI_BPC_12: pipe_config->pipe_bpp = 36; break; default: break; } switch (temp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: pipe_config->has_hdmi_sink = true; pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; if (temp & TRANS_DDI_HDMI_SCRAMBLING) pipe_config->hdmi_scrambling = true; if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) pipe_config->hdmi_high_tmds_clock_ratio = true; fallthrough; case TRANS_DDI_MODE_SELECT_DVI: pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); if (DISPLAY_VER(dev_priv) >= 14) pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; else pipe_config->lane_count = 4; break; case TRANS_DDI_MODE_SELECT_DP_SST: if (encoder->type == INTEL_OUTPUT_EDP) pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); else pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &pipe_config->dp_m_n); intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, &pipe_config->dp_m2_n2); if (DISPLAY_VER(dev_priv) >= 11) { i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); pipe_config->fec_enable = intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] Fec status: %u\n", encoder->base.base.id, encoder->base.name, pipe_config->fec_enable); } if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp)) pipe_config->infoframes.enable |= intel_lspcon_infoframes_enabled(encoder, pipe_config); else pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); break; case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B: if (!HAS_DP20(dev_priv)) { /* FDI */ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); break; } fallthrough; /* 128b/132b */ case TRANS_DDI_MODE_SELECT_DP_MST: pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; if (DISPLAY_VER(dev_priv) >= 12) pipe_config->mst_master_transcoder = REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &pipe_config->dp_m_n); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); break; default: break; } } static void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; /* XXX: DSI transcoder paranoia */ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; intel_ddi_read_func_ctl(encoder, pipe_config); intel_ddi_mso_get_config(encoder, pipe_config); pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); if (encoder->type == INTEL_OUTPUT_EDP) intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp); ddi_dotclock_get(pipe_config); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_get_lane_lat_optim_mask(encoder); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); intel_hdmi_read_gcp_infoframe(encoder, pipe_config); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_AVI, &pipe_config->infoframes.avi); 
intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_SPD, &pipe_config->infoframes.spd); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_VENDOR, &pipe_config->infoframes.hdmi); intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_DRM, &pipe_config->infoframes.drm); if (DISPLAY_VER(dev_priv) >= 8) bdw_get_trans_port_sync_config(pipe_config); intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA); intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC); intel_psr_get_config(encoder, pipe_config); intel_audio_codec_get_config(encoder, pipe_config); } void intel_ddi_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id]; bool pll_active; if (drm_WARN_ON(&i915->drm, !pll)) return; port_dpll->pll = pll; pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state); drm_WARN_ON(&i915->drm, !pll_active); icl_set_active_port_dpll(crtc_state, port_dpll_id); crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, &crtc_state->dpll_hw_state); } static void mtl_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); if (intel_tc_port_in_tbt_alt_mode(dig_port)) { crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); } else if (intel_is_c10phy(i915, phy)) { intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10); intel_c10pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c10); crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); } else { intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20); intel_c20pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c20); crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); } intel_ddi_get_config(encoder, crtc_state); } static void dg2_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state); crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state); intel_ddi_get_config(encoder, crtc_state); } static void adls_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, adls_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void rkl_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, rkl_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void dg1_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, dg1_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void icl_ddi_combo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, icl_ddi_combo_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll) { return pll->info->id == DPLL_ID_ICL_TBTPLL; } static enum icl_port_dpll_id 
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; if (drm_WARN_ON(&i915->drm, !pll)) return ICL_PORT_DPLL_DEFAULT; if (icl_ddi_tc_pll_is_tbt(pll)) return ICL_PORT_DPLL_DEFAULT; else return ICL_PORT_DPLL_MG_PHY; } enum icl_port_dpll_id intel_ddi_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { if (!encoder->port_pll_type) return ICL_PORT_DPLL_DEFAULT; return encoder->port_pll_type(encoder, crtc_state); } static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum icl_port_dpll_id port_dpll_id; struct icl_port_dpll *port_dpll; bool pll_active; if (drm_WARN_ON(&i915->drm, !pll)) return; if (icl_ddi_tc_pll_is_tbt(pll)) port_dpll_id = ICL_PORT_DPLL_DEFAULT; else port_dpll_id = ICL_PORT_DPLL_MG_PHY; port_dpll = &crtc_state->icl_port_dplls[port_dpll_id]; port_dpll->pll = pll; pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state); drm_WARN_ON(&i915->drm, !pll_active); icl_set_active_port_dpll(crtc_state, port_dpll_id); if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll)) crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port); else crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, &crtc_state->dpll_hw_state); } static void icl_ddi_tc_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { icl_ddi_tc_get_clock(encoder, crtc_state, icl_ddi_tc_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void bxt_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, bxt_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void skl_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, skl_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } void hsw_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { intel_ddi_get_clock(encoder, crtc_state, hsw_ddi_get_pll(encoder)); intel_ddi_get_config(encoder, crtc_state); } static void intel_ddi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_phy_is_tc(i915, phy)) intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), crtc_state); if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) intel_dp_sync_state(encoder, crtc_state); } static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); bool fastset = true; if (intel_phy_is_tc(i915, phy)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.mode_changed = true; fastset = false; } if (intel_crtc_has_dp_encoder(crtc_state) && !intel_dp_initial_fastset_check(encoder, crtc_state)) fastset = false; return fastset; } static enum intel_output_type intel_ddi_compute_output_type(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct 
drm_connector_state *conn_state) { switch (conn_state->connector->connector_type) { case DRM_MODE_CONNECTOR_HDMIA: return INTEL_OUTPUT_HDMI; case DRM_MODE_CONNECTOR_eDP: return INTEL_OUTPUT_EDP; case DRM_MODE_CONNECTOR_DisplayPort: return INTEL_OUTPUT_DP; default: MISSING_CASE(conn_state->connector->connector_type); return INTEL_OUTPUT_UNUSED; } } static int intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int ret; if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) { pipe_config->has_hdmi_sink = intel_hdmi_compute_has_hdmi_sink(encoder, pipe_config, conn_state); ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); } else { ret = intel_dp_compute_config(encoder, pipe_config, conn_state); } if (ret) return ret; if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A && pipe_config->cpu_transcoder == TRANSCODER_EDP) pipe_config->pch_pfit.force_thru = pipe_config->pch_pfit.enabled || pipe_config->crc_enabled; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); return 0; } static bool mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) { return drm_mode_match(mode1, mode2, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS) && mode1->clock == mode2->clock; /* we want an exact match */ } static bool m_n_equal(const struct intel_link_m_n *m_n_1, const struct intel_link_m_n *m_n_2) { return m_n_1->tu == m_n_2->tu && m_n_1->data_m == m_n_2->data_m && m_n_1->data_n == m_n_2->data_n && m_n_1->link_m == m_n_2->link_m && m_n_1->link_n == m_n_2->link_n; } static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1, const struct intel_crtc_state *crtc_state2) { return crtc_state1->hw.active && crtc_state2->hw.active && crtc_state1->output_types == crtc_state2->output_types && crtc_state1->output_format == crtc_state2->output_format && crtc_state1->lane_count == crtc_state2->lane_count && crtc_state1->port_clock == crtc_state2->port_clock && mode_equal(&crtc_state1->hw.adjusted_mode, &crtc_state2->hw.adjusted_mode) && m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n); } static u8 intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state, int tile_group_id) { struct drm_connector *connector; const struct drm_connector_state *conn_state; struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev); struct intel_atomic_state *state = to_intel_atomic_state(ref_crtc_state->uapi.state); u8 transcoders = 0; int i; /* * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. 
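* (Hence the DISPLAY_VER() < 9 early return below.)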
*/ if (DISPLAY_VER(dev_priv) < 9) return 0; if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP)) return 0; for_each_new_connector_in_state(&state->base, connector, conn_state, i) { struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); const struct intel_crtc_state *crtc_state; if (!crtc) continue; if (!connector->has_tile || connector->tile_group->id != tile_group_id) continue; crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtcs_port_sync_compatible(ref_crtc_state, crtc_state)) continue; transcoders |= BIT(crtc_state->cpu_transcoder); } return transcoders; } static int intel_ddi_compute_config_late(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = conn_state->connector; u8 port_sync_transcoders = 0; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]", encoder->base.base.id, encoder->base.name, crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name); if (connector->has_tile) port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state, connector->tile_group->id); /* * EDP transcoders cannot be enslaved, * so always make them a master when present */ if (port_sync_transcoders & BIT(TRANSCODER_EDP)) crtc_state->master_transcoder = TRANSCODER_EDP; else crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1; if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) { crtc_state->master_transcoder = INVALID_TRANSCODER; crtc_state->sync_mode_slaves_mask = port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder); } return 0; } static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->dev); struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_dp_encoder_flush_work(encoder); if (intel_phy_is_tc(i915, phy)) intel_tc_port_cleanup(dig_port); intel_display_power_flush_work(i915); drm_encoder_cleanup(encoder); kfree(dig_port->hdcp_port_data.streams); kfree(dig_port); } static void intel_ddi_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_dp->reset_link_params = true; intel_pps_encoder_reset(intel_dp); if (intel_phy_is_tc(i915, phy)) intel_tc_port_init_mode(dig_port); } static int intel_ddi_encoder_late_register(struct drm_encoder *_encoder) { struct intel_encoder *encoder = to_intel_encoder(_encoder); intel_tc_port_link_reset(enc_to_dig_port(encoder)); return 0; } static const struct drm_encoder_funcs intel_ddi_funcs = { .reset = intel_ddi_encoder_reset, .destroy = intel_ddi_encoder_destroy, .late_register = intel_ddi_encoder_late_register, }; static struct intel_connector * intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_connector *connector; enum port port = dig_port->base.port; connector = intel_connector_alloc(); if (!connector) return NULL; dig_port->dp.output_reg = DDI_BUF_CTL(port); if (DISPLAY_VER(i915) >= 14) dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain; else dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; dig_port->dp.set_link_train =
intel_ddi_set_link_train; dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; if (!intel_dp_init_connector(dig_port, connector)) { kfree(connector); return NULL; } if (dig_port->base.type == INTEL_OUTPUT_EDP) { struct drm_device *dev = dig_port->base.base.dev; struct drm_privacy_screen *privacy_screen; privacy_screen = drm_privacy_screen_get(dev->dev, NULL); if (!IS_ERR(privacy_screen)) { drm_connector_attach_privacy_screen_provider(&connector->base, privacy_screen); } else if (PTR_ERR(privacy_screen) != -ENODEV) { drm_warn(dev, "Error getting privacy-screen\n"); } } return connector; } static int modeset_pipe(struct drm_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_atomic_state *state; struct drm_crtc_state *crtc_state; int ret; state = drm_atomic_state_alloc(crtc->dev); if (!state) return -ENOMEM; state->acquire_ctx = ctx; to_intel_atomic_state(state)->internal = true; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } crtc_state->connectors_changed = true; ret = drm_atomic_commit(state); out: drm_atomic_state_put(state); return ret; } static int intel_hdmi_reset_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); struct intel_connector *connector = hdmi->attached_connector; struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); struct drm_connector_state *conn_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; u8 config; int ret; if (!connector || connector->base.status != connector_status_connected) return 0; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, ctx); if (ret) return ret; conn_state = connector->base.state; crtc = to_intel_crtc(conn_state->crtc); if (!crtc) return 0; ret = drm_modeset_lock(&crtc->base.mutex, ctx); if (ret) return ret; crtc_state = to_intel_crtc_state(crtc->base.state); drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); if (!crtc_state->hw.active) return 0; if (!crtc_state->hdmi_high_tmds_clock_ratio && !crtc_state->hdmi_scrambling) return 0; if (conn_state->commit && !try_wait_for_completion(&conn_state->commit->hw_done)) return 0; ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); if (ret < 0) { drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n", connector->base.base.id, connector->base.name, ret); return 0; } if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) == crtc_state->hdmi_high_tmds_clock_ratio && !!(config & SCDC_SCRAMBLING_ENABLE) == crtc_state->hdmi_scrambling) return 0; /* * HDMI 2.0 says that one should not send scrambled data * prior to configuring the sink scrambling, and that * TMDS clock/data transmission should be suspended when * changing the TMDS clock rate in the sink. So let's * just do a full modeset here, even though some sinks * would be perfectly happy if we were to just reconfigure * the SCDC settings on the fly.
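* (modeset_pipe() above forces the full modeset by marking the CRTC state with connectors_changed and committing a fresh atomic state.)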
*/ return modeset_pipe(&crtc->base, ctx); } static enum intel_hotplug_state intel_ddi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; enum phy phy = intel_port_to_phy(i915, encoder->port); bool is_tc = intel_phy_is_tc(i915, phy); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; if (intel_dp->compliance.test_active && intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { intel_dp_phy_test(encoder); /* just do the PHY test and nothing else */ return INTEL_HOTPLUG_UNCHANGED; } state = intel_encoder_hotplug(encoder, connector); if (!intel_tc_port_link_reset(dig_port)) { intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret) { if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) ret = intel_hdmi_reset_link(encoder, &ctx); else ret = intel_dp_retrain_link(encoder, &ctx); } drm_WARN_ON(encoder->base.dev, ret); } /* * Unpowered type-c dongles can take some time to boot and become * responsive, so give those dongles some time to power up and then * retry the probe. * * On many platforms the HDMI live state signal is known to be * unreliable, so we can't use it to detect if a sink is connected or * not. Instead we detect if it's connected based on whether we can * read the EDID or not. That in turn has a problem during disconnect, * since the HPD interrupt may be raised before the DDC lines get * disconnected (due to how the required length of DDC vs. HPD * connector pins are specified) and so we'll still be able to get a * valid EDID. To solve this, schedule another detection cycle if this * time around we didn't detect any change in the sink's connection * status. * * Type-c connectors which get their HPD signal deasserted then * reasserted, without unplugging/replugging the sink from the * connector, introduce a delay until the AUX channel communication * becomes functional. Retry the detection for 5 seconds on type-c * connectors to account for this delay. */ if (state == INTEL_HOTPLUG_UNCHANGED && connector->hotplug_retries < (is_tc ?
5 : 1) && !dig_port->dp.is_mst) state = INTEL_HOTPLUG_RETRY; return state; } static bool lpt_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } static bool hsw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } static bool bdw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; } static struct intel_connector * intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) { struct intel_connector *connector; enum port port = dig_port->base.port; connector = intel_connector_alloc(); if (!connector) return NULL; dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); intel_hdmi_init_connector(dig_port, connector); return connector; } static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); if (dig_port->base.port != PORT_A) return false; if (dig_port->saved_port_bits & DDI_A_4_LANES) return false; /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only * supported configuration */ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) return true; return false; } static int intel_ddi_max_lanes(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; int max_lanes = 4; if (DISPLAY_VER(dev_priv) >= 11) return max_lanes; if (port == PORT_A || port == PORT_E) { if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) max_lanes = port == PORT_A ? 4 : 0; else /* Both A and E share 2 lanes */ max_lanes = 2; } /* * Some BIOS might fail to set this bit on port A if eDP * wasn't lit up at boot. Force this bit set when needed * so we use the proper lane count for our calculations. 
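* (intel_ddi_a_force_4_lanes() above restricts this workaround to Broxton/Geminilake port A when DDI_A_4_LANES is not already set.)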
*/ if (intel_ddi_a_force_4_lanes(dig_port)) { drm_dbg_kms(&dev_priv->drm, "Forcing DDI_A_4_LANES for port A\n"); dig_port->saved_port_bits |= DDI_A_4_LANES; max_lanes = 4; } return max_lanes; } static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (port >= PORT_D_XELPD) return HPD_PORT_D + port - PORT_D_XELPD; else if (port >= PORT_TC1) return HPD_PORT_TC1 + port - PORT_TC1; else return HPD_PORT_A + port - PORT_A; } static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (port >= PORT_TC1) return HPD_PORT_C + port - PORT_TC1; else return HPD_PORT_A + port - PORT_A; } static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (port >= PORT_TC1) return HPD_PORT_TC1 + port - PORT_TC1; else return HPD_PORT_A + port - PORT_A; } static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (HAS_PCH_TGP(dev_priv)) return tgl_hpd_pin(dev_priv, port); if (port >= PORT_TC1) return HPD_PORT_C + port - PORT_TC1; else return HPD_PORT_A + port - PORT_A; } static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (port >= PORT_C) return HPD_PORT_TC1 + port - PORT_C; else return HPD_PORT_A + port - PORT_A; } static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (port == PORT_D) return HPD_PORT_A; if (HAS_PCH_TGP(dev_priv)) return icl_hpd_pin(dev_priv, port); return HPD_PORT_A + port - PORT_A; } static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) { if (HAS_PCH_TGP(dev_priv)) return icl_hpd_pin(dev_priv, port); return HPD_PORT_A + port - PORT_A; } static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port) { if (DISPLAY_VER(i915) >= 12) return port >= PORT_TC1; else if (DISPLAY_VER(i915) >= 11) return port >= PORT_C; else return false; } static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) { intel_dp_encoder_suspend(encoder); } static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); intel_tc_port_suspend(dig_port); } static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) { intel_dp_encoder_shutdown(encoder); intel_hdmi_encoder_shutdown(encoder); } static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); intel_tc_port_cleanup(dig_port); } #define port_tc_name(port) ((port) - PORT_TC1 + '1') #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') static bool port_strap_detected(struct drm_i915_private *i915, enum port port) { /* straps not used on skl+ */ if (DISPLAY_VER(i915) >= 9) return true; switch (port) { case PORT_A: return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; case PORT_B: return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED; case PORT_C: return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED; case PORT_D: return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED; case PORT_E: return true; /* no strap for DDI-E */ default: MISSING_CASE(port); return false; } } static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); return init_dp || intel_phy_is_tc(i915, 
phy); } static bool assert_has_icl_dsi(struct drm_i915_private *i915) { return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) && !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11, "Platform does not support DSI\n"); } static bool port_in_use(struct drm_i915_private *i915, enum port port) { struct intel_encoder *encoder; for_each_intel_encoder(&i915->drm, encoder) { /* FIXME what about second port for dual link DSI? */ if (encoder->port == port) return true; } return false; } void intel_ddi_init(struct drm_i915_private *dev_priv, const struct intel_bios_encoder_data *devdata) { struct intel_digital_port *dig_port; struct intel_encoder *encoder; bool init_hdmi, init_dp; enum port port; enum phy phy; port = intel_bios_encoder_port(devdata); if (port == PORT_NONE) return; if (!port_strap_detected(dev_priv, port)) { drm_dbg_kms(&dev_priv->drm, "Port %c strap not detected\n", port_name(port)); return; } if (!assert_port_valid(dev_priv, port)) return; if (port_in_use(dev_priv, port)) { drm_dbg_kms(&dev_priv->drm, "Port %c already claimed\n", port_name(port)); return; } if (intel_bios_encoder_supports_dsi(devdata)) { /* BXT/GLK handled elsewhere, for now at least */ if (!assert_has_icl_dsi(dev_priv)) return; icl_dsi_init(dev_priv, devdata); return; } phy = intel_port_to_phy(dev_priv, port); /* * On platforms with HTI (aka HDPORT), if it's enabled at boot it may * have taken over some of the PHYs and made them unavailable to the * driver. In that case we should skip initializing the corresponding * outputs. */ if (intel_hti_uses_phy(dev_priv, phy)) { drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n", port_name(port), phy_name(phy)); return; } init_hdmi = intel_bios_encoder_supports_dvi(devdata) || intel_bios_encoder_supports_hdmi(devdata); init_dp = intel_bios_encoder_supports_dp(devdata); if (intel_bios_encoder_is_lspcon(devdata)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. So make sure DP * is initialized before lspcon. */ init_dp = true; init_hdmi = false; drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n", port_name(port)); } if (!init_dp && !init_hdmi) { drm_dbg_kms(&dev_priv->drm, "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", port_name(port)); return; } if (intel_phy_is_snps(dev_priv, phy) && dev_priv->display.snps.phy_failed_calibration & BIT(phy)) { drm_dbg_kms(&dev_priv->drm, "SNPS PHY %c failed to calibrate, proceeding anyway\n", phy_name(phy)); } dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return; dig_port->aux_ch = AUX_CH_NONE; encoder = &dig_port->base; encoder->devdata = devdata; if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) { drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c/PHY %c", port_name(port - PORT_D_XELPD + PORT_D), phy_name(phy)); } else if (DISPLAY_VER(dev_priv) >= 12) { enum tc_port tc_port = intel_port_to_tc(dev_priv, port); drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %s%c/PHY %s%c", port >= PORT_TC1 ? "TC" : "", port >= PORT_TC1 ? port_tc_name(port) : port_name(port), tc_port != TC_PORT_NONE ? "TC" : "", tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); } else if (DISPLAY_VER(dev_priv) >= 11) { enum tc_port tc_port = intel_port_to_tc(dev_priv, port); drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c%s/PHY %s%c", port_name(port), port >= PORT_C ? " (TC)" : "", tc_port != TC_PORT_NONE ? 
"TC" : "", tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); } else { drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c/PHY %c", port_name(port), phy_name(phy)); } mutex_init(&dig_port->hdcp_mutex); dig_port->num_hdcp_streams = 0; encoder->hotplug = intel_ddi_hotplug; encoder->compute_output_type = intel_ddi_compute_output_type; encoder->compute_config = intel_ddi_compute_config; encoder->compute_config_late = intel_ddi_compute_config_late; encoder->enable = intel_enable_ddi; encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; encoder->disable = intel_disable_ddi; encoder->post_pll_disable = intel_ddi_post_pll_disable; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; encoder->get_hw_state = intel_ddi_get_hw_state; encoder->sync_state = intel_ddi_sync_state; encoder->initial_fastset_check = intel_ddi_initial_fastset_check; encoder->suspend = intel_ddi_encoder_suspend; encoder->shutdown = intel_ddi_encoder_shutdown; encoder->get_power_domains = intel_ddi_get_power_domains; encoder->type = INTEL_OUTPUT_DDI; encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); encoder->port = port; encoder->cloneable = 0; encoder->pipe_mask = ~0; if (DISPLAY_VER(dev_priv) >= 14) { encoder->enable_clock = intel_mtl_pll_enable; encoder->disable_clock = intel_mtl_pll_disable; encoder->port_pll_type = intel_mtl_port_pll_type; encoder->get_config = mtl_ddi_get_config; } else if (IS_DG2(dev_priv)) { encoder->enable_clock = intel_mpllb_enable; encoder->disable_clock = intel_mpllb_disable; encoder->get_config = dg2_ddi_get_config; } else if (IS_ALDERLAKE_S(dev_priv)) { encoder->enable_clock = adls_ddi_enable_clock; encoder->disable_clock = adls_ddi_disable_clock; encoder->is_clock_enabled = adls_ddi_is_clock_enabled; encoder->get_config = adls_ddi_get_config; } else if (IS_ROCKETLAKE(dev_priv)) { encoder->enable_clock = rkl_ddi_enable_clock; encoder->disable_clock = rkl_ddi_disable_clock; encoder->is_clock_enabled = rkl_ddi_is_clock_enabled; encoder->get_config = rkl_ddi_get_config; } else if (IS_DG1(dev_priv)) { encoder->enable_clock = dg1_ddi_enable_clock; encoder->disable_clock = dg1_ddi_disable_clock; encoder->is_clock_enabled = dg1_ddi_is_clock_enabled; encoder->get_config = dg1_ddi_get_config; } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { if (intel_ddi_is_tc(dev_priv, port)) { encoder->enable_clock = jsl_ddi_tc_enable_clock; encoder->disable_clock = jsl_ddi_tc_disable_clock; encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_combo_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; encoder->disable_clock = icl_ddi_combo_disable_clock; encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; encoder->get_config = icl_ddi_combo_get_config; } } else if (DISPLAY_VER(dev_priv) >= 11) { if (intel_ddi_is_tc(dev_priv, port)) { encoder->enable_clock = icl_ddi_tc_enable_clock; encoder->disable_clock = icl_ddi_tc_disable_clock; encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_tc_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; encoder->disable_clock = icl_ddi_combo_disable_clock; encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; encoder->get_config = icl_ddi_combo_get_config; } } else if 
(IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { /* BXT/GLK have fixed PLL->port mapping */ encoder->get_config = bxt_ddi_get_config; } else if (DISPLAY_VER(dev_priv) == 9) { encoder->enable_clock = skl_ddi_enable_clock; encoder->disable_clock = skl_ddi_disable_clock; encoder->is_clock_enabled = skl_ddi_is_clock_enabled; encoder->get_config = skl_ddi_get_config; } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { encoder->enable_clock = hsw_ddi_enable_clock; encoder->disable_clock = hsw_ddi_disable_clock; encoder->is_clock_enabled = hsw_ddi_is_clock_enabled; encoder->get_config = hsw_ddi_get_config; } if (DISPLAY_VER(dev_priv) >= 14) { encoder->set_signal_levels = intel_cx0_phy_set_signal_levels; } else if (IS_DG2(dev_priv)) { encoder->set_signal_levels = intel_snps_phy_set_signal_levels; } else if (DISPLAY_VER(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels; } else if (DISPLAY_VER(dev_priv) >= 11) { if (intel_phy_is_combo(dev_priv, phy)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = icl_mg_phy_set_signal_levels; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels; } else { encoder->set_signal_levels = hsw_set_signal_levels; } intel_ddi_buf_trans_init(encoder); if (DISPLAY_VER(dev_priv) >= 13) encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port); else if (IS_DG1(dev_priv)) encoder->hpd_pin = dg1_hpd_pin(dev_priv, port); else if (IS_ROCKETLAKE(dev_priv)) encoder->hpd_pin = rkl_hpd_pin(dev_priv, port); else if (DISPLAY_VER(dev_priv) >= 12) encoder->hpd_pin = tgl_hpd_pin(dev_priv, port); else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) encoder->hpd_pin = ehl_hpd_pin(dev_priv, port); else if (DISPLAY_VER(dev_priv) == 11) encoder->hpd_pin = icl_hpd_pin(dev_priv, port); else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) encoder->hpd_pin = skl_hpd_pin(dev_priv, port); else encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); if (DISPLAY_VER(dev_priv) >= 11) dig_port->saved_port_bits = intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_PORT_REVERSAL; else dig_port->saved_port_bits = intel_de_read(dev_priv, DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); if (intel_bios_encoder_lane_reversal(devdata)) dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL; dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); if (need_aux_ch(encoder, init_dp)) { dig_port->aux_ch = intel_dp_aux_ch(encoder); if (dig_port->aux_ch == AUX_CH_NONE) goto err; } if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = !intel_bios_encoder_supports_typec_usb(devdata) && !intel_bios_encoder_supports_tbt(devdata); if (!is_legacy && init_hdmi) { is_legacy = !init_dp; drm_dbg_kms(&dev_priv->drm, "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n", port_name(port), str_yes_no(init_dp), is_legacy ? 
"legacy" : "non-legacy"); } encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete; encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete; if (intel_tc_port_init(dig_port, is_legacy) < 0) goto err; } drm_WARN_ON(&dev_priv->drm, port > PORT_I); dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); if (DISPLAY_VER(dev_priv) >= 11) { if (intel_phy_is_tc(dev_priv, phy)) dig_port->connected = intel_tc_port_connected; else dig_port->connected = lpt_digital_port_connected; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { dig_port->connected = bdw_digital_port_connected; } else if (DISPLAY_VER(dev_priv) == 9) { dig_port->connected = lpt_digital_port_connected; } else if (IS_BROADWELL(dev_priv)) { if (port == PORT_A) dig_port->connected = bdw_digital_port_connected; else dig_port->connected = lpt_digital_port_connected; } else if (IS_HASWELL(dev_priv)) { if (port == PORT_A) dig_port->connected = hsw_digital_port_connected; else dig_port->connected = lpt_digital_port_connected; } intel_infoframe_init(dig_port); if (init_dp) { if (!intel_ddi_init_dp_connector(dig_port)) goto err; dig_port->hpd_pulse = intel_dp_hpd_pulse; if (dig_port->dp.mso_link_count) encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); } /* * In theory we don't need the encoder->type check, * but leave it just in case we have some really bad VBTs... */ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { if (!intel_ddi_init_hdmi_connector(dig_port)) goto err; } return; err: drm_encoder_cleanup(&encoder->base); kfree(dig_port); }
linux-master
drivers/gpu/drm/i915/display/intel_ddi.c
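The *_hpd_pin() helpers in intel_ddi.c above (xelpd, dg1, tgl, rkl, icl, ehl, skl) all lean on the same convention: the port and HPD-pin enumerations are each laid out contiguously, so translating a port to its hotplug pin is plain enum arithmetic against a per-platform base, e.g. HPD_PORT_TC1 + port - PORT_TC1 for Type-C ports. The standalone sketch below shows just that pattern in isolation; the EX_* enums and example_port_to_hpd_pin() are simplified stand-ins invented for illustration, not the driver's actual definitions.

#include <stdio.h>

/* Illustrative stand-in enums; the real i915 enums carry more entries. */
enum example_port { EX_PORT_A, EX_PORT_B, EX_PORT_C, EX_PORT_TC1, EX_PORT_TC2 };
enum example_hpd_pin { EX_HPD_PORT_A, EX_HPD_PORT_B, EX_HPD_PORT_C, EX_HPD_PORT_TC1, EX_HPD_PORT_TC2 };

/*
 * Same shape as tgl_hpd_pin(): fixed-function DDIs map 1:1 starting at the
 * first HPD pin, Type-C ports map 1:1 starting at the first Type-C HPD pin.
 */
static enum example_hpd_pin example_port_to_hpd_pin(enum example_port port)
{
	if (port >= EX_PORT_TC1)
		return EX_HPD_PORT_TC1 + port - EX_PORT_TC1;
	else
		return EX_HPD_PORT_A + port - EX_PORT_A;
}

int main(void)
{
	for (int port = EX_PORT_A; port <= EX_PORT_TC2; port++)
		printf("port %d -> hpd pin %d\n", port, example_port_to_hpd_pin(port));
	return 0;
}

Keeping one tiny helper per platform, rather than a single lookup table, keeps each platform's base offsets and PCH special cases (the HAS_PCH_TGP checks in the rkl/ehl/skl variants) explicit and easy to audit.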
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <linux/util_macros.h> #include "i915_reg.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_snps_phy.h" #include "intel_snps_phy_regs.h" /** * DOC: Synopsys PHY support * * Synopsys PHYs are primarily programmed by looking up magic register values * in tables rather than calculating the necessary values at runtime. * * Of special note is that the SNPS PHYs include a dedicated port PLL, known as * an "MPLLB." The MPLLB replaces the shared DPLL functionality used on other * platforms and must be programmed directly during the modeset sequence * since it is not handled by the shared DPLL framework as on other platforms. */ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915) { enum phy phy; for_each_phy_masked(phy, ~0) { if (!intel_phy_is_snps(i915, phy)) continue; /* * If calibration does not complete successfully, we'll remember * which phy was affected and skip setup of the corresponding * output later. */ if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) i915->display.snps.phy_failed_calibration |= BIT(phy); } } void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915, enum phy phy, bool enable) { u32 val; if (!intel_phy_is_snps(i915, phy)) return; val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, enable ? 2 : 3); intel_de_rmw(i915, SNPS_PHY_TX_REQ(phy), SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val); } void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); u32 val = 0; val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing); val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor); val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor); intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val); } } /* * Basic DP link rates with 100 MHz reference clock.
*/ static const struct intel_mpllb_state dg2_dp_rbr_100 = { .clock = 162000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3), }; static const struct intel_mpllb_state dg2_dp_hbr1_100 = { .clock = 270000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), }; static const struct intel_mpllb_state dg2_dp_hbr2_100 = { .clock = 540000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), }; static const struct intel_mpllb_state dg2_dp_hbr3_100 = { .clock = 810000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 292), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), }; static const struct intel_mpllb_state dg2_dp_uhbr10_100 = { .clock = 1000000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 21) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 368), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), /* * SSC will be enabled, DP UHBR has a minimum SSC requirement. */ .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 58982), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 76101), }; static const struct intel_mpllb_state dg2_dp_uhbr13_100 = { .clock = 1350000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 45) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 508), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), /* * SSC will be enabled, DP UHBR has a minimum SSC requirement. */ .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 79626), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 102737), }; static const struct intel_mpllb_state * const dg2_dp_100_tables[] = { &dg2_dp_rbr_100, &dg2_dp_hbr1_100, &dg2_dp_hbr2_100, &dg2_dp_hbr3_100, &dg2_dp_uhbr10_100, &dg2_dp_uhbr13_100, NULL, }; /* * eDP link rates with 100 MHz reference clock. 
*/ static const struct intel_mpllb_state dg2_edp_r216 = { .clock = 216000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752), }; static const struct intel_mpllb_state dg2_edp_r243 = { .clock = 243000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 356), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 57331), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 73971), }; static const struct intel_mpllb_state dg2_edp_r324 = { .clock = 324000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 38221), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 49314), }; static const struct intel_mpllb_state dg2_edp_r432 = { .clock = 432000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961), .mpllb_sscstep = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752), }; static const struct intel_mpllb_state * const dg2_edp_tables[] = { &dg2_dp_rbr_100, &dg2_edp_r216, &dg2_edp_r243, &dg2_dp_hbr1_100, &dg2_edp_r324, &dg2_edp_r432, &dg2_dp_hbr2_100, &dg2_dp_hbr3_100, NULL, }; /* * HDMI link rates with 100 MHz reference clock. */ static const struct intel_mpllb_state dg2_hdmi_25_175 = { .clock = 25175, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 143), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36663) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 71), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_27_0 = { .clock = 27000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_74_25 = { .clock = 74250, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_148_5 = { .clock = 148500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; /* values in the below table are calculated using the algorithm */ static const struct intel_mpllb_state dg2_hdmi_25200 = { .clock = 25200, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_27027 = { .clock = 27027, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_28320 = { .clock = 28320, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_30240 = { .clock = 30240, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_31500 = { .clock = 31500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), 
.mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_36000 = { .clock = 36000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_40000 = { .clock = 40000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_49500 = { .clock = 49500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_50000 = { .clock = 50000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_57284 = { .clock = 57284, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_58000 = { .clock = 58000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_65000 = { .clock = 65000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), 
.mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_71000 = { .clock = 71000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_74176 = { .clock = 74176, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_75000 = { .clock = 75000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_78750 = { .clock = 78750, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_85500 = { .clock = 85500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_88750 = { .clock = 88750, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_106500 = { .clock = 106500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 
6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_108000 = { .clock = 108000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_115500 = { .clock = 115500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_119000 = { .clock = 119000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_135000 = { .clock = 135000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_138500 = { .clock = 138500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_147160 = { .clock = 147160, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_148352 = { .clock = 148352, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_154000 = { .clock = 154000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_162000 = { .clock = 162000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_209800 
= { .clock = 209800, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_262750 = { .clock = 262750, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_267300 = { .clock = 267300, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_268500 = { .clock = 268500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = 
REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_296703 = { .clock = 296703, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_241500 = { .clock = 241500, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_319890 = { .clock = 319890, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 
94) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_497750 = { .clock = 497750, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_592000 = { .clock = 592000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_593407 = { .clock = 593407, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 
7549), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_297 = { .clock = 297000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state dg2_hdmi_594 = { .clock = 594000, .ref_control = REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), .mpllb_cp = REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), .mpllb_div = REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), .mpllb_fracn1 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5), .mpllb_fracn2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2), .mpllb_sscen = REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_25_175, &dg2_hdmi_27_0, &dg2_hdmi_74_25, &dg2_hdmi_148_5, &dg2_hdmi_297, &dg2_hdmi_594, &dg2_hdmi_25200, &dg2_hdmi_27027, &dg2_hdmi_28320, &dg2_hdmi_30240, &dg2_hdmi_31500, &dg2_hdmi_36000, &dg2_hdmi_40000, &dg2_hdmi_49500, &dg2_hdmi_50000, &dg2_hdmi_57284, &dg2_hdmi_58000, &dg2_hdmi_65000, &dg2_hdmi_71000, &dg2_hdmi_74176, &dg2_hdmi_75000, &dg2_hdmi_78750, &dg2_hdmi_85500, &dg2_hdmi_88750, &dg2_hdmi_106500, &dg2_hdmi_108000, &dg2_hdmi_115500, &dg2_hdmi_119000, &dg2_hdmi_135000, &dg2_hdmi_138500, &dg2_hdmi_147160, &dg2_hdmi_148352, &dg2_hdmi_154000, &dg2_hdmi_162000, &dg2_hdmi_209800, &dg2_hdmi_241500, &dg2_hdmi_262750, &dg2_hdmi_267300, &dg2_hdmi_268500, &dg2_hdmi_296703, &dg2_hdmi_319890, &dg2_hdmi_497750, &dg2_hdmi_592000, &dg2_hdmi_593407, NULL, }; static const struct intel_mpllb_state * const * intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { return dg2_edp_tables; } else if (intel_crtc_has_dp_encoder(crtc_state)) { return dg2_dp_100_tables; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { return dg2_hdmi_tables; } MISSING_CASE(encoder->type); return NULL; } int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct 
drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_mpllb_state * const *tables; int i; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { if (intel_snps_phy_check_hdmi_link_rate(crtc_state->port_clock) != MODE_OK) { /* * FIXME: Can only support fixed HDMI frequencies * until we have a proper algorithm under a valid * license. */ drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n", crtc_state->port_clock); return -EINVAL; } } tables = intel_mpllb_tables_get(crtc_state, encoder); if (!tables) return -EINVAL; for (i = 0; tables[i]; i++) { if (crtc_state->port_clock == tables[i]->clock) { crtc_state->mpllb_state = *tables[i]; return 0; } } return -EINVAL; } void intel_mpllb_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); /* * 3. Software programs the following PLL registers for the desired * frequency. */ intel_de_write(dev_priv, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp); intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div); intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2); intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen); intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep); intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1); intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2); /* * 4. If the frequency will result in a change to the voltage * requirement, follow the Display Voltage Frequency Switching - * Sequence Before Frequency Change. * * We handle this step in bxt_set_cdclk(). */ /* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); /* * 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This * will keep the PLL running during the DDI lane programming and any * typeC DP cable disconnect. Do not set the force before enabling the * PLL because that will start the PLL before it has sampled the * divider values. */ intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div | SNPS_PHY_MPLLB_FORCE_EN); /* * 10. Software polls on register DPLL_ENABLE [PLL Lock] to confirm PLL * is locked at new settings. This register bit is sampling PHY * dp_mpllb_state interface signal. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5)) drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 11. If the frequency will result in a change to the voltage * requirement, follow the Display Voltage Frequency Switching - * Sequence After Frequency Change. * * We handle this step in bxt_set_cdclk(). */ } void intel_mpllb_disable(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); /* * 1. If the frequency will result in a change to the voltage * requirement, follow the Display Voltage Frequency Switching - * Sequence Before Frequency Change. * * We handle this step in bxt_set_cdclk(). */ /* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */ intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0); /* * 4. 
Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0". * This will allow the PLL to stop running. */ intel_de_rmw(i915, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0); /* * 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment * (dp_txX_ack) that the new transmitter setting request is completed. */ if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5)) drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 6. If the frequency will result in a change to the voltage * requirement, follow the Display Voltage Frequency Switching - * Sequence After Frequency Change. * * We handle this step in bxt_set_cdclk(). */ } int intel_mpllb_calc_port_clock(struct intel_encoder *encoder, const struct intel_mpllb_state *pll_state) { unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; unsigned int multiplier, tx_clk_div, refclk; bool frac_en; if (0) refclk = 38400; else refclk = 100000; refclk >>= REG_FIELD_GET(SNPS_PHY_MPLLB_REF_CLK_DIV, pll_state->mpllb_div2) - 1; frac_en = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_EN, pll_state->mpllb_fracn1); if (frac_en) { frac_quot = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_QUOT, pll_state->mpllb_fracn2); frac_rem = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_REM, pll_state->mpllb_fracn2); frac_den = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_DEN, pll_state->mpllb_fracn1); } multiplier = REG_FIELD_GET(SNPS_PHY_MPLLB_MULTIPLIER, pll_state->mpllb_div2) / 2 + 16; tx_clk_div = REG_FIELD_GET(SNPS_PHY_MPLLB_TX_CLK_DIV, pll_state->mpllb_div); return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) + DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den), 10 << (tx_clk_div + 16)); } void intel_mpllb_readout_hw_state(struct intel_encoder *encoder, struct intel_mpllb_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy)); pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy)); pll_state->mpllb_div2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV2(phy)); pll_state->mpllb_sscen = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy)); pll_state->mpllb_sscstep = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy)); pll_state->mpllb_fracn1 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy)); pll_state->mpllb_fracn2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy)); /* * REF_CONTROL is under firmware control and never programmed by the * driver; we read it only for sanity checking purposes. The bspec * only tells us the expected value for one field in this register, * so we'll only read out those specific bits here. */ pll_state->ref_control = intel_de_read(dev_priv, SNPS_PHY_REF_CONTROL(phy)) & SNPS_PHY_REF_CONTROL_REF_RANGE; /* * MPLLB_DIV is programmed twice, once with the software-computed * state, then again with the MPLLB_FORCE_EN bit added. Drop that * extra bit during readout so that we return the actual expected * software state. 
*/ pll_state->mpllb_div &= ~SNPS_PHY_MPLLB_FORCE_EN; } int intel_snps_phy_check_hdmi_link_rate(int clock) { const struct intel_mpllb_state * const *tables = dg2_hdmi_tables; int i; for (i = 0; tables[i]; i++) { if (clock == tables[i]->clock) return MODE_OK; } return MODE_CLOCK_RANGE; } void intel_mpllb_state_verify(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_mpllb_state mpllb_hw_state = { 0 }; struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state; struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_encoder *encoder; if (!IS_DG2(i915)) return; if (!new_crtc_state->hw.active) return; /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */ if (!intel_crtc_needs_modeset(new_crtc_state) && !intel_crtc_needs_fastset(new_crtc_state)) return; encoder = intel_get_crtc_new_encoder(state, new_crtc_state); intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); #define MPLLB_CHECK(__name) \ I915_STATE_WARN(i915, mpllb_sw_state->__name != mpllb_hw_state.__name, \ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \ crtc->base.base.id, crtc->base.name, \ __stringify(__name), \ mpllb_sw_state->__name, mpllb_hw_state.__name) MPLLB_CHECK(mpllb_cp); MPLLB_CHECK(mpllb_div); MPLLB_CHECK(mpllb_div2); MPLLB_CHECK(mpllb_fracn1); MPLLB_CHECK(mpllb_fracn2); MPLLB_CHECK(mpllb_sscen); MPLLB_CHECK(mpllb_sscstep); /* * ref_control is handled by the hardware/firmware and never * programmed by the software, but the proper values are supplied * in the bspec for verification purposes. */ MPLLB_CHECK(ref_control); #undef MPLLB_CHECK }
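/*
 * Editor's note: the sketch below is not part of intel_snps_phy.c; it is an
 * illustrative, standalone recomputation of what intel_mpllb_calc_port_clock()
 * above does with the dg2_hdmi_* tables, namely
 *
 *   port_clock = refclk * (MULTIPLIER/2 + 16 + frac) / (10 * 2^TX_CLK_DIV)
 *
 * where frac = (FRACN_QUOT + FRACN_REM/FRACN_DEN) / 2^16 and
 * refclk = 100000 kHz >> (REF_CLK_DIV - 1).  The field values plugged in are
 * the decoded dg2_hdmi_594 entry from the table above (MULTIPLIER=86,
 * QUOT=26214, REM=2, DEN=5, TX_CLK_DIV=0, REF_CLK_DIV=1); the helper name and
 * the little harness are the editor's own, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mpllb_port_clock_khz(uint32_t refclk_khz, uint32_t ref_clk_div,
				     uint32_t multiplier_field, uint32_t tx_clk_div,
				     uint32_t frac_quot, uint32_t frac_rem,
				     uint32_t frac_den)
{
	uint32_t multiplier = multiplier_field / 2 + 16;
	uint64_t num, den;

	refclk_khz >>= ref_clk_div - 1;

	/* accumulate the integer and fractional feedback terms in .16 fixed point */
	num = (uint64_t)refclk_khz * (((uint64_t)multiplier << 16) + frac_quot);
	num += ((uint64_t)refclk_khz * frac_rem + frac_den / 2) / frac_den;

	/* divide out the .16 fixed point, the /10 and the TX clock divider */
	den = 10ull << (tx_clk_div + 16);
	return (uint32_t)((num + den / 2) / den);
}

int main(void)
{
	/* Expect 594000 kHz, matching dg2_hdmi_594.clock above. */
	printf("%u kHz\n", mpllb_port_clock_khz(100000, 1, 86, 0, 26214, 2, 5));
	return 0;
}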
linux-master
drivers/gpu/drm/i915/display/intel_snps_phy.c
/* * Copyright © 2014-2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "i915_reg.h" #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" #include "vlv_sideband.h" /** * DOC: DPIO * * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI * ports. DPIO is the name given to such a display PHY. These PHYs * don't follow the standard programming model using direct MMIO * registers, and instead their registers must be accessed trough IOSF * sideband. VLV has one such PHY for driving ports B and C, and CHV * adds another PHY for driving port D. Each PHY responds to specific * IOSF-SB port. * * Each display PHY is made up of one or two channels. Each channel * houses a common lane part which contains the PLL and other common * logic. CH0 common lane also contains the IOSF-SB logic for the * Common Register Interface (CRI) ie. the DPIO registers. CRI clock * must be running when any DPIO registers are accessed. * * In addition to having their own registers, the PHYs are also * controlled through some dedicated signals from the display * controller. These include PLL reference clock enable, PLL enable, * and CRI clock selection, for example. * * Eeach channel also has two splines (also called data lanes), and * each spline is made up of one Physical Access Coding Sub-Layer * (PCS) block and two TX lanes. So each channel has two PCS blocks * and four TX lanes. The TX lanes are used as DP lanes or TMDS * data/clock pairs depending on the output type. * * Additionally the PHY also contains an AUX lane with AUX blocks * for each channel. This is used for DP AUX communication, but * this fact isn't really relevant for the driver since AUX is * controlled from the display controller side. No DPIO registers * need to be accessed during AUX communication, * * Generally on VLV/CHV the common lane corresponds to the pipe and * the spline (PCS/TX) corresponds to the port. * * For dual channel PHY (VLV/CHV): * * pipe A == CMN/PLL/REF CH0 * * pipe B == CMN/PLL/REF CH1 * * port B == PCS/TX CH0 * * port C == PCS/TX CH1 * * This is especially important when we cross the streams * ie. drive port B with pipe B, or port C with pipe A. * * For single channel PHY (CHV): * * pipe C == CMN/PLL/REF CH0 * * port D == PCS/TX CH0 * * On BXT the entire PHY channel corresponds to the port. 
That means * the PLL is also now associated with the port rather than the pipe, * and so the clock needs to be routed to the appropriate transcoder. * Port A PLL is directly connected to transcoder EDP and port B/C * PLLs can be routed to any transcoder A/B/C. * * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is * digital port D (CHV) or port A (BXT). :: * * * Dual channel PHY (VLV/CHV/BXT) * --------------------------------- * | CH0 | CH1 | * | CMN/PLL/REF | CMN/PLL/REF | * |---------------|---------------| Display PHY * | PCS01 | PCS23 | PCS01 | PCS23 | * |-------|-------|-------|-------| * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3| * --------------------------------- * | DDI0 | DDI1 | DP/HDMI ports * --------------------------------- * * Single channel PHY (CHV/BXT) * ----------------- * | CH0 | * | CMN/PLL/REF | * |---------------| Display PHY * | PCS01 | PCS23 | * |-------|-------| * |TX0|TX1|TX2|TX3| * ----------------- * | DDI2 | DP/HDMI port * ----------------- */ /** * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy */ struct bxt_ddi_phy_info { /** * @dual_channel: true if this phy has a second channel. */ bool dual_channel; /** * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor. * Otherwise the GRC value will be copied from the phy indicated by * this field. */ enum dpio_phy rcomp_phy; /** * @reset_delay: delay in us to wait before setting the common reset * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy. */ int reset_delay; /** * @pwron_mask: Mask with the appropriate bit set that would cause the * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON. */ u32 pwron_mask; /** * @channel: struct containing per channel information. */ struct { /** * @channel.port: which port maps to this channel. 
*/ enum port port; } channel[2]; }; static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { [DPIO_PHY0] = { .dual_channel = true, .rcomp_phy = DPIO_PHY1, .pwron_mask = BIT(0), .channel = { [DPIO_CH0] = { .port = PORT_B }, [DPIO_CH1] = { .port = PORT_C }, } }, [DPIO_PHY1] = { .dual_channel = false, .rcomp_phy = -1, .pwron_mask = BIT(1), .channel = { [DPIO_CH0] = { .port = PORT_A }, } }, }; static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { [DPIO_PHY0] = { .dual_channel = false, .rcomp_phy = DPIO_PHY1, .pwron_mask = BIT(0), .reset_delay = 20, .channel = { [DPIO_CH0] = { .port = PORT_B }, } }, [DPIO_PHY1] = { .dual_channel = false, .rcomp_phy = -1, .pwron_mask = BIT(3), .reset_delay = 20, .channel = { [DPIO_CH0] = { .port = PORT_A }, } }, [DPIO_PHY2] = { .dual_channel = false, .rcomp_phy = DPIO_PHY1, .pwron_mask = BIT(1), .reset_delay = 20, .channel = { [DPIO_CH0] = { .port = PORT_C }, } }, }; static const struct bxt_ddi_phy_info * bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) { if (IS_GEMINILAKE(dev_priv)) { *count = ARRAY_SIZE(glk_ddi_phy_info); return glk_ddi_phy_info; } else { *count = ARRAY_SIZE(bxt_ddi_phy_info); return bxt_ddi_phy_info; } } static const struct bxt_ddi_phy_info * bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) { int count; const struct bxt_ddi_phy_info *phy_list = bxt_get_phy_list(dev_priv, &count); return &phy_list[phy]; } void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { const struct bxt_ddi_phy_info *phy_info, *phys; int i, count; phys = bxt_get_phy_list(dev_priv, &count); for (i = 0; i < count; i++) { phy_info = &phys[i]; if (port == phy_info->channel[DPIO_CH0].port) { *phy = i; *ch = DPIO_CH0; return; } if (phy_info->dual_channel && port == phy_info->channel[DPIO_CH1].port) { *phy = i; *ch = DPIO_CH1; return; } } drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c", port_name(port)); *phy = DPIO_PHY0; *ch = DPIO_CH0; } void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; enum dpio_channel ch; enum dpio_phy phy; int n_entries; u32 val; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch); /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. 
*/ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch)); val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE); val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT | trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT; intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch)); val &= ~SCALE_DCOMP_METHOD; if (trans->entries[level].bxt.enable) val |= SCALE_DCOMP_METHOD; if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) drm_err(&dev_priv->drm, "Disabled scaling while ouniqetrangenmethod was set"); intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch)); val &= ~DE_EMPHASIS; val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT; intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT; intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); } bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; phy_info = bxt_get_phy_info(dev_priv, phy); if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) return false; if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) & (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) { drm_dbg(&dev_priv->drm, "DDI PHY %d powered, but power hasn't settled\n", phy); return false; } if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { drm_dbg(&dev_priv->drm, "DDI PHY %d powered, but still in reset\n", phy); return false; } return true; } static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) { u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy)); return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; } static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, enum dpio_phy phy) { if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n", phy); } static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { /* Still read out the GRC value for state verification */ if (phy_info->rcomp_phy != -1) dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy); if (bxt_ddi_phy_verify_state(dev_priv, phy)) { drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, " "won't reprogram it\n", phy); return; } drm_dbg(&dev_priv->drm, "DDI PHY %d enabled with invalid state, " "force reprogramming it\n", phy); } intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); /* * The PHY registers start out inaccessible and respond to reads with * all 1s. Eventually they become accessible as they power up, then * the reserved bit will give the default 0. Poll on the reserved bit * becoming 0 to find when the PHY is accessible. * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. 
*/ if (intel_wait_for_register_fw(&dev_priv->uncore, BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) drm_err(&dev_priv->drm, "timeout during PHY%d power on\n", phy); /* Program PLL Rcomp code offset */ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, 0xE4 << IREF0RC_OFFSET_SHIFT); intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, 0xE4 << IREF1RC_OFFSET_SHIFT); /* Program power gating */ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); if (phy_info->dual_channel) intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, DW6_OLDO_DYN_PWR_DOWN_EN); if (phy_info->rcomp_phy != -1) { u32 grc_code; bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); /* * PHY0 isn't connected to an RCOMP resistor so copy over * the corresponding calibrated value from PHY1, and disable * the automatic calibration on PHY0. */ val = bxt_get_grc(dev_priv, phy_info->rcomp_phy); dev_priv->display.state.bxt_phy_grc = val; grc_code = val << GRC_CODE_FAST_SHIFT | val << GRC_CODE_SLOW_SHIFT | val; intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), 0, GRC_DIS | GRC_RDY_OVRD); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; phy_info = bxt_get_phy_info(dev_priv, phy); intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); } void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info = bxt_get_phy_info(dev_priv, phy); enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; lockdep_assert_held(&dev_priv->display.power.domains.lock); was_enabled = true; if (rcomp_phy != -1) was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy); /* * We need to copy the GRC calibration value from rcomp_phy, * so make sure it's powered up. */ if (!was_enabled) _bxt_ddi_phy_init(dev_priv, rcomp_phy); _bxt_ddi_phy_init(dev_priv, phy); if (!was_enabled) bxt_ddi_phy_uninit(dev_priv, rcomp_phy); } static bool __printf(6, 7) __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, i915_reg_t reg, u32 mask, u32 expected, const char *reg_fmt, ...) { struct va_format vaf; va_list args; u32 val; val = intel_de_read(dev_priv, reg); if ((val & mask) == expected) return true; va_start(args, reg_fmt); vaf.fmt = reg_fmt; vaf.va = &args; drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " "current %08x, expected %08x (mask %08x)\n", phy, &vaf, reg.reg, val, (val & ~mask) | expected, mask); va_end(args); return false; } bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; u32 mask; bool ok; phy_info = bxt_get_phy_info(dev_priv, phy); #define _CHK(reg, mask, exp, fmt, ...) 
\ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) if (!bxt_ddi_phy_is_enabled(dev_priv, phy)) return false; ok = true; /* PLL Rcomp code offset */ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, "BXT_PORT_CL1CM_DW9(%d)", phy); ok &= _CHK(BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT, "BXT_PORT_CL1CM_DW10(%d)", phy); /* Power gating */ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG; ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask, "BXT_PORT_CL1CM_DW28(%d)", phy); if (phy_info->dual_channel) ok &= _CHK(BXT_PORT_CL2CM_DW6(phy), DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN, "BXT_PORT_CL2CM_DW6(%d)", phy); if (phy_info->rcomp_phy != -1) { u32 grc_code = dev_priv->display.state.bxt_phy_grc; grc_code = grc_code << GRC_CODE_FAST_SHIFT | grc_code << GRC_CODE_SLOW_SHIFT | grc_code; mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK | GRC_CODE_NOM_MASK; ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code, "BXT_PORT_REF_DW6(%d)", phy); mask = GRC_DIS | GRC_RDY_OVRD; ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask, "BXT_PORT_REF_DW8(%d)", phy); } return ok; #undef _CHK } u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) { switch (lane_count) { case 1: return 0; case 2: return BIT(2) | BIT(0); case 4: return BIT(3) | BIT(2) | BIT(0); default: MISSING_CASE(lane_count); return 0; } } void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 lane_lat_optim_mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { u32 val = intel_de_read(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane)); /* * Note that on CHV this flag is called UPAR, but has * the same function. 
*/ val &= ~LATENCY_OPTIM; if (lane_lat_optim_mask & BIT(lane)) val |= LATENCY_OPTIM; intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), val); } } u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; enum dpio_phy phy; enum dpio_channel ch; int lane; u8 mask; bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); mask = 0; for (lane = 0; lane < 4; lane++) { u32 val = intel_de_read(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane)); if (val & LATENCY_OPTIM) mask |= BIT(lane); } return mask; } enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port) { switch (dig_port->base.port) { default: MISSING_CASE(dig_port->base.port); fallthrough; case PORT_B: case PORT_D: return DPIO_CH0; case PORT_C: return DPIO_CH1; } } enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port) { switch (dig_port->base.port) { default: MISSING_CASE(dig_port->base.port); fallthrough; case PORT_B: case PORT_C: return DPIO_PHY0; case PORT_D: return DPIO_PHY1; } } enum dpio_channel vlv_pipe_to_channel(enum pipe pipe) { switch (pipe) { default: MISSING_CASE(pipe); fallthrough; case PIPE_A: case PIPE_C: return DPIO_CH0; case PIPE_B: return DPIO_CH1; } } void chv_set_phy_signal_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, u32 deemph_reg_value, u32 margin_reg_value, bool uniq_trans_scale) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; u32 val; int i; vlv_dpio_get(dev_priv); /* Clear calc init */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); } val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); } /* Program swing deemph */ for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); val &= ~DPIO_SWING_DEEMPH9P5_MASK; val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); } /* Program swing margin */ for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); val &= ~DPIO_SWING_MARGIN000_MASK; val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; /* * Supposedly this value shouldn't matter when unique transition * scale is disabled, but in fact it does matter. 
Let's just * always program the same value and hope it's OK. */ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); } /* * The document said it needs to set bit 27 for ch0 and bit 26 * for ch1. Might be a typo in the doc. * For now, for this unique transition scale selection, set bit * 27 for ch0 and ch1. */ for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); if (uniq_trans_scale) val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; else val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); } /* Start swing calculation */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); } vlv_dpio_put(dev_priv); } void chv_data_lane_soft_reset(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 val; val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); if (reset) val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); else val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); if (reset) val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); else val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); } val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; if (reset) val &= ~DPIO_PCS_CLK_SOFT_RESET; else val |= DPIO_PCS_CLK_SOFT_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; if (reset) val &= ~DPIO_PCS_CLK_SOFT_RESET; else val |= DPIO_PCS_CLK_SOFT_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); } } void chv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; unsigned int lane_mask = intel_dp_unused_lane_mask(crtc_state->lane_count); u32 val; /* * Must trick the second common lane into life. * Otherwise we can't even access the PLL. 
*/ if (ch == DPIO_CH0 && pipe == PIPE_B) dig_port->release_cl2_override = !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); chv_phy_powergate_lanes(encoder, true, lane_mask); vlv_dpio_get(dev_priv); /* Assert data lane reset */ chv_data_lane_soft_reset(encoder, crtc_state, true); /* program left/right clock distribution */ if (pipe != PIPE_B) { val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA1_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA1_FORCE; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); } else { val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA2_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA2_FORCE; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); } /* program clock channel usage */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; if (pipe != PIPE_B) val &= ~CHV_PCS_USEDCLKCHANNEL; else val |= CHV_PCS_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; if (pipe != PIPE_B) val &= ~CHV_PCS_USEDCLKCHANNEL; else val |= CHV_PCS_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); } /* * This a a bit weird since generally CL * matches the pipe, but here we need to * pick the CL based on the port. */ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); if (pipe != PIPE_B) val &= ~CHV_CMN_USEDCLKCHANNEL; else val |= CHV_CMN_USEDCLKCHANNEL; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); vlv_dpio_put(dev_priv); } void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; int data, i, stagger; u32 val; vlv_dpio_get(dev_priv); /* allow hardware to manage TX FIFO reset source */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); val &= ~DPIO_LANEDESKEW_STRAP_OVRD; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); val &= ~DPIO_LANEDESKEW_STRAP_OVRD; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); } /* Program Tx lane latency optimal setting*/ for (i = 0; i < crtc_state->lane_count; i++) { /* Set the upar bit */ if (crtc_state->lane_count == 1) data = 0x0; else data = (i == 1) ? 
0x0 : 0x1; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), data << DPIO_UPAR_SHIFT); } /* Data lane stagger programming */ if (crtc_state->port_clock > 270000) stagger = 0x18; else if (crtc_state->port_clock > 135000) stagger = 0xd; else if (crtc_state->port_clock > 67500) stagger = 0x7; else if (crtc_state->port_clock > 33750) stagger = 0x4; else stagger = 0x2; val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); val |= DPIO_TX2_STAGGER_MASK(0x1f); vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); val |= DPIO_TX2_STAGGER_MASK(0x1f); vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); } vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), DPIO_LANESTAGGER_STRAP(stagger) | DPIO_LANESTAGGER_STRAP_OVRD | DPIO_TX1_STAGGER_MASK(0x1f) | DPIO_TX1_STAGGER_MULT(6) | DPIO_TX2_STAGGER_MULT(0)); if (crtc_state->lane_count > 2) { vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), DPIO_LANESTAGGER_STRAP(stagger) | DPIO_LANESTAGGER_STRAP_OVRD | DPIO_TX1_STAGGER_MASK(0x1f) | DPIO_TX1_STAGGER_MULT(7) | DPIO_TX2_STAGGER_MULT(5)); } /* Deassert data lane reset */ chv_data_lane_soft_reset(encoder, crtc_state, false); vlv_dpio_put(dev_priv); } void chv_phy_release_cl2_override(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (dig_port->release_cl2_override) { chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); dig_port->release_cl2_override = false; } } void chv_phy_post_pll_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe; u32 val; vlv_dpio_get(dev_priv); /* disable left/right clock distribution */ if (pipe != PIPE_B) { val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); } else { val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); } vlv_dpio_put(dev_priv); /* * Leave the power down bit cleared for at least one * lane so that chv_powergate_phy_ch() will power * on something when the channel is otherwise unused. * When the port is off and the override is removed * the lanes power down anyway, so otherwise it doesn't * really matter what the state of power down bits is * after this. 
*/ chv_phy_powergate_lanes(encoder, false, 0x0); } void vlv_set_phy_signal_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, u32 demph_reg_value, u32 preemph_reg_value, u32 uniqtranscale_reg_value, u32 tx3_demph) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; vlv_dpio_get(dev_priv); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), uniqtranscale_reg_value); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); if (tx3_demph) vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); vlv_dpio_put(dev_priv); } void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; /* Program Tx lane resets to default */ vlv_dpio_get(dev_priv); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | DPIO_PCS_CLK_SOFT_RESET); /* Fix up inter-pair skew failure */ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); vlv_dpio_put(dev_priv); } void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; u32 val; vlv_dpio_get(dev_priv); /* Enable clock channels for this port */ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); val = 0; if (pipe) val |= (1<<21); else val &= ~(1<<21); val |= 0x001000c4; vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); /* Program lane clock */ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); vlv_dpio_put(dev_priv); } void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; vlv_dpio_get(dev_priv); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); 
vlv_dpio_put(dev_priv); }
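/*
 * Editor's note: the snippet below is not part of intel_dpio_phy.c; it is a
 * small standalone sketch of the data lane stagger selection performed by
 * chv_phy_pre_encoder_enable() above.  Only the port clock thresholds and
 * stagger values are taken from that code; the helper name and the test
 * harness are the editor's own.
 */
#include <stdio.h>

static unsigned int chv_lane_stagger(int port_clock_khz)
{
	/* Same threshold ladder as chv_phy_pre_encoder_enable() */
	if (port_clock_khz > 270000)
		return 0x18;
	else if (port_clock_khz > 135000)
		return 0xd;
	else if (port_clock_khz > 67500)
		return 0x7;
	else if (port_clock_khz > 33750)
		return 0x4;
	else
		return 0x2;
}

int main(void)
{
	/* DP HBR2, HBR and RBR symbol clocks in kHz */
	int clocks[] = { 540000, 270000, 162000 };
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%d kHz -> stagger 0x%x\n",
		       clocks[i], chv_lane_stagger(clocks[i]));
	return 0;
}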
linux-master
drivers/gpu/drm/i915/display/intel_dpio_phy.c
/* * Copyright 2006 Dave Airlie <[email protected]> * Copyright © 2006-2007 Intel Corporation * Jesse Barnes <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> */ #include <linux/delay.h> #include <linux/export.h> #include <linux/i2c.h> #include <linux/slab.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_panel.h" #include "intel_sdvo.h" #include "intel_sdvo_regs.h" #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) static const char * const tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , "PAL_N" , "PAL_NC" , "PAL_60" , "SECAM_B" , "SECAM_D" , "SECAM_G" , "SECAM_K" , "SECAM_K1", "SECAM_L" , "SECAM_60" }; #define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names) struct intel_sdvo { struct intel_encoder base; struct i2c_adapter *i2c; u8 slave_addr; struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ i915_reg_t sdvo_reg; /* Active outputs controlled by this SDVO output */ u16 controlled_output; /* * Capabilities of the SDVO device returned by * intel_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; u8 colorimetry_cap; /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; /* * For multiple function SDVO device, * this is for current attached outputs. 
*/ u16 attached_output; /* * Hotplug activation bits for this device */ u16 hotplug_active; enum port port; /* DDC bus used by this SDVO encoder */ u8 ddc_bus; /* * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd */ u8 dtd_sdvo_flags; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ u16 output_flag; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; struct drm_property *top; struct drm_property *bottom; struct drm_property *hpos; struct drm_property *vpos; struct drm_property *contrast; struct drm_property *saturation; struct drm_property *hue; struct drm_property *sharpness; struct drm_property *flicker_filter; struct drm_property *flicker_filter_adaptive; struct drm_property *flicker_filter_2d; struct drm_property *tv_chroma_filter; struct drm_property *tv_luma_filter; struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ struct drm_property *brightness; /* this is to get the range of margin.*/ u32 max_hscan, max_vscan; /** * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; }; struct intel_sdvo_connector_state { /* base.base: tv.saturation/contrast/hue/brightness */ struct intel_digital_connector_state base; struct { unsigned overscan_h, overscan_v, hpos, vpos, sharpness; unsigned flicker_filter, flicker_filter_2d, flicker_filter_adaptive; unsigned chroma_filter, luma_filter, dot_crawl; } tv; }; static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base); } static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector) { return to_sdvo(intel_attached_encoder(connector)); } static struct intel_sdvo_connector * to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(connector, struct intel_sdvo_connector, base.base); } #define to_intel_sdvo_connector_state(conn_state) \ container_of((conn_state), struct intel_sdvo_connector_state, base.base) static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type); static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector); /* * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 bval = val, cval = val; int i; if (HAS_PCH_SPLIT(dev_priv)) { intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val); intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. */ if (HAS_PCH_IBX(dev_priv)) { intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val); intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg); } return; } if (intel_sdvo->port == PORT_B) cval = intel_de_read(dev_priv, GEN3_SDVOC); else bval = intel_de_read(dev_priv, GEN3_SDVOB); /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. * The BIOS does this too. 
Yay, magic */ for (i = 0; i < 2; i++) { intel_de_write(dev_priv, GEN3_SDVOB, bval); intel_de_posting_read(dev_priv, GEN3_SDVOB); intel_de_write(dev_priv, GEN3_SDVOC, cval); intel_de_posting_read(dev_priv, GEN3_SDVOC); } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { struct i2c_msg msgs[] = { { .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, } }; int ret; if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } #define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ } /** Mapping of command numbers to names, for debug output */ static const struct { u8 cmd; const char *name; } __packed sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(RESET), SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES), SDVO_CMD_NAME_ENTRY(GET_POWER_STATE), SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE), SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE), SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH), SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS), SDVO_CMD_NAME_ENTRY(GET_HPOS), SDVO_CMD_NAME_ENTRY(SET_HPOS), SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS), SDVO_CMD_NAME_ENTRY(GET_VPOS), SDVO_CMD_NAME_ENTRY(SET_VPOS), SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION), SDVO_CMD_NAME_ENTRY(GET_SATURATION), SDVO_CMD_NAME_ENTRY(SET_SATURATION), SDVO_CMD_NAME_ENTRY(GET_MAX_HUE), SDVO_CMD_NAME_ENTRY(GET_HUE), SDVO_CMD_NAME_ENTRY(SET_HUE), SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST), SDVO_CMD_NAME_ENTRY(GET_CONTRAST), SDVO_CMD_NAME_ENTRY(SET_CONTRAST), SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V), 
SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS), SDVO_CMD_NAME_ENTRY(GET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SET_SHARPNESS), SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(GET_ENCODE), SDVO_CMD_NAME_ENTRY(SET_ENCODE), SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP), SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER), SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO), SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA), }; #undef SDVO_CMD_NAME_ENTRY static const char *sdvo_cmd_name(u8 cmd) { int i; for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) return sdvo_cmd_names[i].name; } return NULL; } #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); const char *cmd_name; int i, pos = 0; char buffer[64]; #define BUF_PRINT(args...) \ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args) for (i = 0; i < args_len; i++) { BUF_PRINT("%02X ", ((u8 *)args)[i]); } for (; i < 8; i++) { BUF_PRINT(" "); } cmd_name = sdvo_cmd_name(cmd); if (cmd_name) BUF_PRINT("(%s)", cmd_name); else BUF_PRINT("(%02X)", cmd); drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); } static const char * const cmd_status_names[] = { [SDVO_CMD_STATUS_POWER_ON] = "Power on", [SDVO_CMD_STATUS_SUCCESS] = "Success", [SDVO_CMD_STATUS_NOTSUPP] = "Not supported", [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg", [SDVO_CMD_STATUS_PENDING] = "Pending", [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified", [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported", }; static const char *sdvo_cmd_status(u8 status) { if (status < ARRAY_SIZE(cmd_status_names)) return cmd_status_names[status]; else return NULL; } static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len, bool unlocked) { u8 *buf, status; struct i2c_msg *msgs; int i, ret = true; /* Would be simpler to allocate both in one go ? 
*/ buf = kzalloc(args_len * 2 + 2, GFP_KERNEL); if (!buf) return false; msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); if (!msgs) { kfree(buf); return false; } intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((u8*)args)[i]; } msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; buf[2*i + 0] = SDVO_I2C_OPCODE; buf[2*i + 1] = cmd; /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; msgs[i+1].addr = intel_sdvo->slave_addr; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; msgs[i+2].addr = intel_sdvo->slave_addr; msgs[i+2].flags = I2C_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; if (unlocked) ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); else ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3); if (ret < 0) { DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); ret = false; goto out; } if (ret != i+3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); ret = false; } out: kfree(msgs); kfree(buf); return ret; } static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { return __intel_sdvo_write_cmd(intel_sdvo, cmd, args, args_len, true); } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); const char *cmd_status; u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ u8 status; int i, pos = 0; char buffer[64]; buffer[0] = '\0'; /* * The documentation states that all commands will be * processed within 15µs, and that we need only poll * the status byte a maximum of 3 times in order for the * command to be complete. * * Check 5 times in case the hardware failed to read the docs. * * Also beware that the first response by many devices is to * reply PENDING and stall for time. TVs are notorious for * requiring longer than specified to complete their replies. * Originally (in the DDX long ago), the delay was only ever 15ms * with an additional delay of 30ms applied for TVs added later after * many experiments. To accommodate both sets of delays, we do a * sequence of slow checks if the device is falling behind and fails * to reply within 5*15µs. */ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; while ((status == SDVO_CMD_STATUS_PENDING || status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { if (retry < 10) msleep(15); else udelay(15); if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; } #define BUF_PRINT(args...) \ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args) cmd_status = sdvo_cmd_status(status); if (cmd_status) BUF_PRINT("(%s)", cmd_status); else BUF_PRINT("(??? %d)", status); if (status != SDVO_CMD_STATUS_SUCCESS) goto log_fail; /* Read the command response */ for (i = 0; i < response_len; i++) { if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i])) goto log_fail; BUF_PRINT(" %02X", ((u8 *)response)[i]); } drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer); return true; log_fail: DRM_DEBUG_KMS("%s: R: ... 
failed %s\n", SDVO_NAME(intel_sdvo), buffer); return false; } static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode) { if (adjusted_mode->crtc_clock >= 100000) return 1; else if (adjusted_mode->crtc_clock >= 50000) return 2; else return 4; } static bool __intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, u8 ddc_bus) { /* This must be the immediately preceding write before the i2c xfer */ return __intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &ddc_bus, 1, false); } static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) return false; return intel_sdvo_read_response(intel_sdvo, NULL, 0); } static bool intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len) { if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0)) return false; return intel_sdvo_read_response(intel_sdvo, value, len); } static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_set_target_input_args targets = {0}; return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); } /* * Return whether each input is trained. * * This function is making an assumption about the layout of the response, * which should be checked against the docs. */ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2) { struct intel_sdvo_get_trained_inputs_response response; BUILD_BUG_ON(sizeof(response) != 1); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, &response, sizeof(response))) return false; *input_1 = response.input0_trained; *input_2 = response.input1_trained; return true; } static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, u16 outputs) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); } static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo, u16 *outputs) { return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_OUTPUTS, outputs, sizeof(*outputs)); } static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, int mode) { u8 state = SDVO_ENCODER_STATE_ON; switch (mode) { case DRM_MODE_DPMS_ON: state = SDVO_ENCODER_STATE_ON; break; case DRM_MODE_DPMS_STANDBY: state = SDVO_ENCODER_STATE_STANDBY; break; case DRM_MODE_DPMS_SUSPEND: state = SDVO_ENCODER_STATE_SUSPEND; break; case DRM_MODE_DPMS_OFF: state = SDVO_ENCODER_STATE_OFF; break; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); } static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo, int *clock_min, int *clock_max) { struct intel_sdvo_pixel_clock_range clocks; BUILD_BUG_ON(sizeof(clocks) != 4); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, &clocks, sizeof(clocks))) return false; /* Convert the values from units of 10 kHz to kHz. 
*/ *clock_min = clocks.min * 10; *clock_max = clocks.max * 10; return true; } static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, u16 outputs) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); } static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); } static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd, struct intel_sdvo_dtd *dtd) { return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); } static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { return intel_sdvo_get_timing(intel_sdvo, SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); } static bool intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, const struct drm_display_mode *mode) { struct intel_sdvo_preferred_input_timing_args args; memset(&args, 0, sizeof(args)); args.clock = mode->clock / 10; args.width = mode->hdisplay; args.height = mode->vdisplay; args.interlace = 0; if (IS_LVDS(intel_sdvo_connector)) { const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(&intel_sdvo_connector->base, mode); if (fixed_mode->hdisplay != args.width || fixed_mode->vdisplay != args.height) args.scaled = 1; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); } static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { BUILD_BUG_ON(sizeof(dtd->part1) != 8); BUILD_BUG_ON(sizeof(dtd->part2) != 8); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, &dtd->part2, sizeof(dtd->part2)); } static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); } static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, const struct drm_display_mode *mode) { u16 width, height; u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; u16 h_sync_offset, v_sync_offset; int mode_clock; memset(dtd, 0, sizeof(*dtd)); width = mode->hdisplay; height = mode->vdisplay; /* do some mode translations */ h_blank_len = mode->htotal - mode->hdisplay; h_sync_len = mode->hsync_end - mode->hsync_start; v_blank_len = mode->vtotal - mode->vdisplay; v_sync_len = mode->vsync_end - mode->vsync_start; h_sync_offset = mode->hsync_start - mode->hdisplay; v_sync_offset = mode->vsync_start - mode->vdisplay; mode_clock = mode->clock; mode_clock /= 10; dtd->part1.clock = mode_clock; dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank_len >> 8) & 0xf); dtd->part1.v_active = height & 0xff; 
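	/*
	 * Note: the active/blank geometry is stored as 12-bit values -- the
	 * low byte lands in the *_active/*_blank fields and bits 11:8 of each
	 * are packed into the nibbles of *_high.  For example, a 1024x768
	 * mode (width 0x400, height 0x300) ends up with h_active/v_active of
	 * 0x00 and 0x4/0x3 in the upper nibbles of h_high/v_high.
	 */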
dtd->part1.v_blank = v_blank_len & 0xff; dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); dtd->part2.h_sync_off = h_sync_offset & 0xff; dtd->part2.h_sync_width = h_sync_len & 0xff; dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); dtd->part2.dtd_flags = 0x18; if (mode->flags & DRM_MODE_FLAG_INTERLACE) dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE; if (mode->flags & DRM_MODE_FLAG_PHSYNC) dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE; if (mode->flags & DRM_MODE_FLAG_PVSYNC) dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; } static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode, const struct intel_sdvo_dtd *dtd) { struct drm_display_mode mode = {}; mode.hdisplay = dtd->part1.h_active; mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off; mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width; mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; mode.htotal = mode.hdisplay + dtd->part1.h_blank; mode.htotal += (dtd->part1.h_high & 0xf) << 8; mode.vdisplay = dtd->part1.v_active; mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; mode.vsync_start = mode.vdisplay; mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0; mode.vsync_end = mode.vsync_start + (dtd->part2.v_sync_off_width & 0xf); mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; mode.vtotal = mode.vdisplay + dtd->part1.v_blank; mode.vtotal += (dtd->part1.v_high & 0xf) << 8; mode.clock = dtd->part1.clock * 10; if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) mode.flags |= DRM_MODE_FLAG_INTERLACE; if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) mode.flags |= DRM_MODE_FLAG_PHSYNC; else mode.flags |= DRM_MODE_FLAG_NHSYNC; if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) mode.flags |= DRM_MODE_FLAG_PVSYNC; else mode.flags |= DRM_MODE_FLAG_NVSYNC; drm_mode_set_crtcinfo(&mode, 0); drm_mode_copy(pmode, &mode); } static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_encode encode; BUILD_BUG_ON(sizeof(encode) != 2); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE, &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, u8 mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo, u8 pixel_repeat) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI, &pixel_repeat, 1); } static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, u8 audio_state) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, &audio_state, 1); } static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo, u8 *hbuf_size) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, hbuf_size, 1)) return false; /* Buffer size is 0 based, hooray! However zero means zero. 
*/ if (*hbuf_size) (*hbuf_size)++; return true; } #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; u8 set_buf_index[2]; u8 av_split; u8 buf_size; u8 buf[48]; u8 *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); intel_sdvo_read_response(encoder, &buf_size, 1); pos = buf; for (j = 0; j <= buf_size; j += 8) { intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, NULL, 0); intel_sdvo_read_response(encoder, pos, 8); pos += 8; } } } #endif static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, unsigned int if_index, u8 tx_rate, const u8 *data, unsigned int length) { u8 set_buf_index[2] = { if_index, 0 }; u8 hbuf_size, tmp[8]; int i; if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return false; if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); if (hbuf_size < length) return false; for (i = 0; i < hbuf_size; i += 8) { memset(tmp, 0, 8); if (i < length) memcpy(tmp, data + i, min_t(unsigned, 8, length - i)); if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, tmp, 8)) return false; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, unsigned int if_index, u8 *data, unsigned int length) { u8 set_buf_index[2] = { if_index, 0 }; u8 hbuf_size, tx_rate, av_split; int i; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1)) return -ENXIO; if (av_split < if_index) return 0; if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return -ENXIO; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_TXRATE, &tx_rate, 1)) return -ENXIO; /* TX_DISABLED doesn't mean disabled for ELD */ if (if_index != SDVO_HBUF_INDEX_ELD && tx_rate == SDVO_HBUF_TX_DISABLED) return 0; if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); hbuf_size = min_t(unsigned int, length, hbuf_size); for (i = 0; i < hbuf_size; i += 8) { if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0)) return -ENXIO; if (!intel_sdvo_read_response(intel_sdvo, &data[i], min_t(unsigned int, 8, hbuf_size - i))) return -ENXIO; } return hbuf_size; } static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int ret; if (!crtc_state->has_hdmi_sink) return true; crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); ret = drm_hdmi_avi_infoframe_from_display_mode(frame, conn_state->connector, adjusted_mode); if (ret) return false; drm_hdmi_avi_infoframe_quant_range(frame, conn_state->connector, adjusted_mode, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); ret = hdmi_avi_infoframe_check(frame); if (drm_WARN_ON(&dev_priv->drm, ret)) return false; return true; } static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; const union hdmi_infoframe *frame = &crtc_state->infoframes.avi; ssize_t len; if ((crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0) return true; if (drm_WARN_ON(&dev_priv->drm, frame->any.type != HDMI_INFOFRAME_TYPE_AVI)) return false; len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data)); if (drm_WARN_ON(&dev_priv->drm, len < 0)) return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, SDVO_HBUF_TX_VSYNC, sdvo_data, len); } static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, struct intel_crtc_state *crtc_state) { u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; union hdmi_infoframe *frame = &crtc_state->infoframes.avi; ssize_t len; int ret; if (!crtc_state->has_hdmi_sink) return; len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, sdvo_data, sizeof(sdvo_data)); if (len < 0) { DRM_DEBUG_KMS("failed to read AVI infoframe\n"); return; } else if (len == 0) { return; } crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); ret = hdmi_infoframe_unpack(frame, sdvo_data, len); if (ret) { DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n"); return; } if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI) DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", frame->any.type, HDMI_INFOFRAME_TYPE_AVI); } static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); ssize_t len; u8 val; if (!crtc_state->has_audio) return; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1)) return; if ((val & SDVO_AUDIO_ELD_VALID) == 0) return; len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, crtc_state->eld, sizeof(crtc_state->eld)); if (len < 0) drm_dbg_kms(&i915->drm, "failed to read ELD\n"); } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state) { struct intel_sdvo_tv_format format; u32 format_map; format_map = 1 << conn_state->tv.mode; memset(&format, 0, sizeof(format)); memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); BUILD_BUG_ON(sizeof(format) != 6); return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format)); } static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, const struct drm_display_mode *mode) { struct intel_sdvo_dtd output_dtd; if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return false; intel_sdvo_get_dtd_from_mode(&output_dtd, mode); if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) return false; return true; } /* * Asks the sdvo controller for the preferred input mode given the output mode. * Unfortunately we have to set up the full output mode to do that. */ static bool intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo_dtd input_dtd; /* Reset the input timing to the screen. 
Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, intel_sdvo_connector, mode)) return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, &input_dtd)) return false; intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags; return true; } static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev); unsigned dotclock = pipe_config->port_clock; struct dpll *clock = &pipe_config->dpll; /* * SDVO TV has fixed PLL values depend on its clock range, * this mirrors vbios setting. */ if (dotclock >= 100000 && dotclock < 140500) { clock->p1 = 2; clock->p2 = 10; clock->n = 3; clock->m1 = 16; clock->m2 = 8; } else if (dotclock >= 140500 && dotclock <= 200000) { clock->p1 = 1; clock->p2 = 10; clock->n = 6; clock->m1 = 12; clock->m2 = 8; } else { drm_WARN(&dev_priv->drm, 1, "SDVO TV clock out of range: %i\n", dotclock); } pipe_config->clock_set = true; } static bool intel_has_hdmi_sink(struct intel_sdvo_connector *intel_sdvo_connector, const struct drm_connector_state *conn_state) { struct drm_connector *connector = conn_state->connector; return intel_sdvo_connector->is_hdmi && connector->display_info.is_hdmi && READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0) return false; return intel_hdmi_limited_color_range(crtc_state, conn_state); } static bool intel_sdvo_has_audio(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_connector *connector = conn_state->connector; struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); if (!crtc_state->has_hdmi_sink) return false; if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) return intel_sdvo_connector->is_hdmi && connector->display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } static int intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(conn_state->connector); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_display_mode *mode = &pipe_config->hw.mode; DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); pipe_config->pipe_bpp = 8*3; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) pipe_config->has_pch_encoder = true; /* * We need to construct preferred input timings based on our * output timings. To do that, we have to set the output * timings, even though this isn't really the right place in * the sequence to do it. Oh well. 
*/ if (IS_TV(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, mode, adjusted_mode); pipe_config->sdvo_tv_clock = true; } else if (IS_LVDS(intel_sdvo_connector)) { const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(&intel_sdvo_connector->base, mode); int ret; ret = intel_panel_compute_config(&intel_sdvo_connector->base, adjusted_mode); if (ret) return ret; if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, fixed_mode)) return -EINVAL; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, intel_sdvo_connector, mode, adjusted_mode); } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; /* * Make the CRTC code factor in the SDVO pixel multiplier. The * SDVO device will factor out the multiplier during mode_set. */ pipe_config->pixel_multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, conn_state); pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); pipe_config->limited_color_range = intel_sdvo_limited_color_range(encoder, pipe_config, conn_state); /* Clock computation needs to happen after pixel multiplier. */ if (IS_TV(intel_sdvo_connector)) i9xx_adjust_sdvo_tv_clock(pipe_config); if (conn_state->picture_aspect_ratio) adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; if (!intel_sdvo_compute_avi_infoframe(intel_sdvo, pipe_config, conn_state)) { DRM_DEBUG_KMS("bad AVI infoframe\n"); return -EINVAL; } return 0; } #define UPDATE_PROPERTY(input, NAME) \ do { \ val = input; \ intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_##NAME, &val, sizeof(val)); \ } while (0) static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, const struct intel_sdvo_connector_state *sdvo_state) { const struct drm_connector_state *conn_state = &sdvo_state->base.base; struct intel_sdvo_connector *intel_sdvo_conn = to_intel_sdvo_connector(conn_state->connector); u16 val; if (intel_sdvo_conn->left) UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H); if (intel_sdvo_conn->top) UPDATE_PROPERTY(sdvo_state->tv.overscan_v, OVERSCAN_V); if (intel_sdvo_conn->hpos) UPDATE_PROPERTY(sdvo_state->tv.hpos, HPOS); if (intel_sdvo_conn->vpos) UPDATE_PROPERTY(sdvo_state->tv.vpos, VPOS); if (intel_sdvo_conn->saturation) UPDATE_PROPERTY(conn_state->tv.saturation, SATURATION); if (intel_sdvo_conn->contrast) UPDATE_PROPERTY(conn_state->tv.contrast, CONTRAST); if (intel_sdvo_conn->hue) UPDATE_PROPERTY(conn_state->tv.hue, HUE); if (intel_sdvo_conn->brightness) UPDATE_PROPERTY(conn_state->tv.brightness, BRIGHTNESS); if (intel_sdvo_conn->sharpness) UPDATE_PROPERTY(sdvo_state->tv.sharpness, SHARPNESS); if (intel_sdvo_conn->flicker_filter) UPDATE_PROPERTY(sdvo_state->tv.flicker_filter, FLICKER_FILTER); if (intel_sdvo_conn->flicker_filter_2d) UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_2d, FLICKER_FILTER_2D); if (intel_sdvo_conn->flicker_filter_adaptive) UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); if (intel_sdvo_conn->tv_chroma_filter) UPDATE_PROPERTY(sdvo_state->tv.chroma_filter, TV_CHROMA_FILTER); if (intel_sdvo_conn->tv_luma_filter) UPDATE_PROPERTY(sdvo_state->tv.luma_filter, TV_LUMA_FILTER); if (intel_sdvo_conn->dot_crawl) UPDATE_PROPERTY(sdvo_state->tv.dot_crawl, DOT_CRAWL); #undef UPDATE_PROPERTY } static void intel_sdvo_pre_enable(struct 
intel_atomic_state *state, struct intel_encoder *intel_encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(conn_state->connector); const struct drm_display_mode *mode = &crtc_state->hw.mode; struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd, output_dtd; int rate; intel_sdvo_update_props(intel_sdvo, sdvo_state); /* * First, set the input mapping for the first input to our controlled * output. This is only correct if we're a single-input device, in * which case the first input is the output from the appropriate SDVO * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. */ in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0; intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); /* Set the output timings to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; /* lvds has a special fixed output timing. */ if (IS_LVDS(intel_sdvo_connector)) { const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(&intel_sdvo_connector->base, mode); intel_sdvo_get_dtd_from_mode(&output_dtd, fixed_mode); } else { intel_sdvo_get_dtd_from_mode(&output_dtd, mode); } if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) drm_info(&dev_priv->drm, "Setting output timings on %s failed\n", SDVO_NAME(intel_sdvo)); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return; if (crtc_state->has_hdmi_sink) { intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, crtc_state->limited_color_range ? SDVO_COLORIMETRY_RGB220 : SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state); intel_sdvo_set_pixel_replication(intel_sdvo, !!(adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); if (IS_TV(intel_sdvo_connector) && !intel_sdvo_set_tv_format(intel_sdvo, conn_state)) return; intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector)) input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) drm_info(&dev_priv->drm, "Setting input timings on %s failed\n", SDVO_NAME(intel_sdvo)); switch (crtc_state->pixel_multiplier) { default: drm_WARN(&dev_priv->drm, 1, "unknown pixel multiplier specified\n"); fallthrough; case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; } if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) return; /* Set the SDVO control regs. */ if (DISPLAY_VER(dev_priv) >= 4) { /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. 
*/ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (DISPLAY_VER(dev_priv) < 5) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); if (intel_sdvo->port == PORT_B) sdvox &= SDVOB_PRESERVE_MASK; else sdvox &= SDVOC_PRESERVE_MASK; sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } if (HAS_PCH_CPT(dev_priv)) sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); else sdvox |= SDVO_PIPE_SEL(crtc->pipe); if (DISPLAY_VER(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { sdvox |= (crtc_state->pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && DISPLAY_VER(dev_priv) < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(&connector->base); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); u16 active_outputs = 0; intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); return active_outputs & intel_sdvo_connector->output_flag; } bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum pipe *pipe) { u32 val; val = intel_de_read(dev_priv, sdvo_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT; else if (IS_CHERRYVIEW(dev_priv)) *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV; else *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT; return val & SDVO_ENABLE; } static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; bool ret; intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe); return ret || active_outputs; } static void intel_sdvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_dtd dtd; int encoder_pixel_multiplier = 0; int dotclock; u32 flags = 0, sdvox; u8 val; bool ret; pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO); sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); if (!ret) { /* * Some sdvo encoders are not spec compliant and don't * implement the mandatory get_timings function. */ drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n"); pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS; } else { if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; } pipe_config->hw.adjusted_mode.flags |= flags; /* * pixel multiplier readout is tricky: Only on i915g/gm it is stored in * the sdvo port register, on all other platforms it is part of the dpll * state. 
Since the general pipe state readout happens before the * encoder->get_config, we already have a valid pixel multiplier on all * other platforms. */ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) { pipe_config->pixel_multiplier = ((sdvox & SDVO_PORT_MULTIPLY_MASK) >> SDVO_PORT_MULTIPLY_SHIFT) + 1; } dotclock = pipe_config->port_clock; if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; pipe_config->hw.adjusted_mode.crtc_clock = dotclock; /* Cross check the port pixel multiplier with the sdvo encoder state. */ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1)) { switch (val) { case SDVO_CLOCK_RATE_MULT_1X: encoder_pixel_multiplier = 1; break; case SDVO_CLOCK_RATE_MULT_2X: encoder_pixel_multiplier = 2; break; case SDVO_CLOCK_RATE_MULT_4X: encoder_pixel_multiplier = 4; break; } } drm_WARN(dev, encoder_pixel_multiplier != pipe_config->pixel_multiplier, "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", pipe_config->pixel_multiplier, encoder_pixel_multiplier); if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY, &val, 1)) { if (val == SDVO_COLORIMETRY_RGB220) pipe_config->limited_color_range = true; } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1)) { if (val & SDVO_AUDIO_PRESENCE_DETECT) pipe_config->has_audio = true; } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { if (val == SDVO_ENCODE_HDMI) pipe_config->has_hdmi_sink = true; } intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config); intel_sdvo_get_eld(intel_sdvo, pipe_config); } static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) { intel_sdvo_set_audio_state(intel_sdvo, 0); } static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { const u8 *eld = crtc_state->eld; intel_sdvo_set_audio_state(intel_sdvo, 0); intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, SDVO_HBUF_TX_DISABLED, eld, drm_eld_size(eld)); intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT); } static void intel_disable_sdvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; if (old_crtc_state->has_audio) intel_sdvo_disable_audio(intel_sdvo); intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_OFF); temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); /* * HW workaround for IBX, we need to move the port * to transcoder A after disabling it to allow the * matching DP port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) { /* * We get CPU/PCH FIFO underruns on the other pipe when * doing the workaround. Sweep them under the rug.
*/ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); temp &= ~SDVO_PIPE_SEL_MASK; temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A); intel_sdvo_write_sdvox(intel_sdvo, temp); temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } } static void pch_disable_sdvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { } static void pch_post_disable_sdvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state); } static void intel_enable_sdvo(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 temp; bool input1, input2; int i; bool success; temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); temp |= SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); for (i = 0; i < 2; i++) intel_crtc_wait_for_next_vblank(crtc); success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* * Warn if the device reported failure to sync. * * A lot of SDVO devices fail to notify of sync, but it's * a given that if the status is a success, we succeeded.
*/ if (success && !input1) { drm_dbg_kms(&dev_priv->drm, "First %s output reported failure to " "sync\n", SDVO_NAME(intel_sdvo)); } if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); if (pipe_config->has_audio) intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state); int clock = mode->clock; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (clock > max_dotclk) return MODE_CLOCK_HIGH; if (mode->flags & DRM_MODE_FLAG_DBLCLK) { if (!has_hdmi_sink) return MODE_CLOCK_LOW; clock *= 2; } if (intel_sdvo->pixel_clock_min > clock) return MODE_CLOCK_LOW; if (intel_sdvo->pixel_clock_max < clock) return MODE_CLOCK_HIGH; if (IS_LVDS(intel_sdvo_connector)) { enum drm_mode_status status; status = intel_panel_mode_valid(&intel_sdvo_connector->base, mode); if (status != MODE_OK) return status; } return MODE_OK; } static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { BUILD_BUG_ON(sizeof(*caps) != 8); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps))) return false; DRM_DEBUG_KMS("SDVO capabilities:\n" " vendor_id: %d\n" " device_id: %d\n" " device_rev_id: %d\n" " sdvo_version_major: %d\n" " sdvo_version_minor: %d\n" " sdvo_inputs_mask: %d\n" " smooth_scaling: %d\n" " sharp_scaling: %d\n" " up_scaling: %d\n" " down_scaling: %d\n" " stall_support: %d\n" " output_flags: %d\n", caps->vendor_id, caps->device_id, caps->device_rev_id, caps->sdvo_version_major, caps->sdvo_version_minor, caps->sdvo_inputs_mask, caps->smooth_scaling, caps->sharp_scaling, caps->up_scaling, caps->down_scaling, caps->stall_support, caps->output_flags); return true; } static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo) { u8 cap; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP, &cap, sizeof(cap))) return SDVO_COLORIMETRY_RGB256; return cap; } static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); u16 hotplug; if (!I915_HAS_HOTPLUG(dev_priv)) return 0; /* * HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise * on the line. */ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) return 0; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &hotplug, sizeof(hotplug))) return 0; return hotplug; } static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); } static enum intel_hotplug_state intel_sdvo_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { intel_sdvo_enable_hotplug(encoder); return intel_encoder_hotplug(encoder, connector); } static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) { /* Is there more than one type of output? 
*/ return hweight16(intel_sdvo->caps.output_flags) > 1; } static const struct drm_edid * intel_sdvo_get_edid(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); return drm_edid_read_ddc(connector, &sdvo->ddc); } /* Mac mini hack -- use the same DDC as the analog connector */ static const struct drm_edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); struct i2c_adapter *i2c; i2c = intel_gmbus_get_adapter(i915, i915->display.vbt.crt_ddc_pin); return drm_edid_read_ddc(connector, i2c); } static enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); enum drm_connector_status status; const struct drm_edid *drm_edid; drm_edid = intel_sdvo_get_edid(connector); if (!drm_edid && intel_sdvo_multifunc_encoder(intel_sdvo)) { u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { intel_sdvo->ddc_bus = ddc; drm_edid = intel_sdvo_get_edid(connector); if (drm_edid) break; } /* * If we found the EDID on the other bus, * assume that is the correct DDC bus. */ if (!drm_edid) intel_sdvo->ddc_bus = saved_ddc; } /* * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. */ if (!drm_edid) drm_edid = intel_sdvo_get_analog_edid(connector); status = connector_status_unknown; if (drm_edid) { const struct edid *edid = drm_edid_raw(drm_edid); /* DDC bus is shared, match EDID to connector type */ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) status = connector_status_connected; else status = connector_status_disconnected; drm_edid_free(drm_edid); } return status; } static bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, const struct drm_edid *drm_edid) { const struct edid *edid = drm_edid_raw(drm_edid); bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); bool connector_is_digital = !!IS_DIGITAL(sdvo); DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? 
%d\n", connector_is_digital, monitor_is_digital); return connector_is_digital == monitor_is_digital; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; u16 response; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); if (!INTEL_DISPLAY_ENABLED(i915)) return connector_status_disconnected; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, &response, 2)) return connector_status_unknown; DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", response & 0xff, response >> 8, intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; intel_sdvo->attached_output = response; if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_tmds_sink_detect(connector); else { const struct drm_edid *drm_edid; /* if we have an edid check it matches the connection */ drm_edid = intel_sdvo_get_edid(connector); if (!drm_edid) drm_edid = intel_sdvo_get_analog_edid(connector); if (drm_edid) { if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, drm_edid)) ret = connector_status_connected; else ret = connector_status_disconnected; drm_edid_free(drm_edid); } else { ret = connector_status_connected; } } return ret; } static int intel_sdvo_get_ddc_modes(struct drm_connector *connector) { int num_modes = 0; const struct drm_edid *drm_edid; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); /* set the bus switch and get the modes */ drm_edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC * link between analog and digital outputs. So, if the regular SDVO * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ if (!drm_edid) drm_edid = intel_sdvo_get_analog_edid(connector); if (!drm_edid) return 0; if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), drm_edid)) num_modes += intel_connector_update_modes(connector, drm_edid); drm_edid_free(drm_edid); return num_modes; } /* * Set of SDVO TV modes. * Note! This is in reply order (see loop in get_tv_modes). * XXX: all 60Hz refresh? 
*/ static const struct drm_display_mode sdvo_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, 416, 0, 200, 201, 232, 233, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, 416, 0, 240, 241, 272, 273, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, 496, 0, 300, 301, 332, 333, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, 736, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, 736, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, 736, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, 800, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, 800, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, 816, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, 816, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, 816, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, 816, 0, 540, 541, 572, 573, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, 816, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832, 864, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, 896, 0, 600, 601, 632, 633, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, 928, 0, 624, 625, 656, 657, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, 1016, 0, 766, 767, 798, 799, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, 1120, 0, 768, 769, 800, 801, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, 1376, 0, 1024, 1025, 1056, 1057, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; static int intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; u32 reply = 0, format_map = 0; int num_modes = 0; int i; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); /* * Read the list of supported input resolutions for the selected TV * format. 
*/ format_map = 1 << conn_state->tv.mode; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return 0; BUILD_BUG_ON(sizeof(tv_res) != 3); if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return 0; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) return 0; for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) { if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]); if (nmode) { drm_mode_probed_add(connector, nmode); num_modes++; } } } return num_modes; } static int intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); return intel_panel_get_modes(to_intel_connector(connector)); } static int intel_sdvo_get_modes(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (IS_TV(intel_sdvo_connector)) return intel_sdvo_get_tv_modes(connector); else if (IS_LVDS(intel_sdvo_connector)) return intel_sdvo_get_lvds_modes(connector); else return intel_sdvo_get_ddc_modes(connector); } static int intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, u64 *val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state); if (property == intel_sdvo_connector->tv_format) { int i; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) { *val = i; return 0; } drm_WARN_ON(connector->dev, 1); *val = 0; } else if (property == intel_sdvo_connector->top || property == intel_sdvo_connector->bottom) *val = intel_sdvo_connector->max_vscan - sdvo_state->tv.overscan_v; else if (property == intel_sdvo_connector->left || property == intel_sdvo_connector->right) *val = intel_sdvo_connector->max_hscan - sdvo_state->tv.overscan_h; else if (property == intel_sdvo_connector->hpos) *val = sdvo_state->tv.hpos; else if (property == intel_sdvo_connector->vpos) *val = sdvo_state->tv.vpos; else if (property == intel_sdvo_connector->saturation) *val = state->tv.saturation; else if (property == intel_sdvo_connector->contrast) *val = state->tv.contrast; else if (property == intel_sdvo_connector->hue) *val = state->tv.hue; else if (property == intel_sdvo_connector->brightness) *val = state->tv.brightness; else if (property == intel_sdvo_connector->sharpness) *val = sdvo_state->tv.sharpness; else if (property == intel_sdvo_connector->flicker_filter) *val = sdvo_state->tv.flicker_filter; else if (property == intel_sdvo_connector->flicker_filter_2d) *val = sdvo_state->tv.flicker_filter_2d; else if (property == intel_sdvo_connector->flicker_filter_adaptive) *val = sdvo_state->tv.flicker_filter_adaptive; else if (property == intel_sdvo_connector->tv_chroma_filter) *val = sdvo_state->tv.chroma_filter; else if (property == intel_sdvo_connector->tv_luma_filter) *val = sdvo_state->tv.luma_filter; else if (property == intel_sdvo_connector->dot_crawl) *val = sdvo_state->tv.dot_crawl; else return intel_digital_connector_atomic_get_property(connector, state, property, val); return 0; } static 
int intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, u64 val) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); if (property == intel_sdvo_connector->tv_format) { state->tv.mode = intel_sdvo_connector->tv_format_supported[val]; if (state->crtc) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); crtc_state->connectors_changed = true; } } else if (property == intel_sdvo_connector->top || property == intel_sdvo_connector->bottom) /* Cannot set these independent from each other */ sdvo_state->tv.overscan_v = intel_sdvo_connector->max_vscan - val; else if (property == intel_sdvo_connector->left || property == intel_sdvo_connector->right) /* Cannot set these independent from each other */ sdvo_state->tv.overscan_h = intel_sdvo_connector->max_hscan - val; else if (property == intel_sdvo_connector->hpos) sdvo_state->tv.hpos = val; else if (property == intel_sdvo_connector->vpos) sdvo_state->tv.vpos = val; else if (property == intel_sdvo_connector->saturation) state->tv.saturation = val; else if (property == intel_sdvo_connector->contrast) state->tv.contrast = val; else if (property == intel_sdvo_connector->hue) state->tv.hue = val; else if (property == intel_sdvo_connector->brightness) state->tv.brightness = val; else if (property == intel_sdvo_connector->sharpness) sdvo_state->tv.sharpness = val; else if (property == intel_sdvo_connector->flicker_filter) sdvo_state->tv.flicker_filter = val; else if (property == intel_sdvo_connector->flicker_filter_2d) sdvo_state->tv.flicker_filter_2d = val; else if (property == intel_sdvo_connector->flicker_filter_adaptive) sdvo_state->tv.flicker_filter_adaptive = val; else if (property == intel_sdvo_connector->tv_chroma_filter) sdvo_state->tv.chroma_filter = val; else if (property == intel_sdvo_connector->tv_luma_filter) sdvo_state->tv.luma_filter = val; else if (property == intel_sdvo_connector->dot_crawl) sdvo_state->tv.dot_crawl = val; else return intel_digital_connector_atomic_set_property(connector, state, property, val); return 0; } static int intel_sdvo_connector_register(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); int ret; ret = intel_connector_register(connector); if (ret) return ret; return sysfs_create_link(&connector->kdev->kobj, &sdvo->ddc.dev.kobj, sdvo->ddc.dev.kobj.name); } static void intel_sdvo_connector_unregister(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector)); sysfs_remove_link(&connector->kdev->kobj, sdvo->ddc.dev.kobj.name); intel_connector_unregister(connector); } static struct drm_connector_state * intel_sdvo_connector_duplicate_state(struct drm_connector *connector) { struct intel_sdvo_connector_state *state; state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_connector_duplicate_state(connector, &state->base.base); return &state->base.base; } static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_sdvo_connector_atomic_get_property, .atomic_set_property = intel_sdvo_connector_atomic_set_property, .late_register = intel_sdvo_connector_register, .early_unregister = 
intel_sdvo_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_sdvo_connector_duplicate_state, }; static int intel_sdvo_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state) { struct drm_connector_state *new_conn_state = drm_atomic_get_new_connector_state(state, conn); struct drm_connector_state *old_conn_state = drm_atomic_get_old_connector_state(state, conn); struct intel_sdvo_connector_state *old_state = to_intel_sdvo_connector_state(old_conn_state); struct intel_sdvo_connector_state *new_state = to_intel_sdvo_connector_state(new_conn_state); if (new_conn_state->crtc && (memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) || memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); crtc_state->connectors_changed = true; } return intel_digital_connector_atomic_check(conn, state); } static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, .atomic_check = intel_sdvo_atomic_check, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); i2c_del_adapter(&intel_sdvo->ddc); intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { .destroy = intel_sdvo_enc_destroy, }; static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) { u16 mask = 0; unsigned int num_bits; /* * Make a mask of outputs less than or equal to our own priority in the * list. */ switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; fallthrough; case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; fallthrough; case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; fallthrough; case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; fallthrough; case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; fallthrough; case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; } /* Count bits to find what number we are in the priority list. */ mask &= sdvo->caps.output_flags; num_bits = hweight16(mask); /* If more than 3 outputs, default to DDC bus 3 for now. */ if (num_bits > 3) num_bits = 3; /* Corresponds to SDVO_CONTROL_BUS_DDCx */ sdvo->ddc_bus = 1 << num_bits; } /* * Choose the appropriate DDC bus for control bus switch command for this * SDVO output based on the controlled output. * * DDC bus number assignment is in a priority order of RGB outputs, then TMDS * outputs, then LVDS outputs. 
*/ static void intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo) { struct sdvo_device_mapping *mapping; if (sdvo->port == PORT_B) mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); else intel_sdvo_guess_ddc_bus(sdvo); } static void intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo) { struct sdvo_device_mapping *mapping; u8 pin; if (sdvo->port == PORT_B) mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized && intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) pin = mapping->i2c_pin; else pin = GMBUS_PIN_DPB; sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); /* * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow * our code totally fails once we start using gmbus. Hence fall back to * bit banging for now. */ intel_gmbus_force_bit(sdvo->i2c, true); } /* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ static void intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) { intel_gmbus_force_bit(sdvo->i2c, false); } static bool intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo) { return intel_sdvo_check_supp_encode(intel_sdvo); } static u8 intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo) { struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { my_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; other_mapping = &dev_priv->display.vbt.sdvo_mappings[1]; } else { my_mapping = &dev_priv->display.vbt.sdvo_mappings[1]; other_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. */ if (my_mapping->slave_addr) return my_mapping->slave_addr; /* * If the BIOS only described a different SDVO device, use the * address that it isn't using. */ if (other_mapping->slave_addr) { if (other_mapping->slave_addr == 0x70) return 0x72; else return 0x70; } /* * No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. 
*/ if (sdvo->port == PORT_B) return 0x70; else return 0x72; } static int intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) { struct drm_connector *drm_connector; int ret; drm_connector = &connector->base.base; ret = drm_connector_init(encoder->base.base.dev, drm_connector, &intel_sdvo_connector_funcs, connector->base.base.connector_type); if (ret < 0) return ret; drm_connector_helper_add(drm_connector, &intel_sdvo_connector_helper_funcs); connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; connector->base.base.interlace_allowed = true; connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; intel_connector_attach_encoder(&connector->base, &encoder->base); return 0; } static void intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *connector) { intel_attach_force_audio_property(&connector->base.base); if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) intel_attach_broadcast_rgb_property(&connector->base.base); intel_attach_aspect_ratio_property(&connector->base.base); } static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) { struct intel_sdvo_connector *sdvo_connector; struct intel_sdvo_connector_state *conn_state; sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL); if (!sdvo_connector) return NULL; conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); if (!conn_state) { kfree(sdvo_connector); return NULL; } __drm_atomic_helper_connector_reset(&sdvo_connector->base.base, &conn_state->base.base); intel_panel_init_alloc(&sdvo_connector->base); return sdvo_connector; } static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; intel_sdvo_connector->output_flag = type; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; if (intel_sdvo_get_hotplug_support(intel_sdvo) & intel_sdvo_connector->output_flag) { intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; /* * Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. 
*/ intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_encoder->hotplug = intel_sdvo_hotplug; intel_sdvo_enable_hotplug(intel_encoder); } else { intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; if (intel_sdvo_is_hdmi_connector(intel_sdvo)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo_connector->is_hdmi = true; } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); return false; } if (intel_sdvo_connector->is_hdmi) intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); return true; } static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; DRM_DEBUG_KMS("initialising TV type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); return false; } if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; err: intel_connector_destroy(connector); return false; } static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; DRM_DEBUG_KMS("initialising analog type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); return false; } return true; } static bool intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_i915_private *i915 = to_i915(encoder->dev); struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); return false; } if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL); /* * Fetch modes from VBT. 
For SDVO prefer the VBT mode since some * SDVO->LVDS transcoders can't cope with the EDID mode. */ intel_panel_add_vbt_sdvo_fixed_mode(intel_connector); if (!intel_panel_preferred_fixed_mode(intel_connector)) { mutex_lock(&i915->drm.mode_config.mutex); intel_ddc_get_modes(connector, &intel_sdvo->ddc); intel_panel_add_edid_fixed_modes(intel_connector, false); mutex_unlock(&i915->drm.mode_config.mutex); } intel_panel_init(intel_connector, NULL); if (!intel_panel_preferred_fixed_mode(intel_connector)) goto err; return true; err: intel_connector_destroy(connector); return false; } static u16 intel_sdvo_filter_output_flags(u16 flags) { flags &= SDVO_OUTPUT_MASK; /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ if (!(flags & SDVO_OUTPUT_TMDS0)) flags &= ~SDVO_OUTPUT_TMDS1; if (!(flags & SDVO_OUTPUT_RGB0)) flags &= ~SDVO_OUTPUT_RGB1; if (!(flags & SDVO_OUTPUT_LVDS0)) flags &= ~SDVO_OUTPUT_LVDS1; return flags; } static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type) { if (type & SDVO_TMDS_MASK) return intel_sdvo_dvi_init(sdvo, type); else if (type & SDVO_TV_MASK) return intel_sdvo_tv_init(sdvo, type); else if (type & SDVO_RGB_MASK) return intel_sdvo_analog_init(sdvo, type); else if (type & SDVO_LVDS_MASK) return intel_sdvo_lvds_init(sdvo, type); else return false; } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo) { static const u16 probe_order[] = { SDVO_OUTPUT_TMDS0, SDVO_OUTPUT_TMDS1, /* TV has no XXX1 function block */ SDVO_OUTPUT_SVID0, SDVO_OUTPUT_CVBS0, SDVO_OUTPUT_YPRPB0, SDVO_OUTPUT_RGB0, SDVO_OUTPUT_RGB1, SDVO_OUTPUT_LVDS0, SDVO_OUTPUT_LVDS1, }; struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); u16 flags; int i; flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags); if (flags == 0) { DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n", SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags); return false; } intel_sdvo->controlled_output = flags; intel_sdvo_select_ddc_bus(i915, intel_sdvo); for (i = 0; i < ARRAY_SIZE(probe_order); i++) { u16 type = flags & probe_order[i]; if (!type) continue; if (!intel_sdvo_output_init(intel_sdvo, type)) return false; } intel_sdvo->base.pipe_mask = ~0; return true; } static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector, *tmp; list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) { drm_connector_unregister(connector); intel_connector_destroy(connector); } } } static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) { struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; u32 format_map, i; if (!intel_sdvo_set_target_output(intel_sdvo, type)) return false; BUILD_BUG_ON(sizeof(format) != 6); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, &format, sizeof(format))) return false; memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); if (format_map == 0) return false; intel_sdvo_connector->format_supported_num = 0; for (i = 0 ; i < TV_FORMAT_NUM; i++) if (format_map & (1 << i)) intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; intel_sdvo_connector->tv_format = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", intel_sdvo_connector->format_supported_num); if 
(!intel_sdvo_connector->tv_format) return false; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) drm_property_add_enum(intel_sdvo_connector->tv_format, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0]; drm_object_attach_property(&intel_sdvo_connector->base.base.base, intel_sdvo_connector->tv_format, 0); return true; } #define _ENHANCEMENT(state_assignment, name, NAME) do { \ if (enhancements.name) { \ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ return false; \ intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!intel_sdvo_connector->name) return false; \ state_assignment = response; \ drm_object_attach_property(&connector->base, \ intel_sdvo_connector->name, 0); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ data_value[0], data_value[1], response); \ } \ } while (0) #define ENHANCEMENT(state, name, NAME) _ENHANCEMENT((state)->name, name, NAME) static bool intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); u16 response, data_value[2]; /* when horizontal overscan is supported, Add the left/right property */ if (enhancements.overscan_h) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_H, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_H, &response, 2)) return false; sdvo_state->tv.overscan_h = response; intel_sdvo_connector->max_hscan = data_value[0]; intel_sdvo_connector->left = drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); if (!intel_sdvo_connector->left) return false; drm_object_attach_property(&connector->base, intel_sdvo_connector->left, 0); intel_sdvo_connector->right = drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); if (!intel_sdvo_connector->right) return false; drm_object_attach_property(&connector->base, intel_sdvo_connector->right, 0); DRM_DEBUG_KMS("h_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } if (enhancements.overscan_v) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_V, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_V, &response, 2)) return false; sdvo_state->tv.overscan_v = response; intel_sdvo_connector->max_vscan = data_value[0]; intel_sdvo_connector->top = drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]); if (!intel_sdvo_connector->top) return false; drm_object_attach_property(&connector->base, intel_sdvo_connector->top, 0); intel_sdvo_connector->bottom = drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]); if (!intel_sdvo_connector->bottom) return false; drm_object_attach_property(&connector->base, intel_sdvo_connector->bottom, 0); DRM_DEBUG_KMS("v_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } ENHANCEMENT(&sdvo_state->tv, hpos, HPOS); ENHANCEMENT(&sdvo_state->tv, vpos, VPOS); ENHANCEMENT(&conn_state->tv, 
saturation, SATURATION); ENHANCEMENT(&conn_state->tv, contrast, CONTRAST); ENHANCEMENT(&conn_state->tv, hue, HUE); ENHANCEMENT(&conn_state->tv, brightness, BRIGHTNESS); ENHANCEMENT(&sdvo_state->tv, sharpness, SHARPNESS); ENHANCEMENT(&sdvo_state->tv, flicker_filter, FLICKER_FILTER); ENHANCEMENT(&sdvo_state->tv, flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); ENHANCEMENT(&sdvo_state->tv, flicker_filter_2d, FLICKER_FILTER_2D); _ENHANCEMENT(sdvo_state->tv.chroma_filter, tv_chroma_filter, TV_CHROMA_FILTER); _ENHANCEMENT(sdvo_state->tv.luma_filter, tv_luma_filter, TV_LUMA_FILTER); if (enhancements.dot_crawl) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) return false; sdvo_state->tv.dot_crawl = response & 0x1; intel_sdvo_connector->dot_crawl = drm_property_create_range(dev, 0, "dot_crawl", 0, 1); if (!intel_sdvo_connector->dot_crawl) return false; drm_object_attach_property(&connector->base, intel_sdvo_connector->dot_crawl, 0); DRM_DEBUG_KMS("dot crawl: current %d\n", response); } return true; } static bool intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; u16 response, data_value[2]; ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS); return true; } #undef ENHANCEMENT #undef _ENHANCEMENT static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector) { union { struct intel_sdvo_enhancements_reply reply; u16 response; } enhancements; BUILD_BUG_ON(sizeof(enhancements) != 2); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, &enhancements, sizeof(enhancements)) || enhancements.response == 0) { DRM_DEBUG_KMS("No enhancement is supported\n"); return true; } if (IS_TV(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); else if (IS_LVDS(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); else return true; } static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_sdvo *sdvo = adapter->algo_data; if (!__intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) return -EIO; return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); } static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) { struct intel_sdvo *sdvo = adapter->algo_data; return sdvo->i2c->algo->functionality(sdvo->i2c); } static const struct i2c_algorithm intel_sdvo_ddc_proxy = { .master_xfer = intel_sdvo_ddc_proxy_xfer, .functionality = intel_sdvo_ddc_proxy_func }; static void proxy_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_sdvo *sdvo = adapter->algo_data; sdvo->i2c->lock_ops->lock_bus(sdvo->i2c, flags); } static int proxy_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_sdvo *sdvo = adapter->algo_data; return sdvo->i2c->lock_ops->trylock_bus(sdvo->i2c, flags); } static void proxy_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_sdvo *sdvo = adapter->algo_data; sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags); } static const struct i2c_lock_operations proxy_lock_ops = { .lock_bus = proxy_lock_bus, .trylock_bus = proxy_trylock_bus, .unlock_bus = proxy_unlock_bus, }; static bool 
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); sdvo->ddc.owner = THIS_MODULE; sdvo->ddc.class = I2C_CLASS_DDC; snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); sdvo->ddc.dev.parent = &pdev->dev; sdvo->ddc.algo_data = sdvo; sdvo->ddc.algo = &intel_sdvo_ddc_proxy; sdvo->ddc.lock_ops = &proxy_lock_ops; return i2c_add_adapter(&sdvo->ddc) == 0; } static bool is_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port) { if (HAS_PCH_SPLIT(dev_priv)) return port == PORT_B; else return port == PORT_B || port == PORT_C; } static bool assert_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port) { return !drm_WARN(&dev_priv->drm, !is_sdvo_port_valid(dev_priv, port), "Platform does not support SDVO %c\n", port_name(port)); } bool intel_sdvo_init(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum port port) { struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; if (!assert_port_valid(dev_priv, port)) return false; if (!assert_sdvo_port_valid(dev_priv, port)) return false; intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL); if (!intel_sdvo) return false; intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->port = port; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv)) goto err_i2c_bus; /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = port; drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, "SDVO %c", port_name(port)); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { u8 byte; if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { drm_dbg_kms(&dev_priv->drm, "No SDVO device found on %s\n", SDVO_NAME(intel_sdvo)); goto err; } } intel_encoder->compute_config = intel_sdvo_compute_config; if (HAS_PCH_SPLIT(dev_priv)) { intel_encoder->disable = pch_disable_sdvo; intel_encoder->post_disable = pch_post_disable_sdvo; } else { intel_encoder->disable = intel_disable_sdvo; } intel_encoder->pre_enable = intel_sdvo_pre_enable; intel_encoder->enable = intel_enable_sdvo; intel_encoder->get_hw_state = intel_sdvo_get_hw_state; intel_encoder->get_config = intel_sdvo_get_config; /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; intel_sdvo->colorimetry_cap = intel_sdvo_get_colorimetry_cap(intel_sdvo); if (!intel_sdvo_output_setup(intel_sdvo)) { drm_dbg_kms(&dev_priv->drm, "SDVO output failed to setup on %s\n", SDVO_NAME(intel_sdvo)); /* Output_setup can leave behind connectors! */ goto err_output; } /* * Only enable the hotplug irq if we need it, to work around noisy * hotplug lines. */ if (intel_sdvo->hotplug_active) { if (intel_sdvo->port == PORT_B) intel_encoder->hpd_pin = HPD_SDVO_B; else intel_encoder->hpd_pin = HPD_SDVO_C; } /* * Cloning SDVO with anything is often impossible, since the SDVO * encoder can request a special input timing mode. And even if that's * not the case we have evidence that cloning a plain unscaled mode with * VGA doesn't really work. Furthermore the cloning flags are way too * simplistic anyway to express such constraints, so just give up on * cloning for SDVO encoders. */ intel_sdvo->base.cloneable = 0; /* Set the input timing to the screen. 
Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err_output; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) goto err_output; drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", SDVO_NAME(intel_sdvo), intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, intel_sdvo->caps.device_rev_id, intel_sdvo->pixel_clock_min / 1000, intel_sdvo->pixel_clock_max / 1000, (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', /* check currently supported outputs */ intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N', intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 | SDVO_OUTPUT_LVDS1) ? 'Y' : 'N'); return true; err_output: intel_sdvo_output_cleanup(intel_sdvo); err: drm_encoder_cleanup(&intel_encoder->base); i2c_del_adapter(&intel_sdvo->ddc); err_i2c_bus: intel_sdvo_unselect_i2c_bus(intel_sdvo); kfree(intel_sdvo); return false; }
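The DDC-bus fallback above (intel_sdvo_guess_ddc_bus) picks the control-bus selector by counting how many outputs of equal or lower DDC priority the encoder actually exposes, clamping at three buses. Below is a minimal standalone sketch of that selection logic: the OUT_* bit values and the priority table are illustrative placeholders rather than the driver's real SDVO_OUTPUT_* definitions, a hand-rolled popcount stands in for hweight16(), and the TV outputs (SVID/CVBS/YPbPr) handled elsewhere in the driver are left out.

#include <stdint.h>
#include <stdio.h>

/* Illustrative output bits -- placeholders, not the real SDVO definitions. */
enum {
	OUT_RGB0  = 1 << 0,
	OUT_RGB1  = 1 << 1,
	OUT_TMDS0 = 1 << 2,
	OUT_TMDS1 = 1 << 3,
	OUT_LVDS0 = 1 << 4,
	OUT_LVDS1 = 1 << 5,
};

/* DDC priority order: RGB outputs first, then TMDS, then LVDS. */
static const uint16_t ddc_priority[] = {
	OUT_RGB0, OUT_RGB1, OUT_TMDS0, OUT_TMDS1, OUT_LVDS0, OUT_LVDS1,
};

static unsigned int popcount16(uint16_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

/*
 * Guess the DDC control-bus selector for @controlled_output: build a mask
 * of all outputs at or below its priority, keep only the ones the encoder
 * really advertises, count them and clamp to bus 3.
 * (TV outputs are not modelled in this sketch.)
 */
static unsigned int guess_ddc_bus(uint16_t caps_output_flags,
				  uint16_t controlled_output)
{
	uint16_t mask = 0;
	unsigned int i, num_bits;

	for (i = 0; i < sizeof(ddc_priority) / sizeof(ddc_priority[0]); i++) {
		mask |= ddc_priority[i];
		if (ddc_priority[i] == controlled_output)
			break;
	}

	mask &= caps_output_flags;
	num_bits = popcount16(mask);
	if (num_bits > 3)
		num_bits = 3;	/* more than 3 outputs: default to DDC bus 3 */

	return 1u << num_bits;	/* corresponds to SDVO_CONTROL_BUS_DDCx */
}

int main(void)
{
	/* e.g. an encoder exposing RGB0 + TMDS0, currently driving TMDS0 */
	printf("bus selector: 0x%x\n",
	       guess_ddc_bus(OUT_RGB0 | OUT_TMDS0, OUT_TMDS0));
	return 0;
}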
linux-master
drivers/gpu/drm/i915/display/intel_sdvo.c
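Before moving on to the next file, here is a small standalone sketch of the slave-address fallback implemented by intel_sdvo_get_slave_addr() in intel_sdvo.c above: trust a VBT mapping for this port if present, otherwise take whichever of 0x70/0x72 the other port is not using, otherwise fall back to the pre-VBT assumption of 0x70 on port B. The function name pick_sdvo_slave_addr() and its parameter shape are hypothetical; the 0x70/0x72 constants and the port-B default come from that function.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the slave-address fallback (illustrative, not driver API):
 *  1. use the VBT-described address for this port if it exists,
 *  2. otherwise take whichever of 0x70/0x72 the other port does not use,
 *  3. otherwise fall back to the historical default (0x70 on port B).
 */
static unsigned int pick_sdvo_slave_addr(bool is_port_b,
					 unsigned int my_vbt_addr,
					 unsigned int other_vbt_addr)
{
	if (my_vbt_addr)
		return my_vbt_addr;

	if (other_vbt_addr)
		return other_vbt_addr == 0x70 ? 0x72 : 0x70;

	return is_port_b ? 0x70 : 0x72;
}

int main(void)
{
	/* no VBT info at all: port B gets 0x70, the other port 0x72 */
	printf("0x%x 0x%x\n",
	       pick_sdvo_slave_addr(true, 0, 0),
	       pick_sdvo_slave_addr(false, 0, 0));
	/* the other port claims 0x70, so we take 0x72 */
	printf("0x%x\n", pick_sdvo_slave_addr(true, 0, 0x70));
	return 0;
}

As seen in intel_sdvo_init() above, the driver then shifts the returned bus address right by one before storing it as slave_addr, the usual conversion from an 8-bit write address to a 7-bit I2C address.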
/* * Copyright © 2008 Intel Corporation * 2014 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" #include "skl_scaler.h" static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp, const struct drm_display_mode *adjusted_mode, struct intel_crtc_state *crtc_state, bool dsc) { if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) { int output_bpp = bpp; /* DisplayPort 2 128b/132b, bits per lane is always 32 */ int symbol_clock = crtc_state->port_clock / 32; if (output_bpp * adjusted_mode->crtc_clock >= symbol_clock * 72) { drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n", output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72); return -EINVAL; } } return 0; } static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int max_bpp, int min_bpp, struct link_config_limits *limits, struct drm_connector_state *conn_state, int step, bool dsc) { struct drm_atomic_state *state = crtc_state->uapi.state; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct drm_dp_mst_topology_state *mst_state; struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpp, slots = -EINVAL; int ret = 0; mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr); if (IS_ERR(mst_state)) return PTR_ERR(mst_state); crtc_state->lane_count = limits->max_lane_count; crtc_state->port_clock = limits->max_rate; // TODO: Handle pbn_div changes by adding a new MST helper if (!mst_state->pbn_div) { mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, crtc_state->port_clock, crtc_state->lane_count); } for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); ret = 
intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc); if (ret) continue; crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, dsc ? bpp << 4 : bpp, dsc); slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, connector->port, crtc_state->pbn); if (slots == -EDEADLK) return slots; if (slots >= 0) { ret = drm_dp_mst_atomic_check(state); /* * If we got slots >= 0 and we can fit those based on check * then we can exit the loop. Otherwise keep trying. */ if (!ret) break; } } /* We failed to find a proper bpp/timeslots, return error */ if (ret) slots = ret; if (slots < 0) { drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n", slots); } else { if (!dsc) crtc_state->pipe_bpp = bpp; else crtc_state->dsc.compressed_bpp = bpp; drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc); } return slots; } static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state, struct link_config_limits *limits) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int slots = -EINVAL; slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, limits->max_bpp, limits->min_bpp, limits, conn_state, 2 * 3, false); if (slots < 0) return slots; intel_link_compute_m_n(crtc_state->pipe_bpp, crtc_state->lane_count, adjusted_mode->crtc_clock, crtc_state->port_clock, &crtc_state->dp_m_n, crtc_state->fec_enable); crtc_state->dp_m_n.tu = slots; return 0; } static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state, struct link_config_limits *limits) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int slots = -EINVAL; int i, num_bpc; u8 dsc_bpc[3] = {0}; int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp; u8 dsc_max_bpc; bool need_timeslot_recalc = false; u32 last_compressed_bpp; /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ if (DISPLAY_VER(i915) >= 12) dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); else dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc); max_bpp = min_t(u8, dsc_max_bpc * 3, limits->max_bpp); min_bpp = limits->min_bpp; num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, dsc_bpc); drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n", min_bpp, max_bpp); sink_max_bpp = dsc_bpc[0] * 3; sink_min_bpp = sink_max_bpp; for (i = 1; i < num_bpc; i++) { if (sink_min_bpp > dsc_bpc[i] * 3) sink_min_bpp = dsc_bpc[i] * 3; if (sink_max_bpp < dsc_bpc[i] * 3) sink_max_bpp = dsc_bpc[i] * 3; } drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n", sink_min_bpp, sink_max_bpp); if (min_bpp < sink_min_bpp) min_bpp = sink_min_bpp; if (max_bpp > sink_max_bpp) max_bpp = sink_max_bpp; slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp, min_bpp, limits, conn_state, 2 * 3, true); if (slots < 0) return slots; last_compressed_bpp = crtc_state->dsc.compressed_bpp; crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, last_compressed_bpp, crtc_state->pipe_bpp); if (crtc_state->dsc.compressed_bpp != last_compressed_bpp) need_timeslot_recalc = true; /* * 
Apparently some MST hubs dislike if vcpi slots are not matching precisely * the actual compressed bpp we use. */ if (need_timeslot_recalc) { slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, crtc_state->dsc.compressed_bpp, crtc_state->dsc.compressed_bpp, limits, conn_state, 2 * 3, true); if (slots < 0) return slots; } intel_link_compute_m_n(crtc_state->dsc.compressed_bpp, crtc_state->lane_count, adjusted_mode->crtc_clock, crtc_state->port_clock, &crtc_state->dp_m_n, crtc_state->fec_enable); crtc_state->dp_m_n.tu = slots; return 0; } static int intel_dp_mst_update_slots(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr; struct drm_dp_mst_topology_state *topology_state; u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ? DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B; topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr); if (IS_ERR(topology_state)) { drm_dbg_kms(&i915->drm, "slot update failed\n"); return PTR_ERR(topology_state); } drm_dp_mst_update_slots(topology_state, link_coding_cap); return 0; } static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) { const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); struct intel_connector *connector = to_intel_connector(conn_state->connector); if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) return connector->base.display_info.has_audio; else return intel_conn_state->force_audio == HDMI_AUDIO_ON; } static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; pipe_config->has_audio = intel_dp_mst_has_audio(conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); /* * for MST we always configure max link bw - the spec doesn't * seem to suggest we should do otherwise. */ limits.min_rate = limits.max_rate = intel_dp_max_link_rate(intel_dp); limits.min_lane_count = limits.max_lane_count = intel_dp_max_lane_count(intel_dp); limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); /* * FIXME: If all the streams can't fit into the link with * their current pipe_bpp we should reduce pipe_bpp across * the board until things start to fit. Until then we * limit to <= 8bpc since that's what was hardcoded for all * MST streams previously. This hack should be removed once * we have the proper retry logic in place. 
*/ limits.max_bpp = min(pipe_config->pipe_bpp, 24); intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); ret = intel_dp_mst_compute_link_config(encoder, pipe_config, conn_state, &limits); if (ret == -EDEADLK) return ret; /* enable compression if the mode doesn't fit available BW */ drm_dbg_kms(&dev_priv->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); if (ret || intel_dp->force_dsc_en) { /* * Try to get at least some timeslots and then see, if * we can fit there with DSC. */ drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n"); ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config, conn_state, &limits); if (ret < 0) return ret; ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, conn_state, &limits, pipe_config->dp_m_n.tu, false); } if (ret) return ret; ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state); if (ret) return ret; pipe_config->limited_color_range = intel_dp_limited_color_range(pipe_config, conn_state); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); return 0; } /* * Iterate over all connectors and return a mask of * all CPU transcoders streaming over the same DP link. */ static unsigned int intel_dp_mst_transcoder_mask(struct intel_atomic_state *state, struct intel_dp *mst_port) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_digital_connector_state *conn_state; struct intel_connector *connector; u8 transcoders = 0; int i; if (DISPLAY_VER(dev_priv) < 12) return 0; for_each_new_intel_connector_in_state(state, connector, conn_state, i) { const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (connector->mst_port != mst_port || !conn_state->base.crtc) continue; crtc = to_intel_crtc(conn_state->base.crtc); crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->hw.active) continue; transcoders |= BIT(crtc_state->cpu_transcoder); } return transcoders; } static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; /* lowest numbered transcoder will be designated master */ crtc_state->mst_master_transcoder = ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1; return 0; } /* * If one of the connectors in a MST stream needs a modeset, mark all CRTCs * that shares the same MST stream as mode changed, * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do * a fastset when possible. 
*/ static int intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; int ret = 0; if (DISPLAY_VER(dev_priv) < 12) return 0; if (!intel_connector_needs_modeset(state, &connector->base)) return 0; drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter); for_each_intel_connector_iter(connector_iter, &connector_list_iter) { struct intel_digital_connector_state *conn_iter_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; if (connector_iter->mst_port != connector->mst_port || connector_iter == connector) continue; conn_iter_state = intel_atomic_get_digital_connector_state(state, connector_iter); if (IS_ERR(conn_iter_state)) { ret = PTR_ERR(conn_iter_state); break; } if (!conn_iter_state->base.crtc) continue; crtc = to_intel_crtc(conn_iter_state->base.crtc); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); break; } ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); if (ret) break; crtc_state->uapi.mode_changed = true; } drm_connector_list_iter_end(&connector_list_iter); return ret; } static int intel_dp_mst_atomic_check(struct drm_connector *connector, struct drm_atomic_state *_state) { struct intel_atomic_state *state = to_intel_atomic_state(_state); struct intel_connector *intel_connector = to_intel_connector(connector); int ret; ret = intel_digital_connector_atomic_check(connector, &state->base); if (ret) return ret; ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state); if (ret) return ret; return drm_dp_atomic_release_time_slots(&state->base, &intel_connector->mst_port->mst_mgr, intel_connector->port); } static void clear_act_sent(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_ACT_SENT); } static void wait_for_act_sent(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_ACT_SENT, 1)) drm_err(&i915->drm, "Timed out waiting for ACT sent\n"); drm_dp_check_act_status(&intel_dp->mst_mgr); } static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_dp_mst_topology_state *old_mst_state = drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr); struct drm_dp_mst_topology_state *new_mst_state = drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); const struct drm_dp_mst_atomic_payload *old_payload = drm_atomic_get_mst_payload_state(old_mst_state, connector->port); struct drm_dp_mst_atomic_payload *new_payload = drm_atomic_get_mst_payload_state(new_mst_state, connector->port); struct drm_i915_private *i915 = 
to_i915(connector->base.dev); drm_dbg_kms(&i915->drm, "active links %d\n", intel_dp->active_mst_links); intel_hdcp_disable(intel_mst->connector); drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state, old_payload, new_payload); intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); } static void intel_mst_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); bool last_mst_stream; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 12 && last_mst_stream && !intel_dp_mst_is_master_trans(old_crtc_state)); intel_crtc_vblank_off(old_crtc_state); intel_disable_transcoder(old_crtc_state); clear_act_sent(encoder, old_crtc_state); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0); wait_for_act_sent(encoder, old_crtc_state); intel_ddi_disable_transcoder_func(old_crtc_state); if (DISPLAY_VER(dev_priv) >= 9) skl_scaler_disable(old_crtc_state); else ilk_pfit_disable(old_crtc_state); /* * Power down mst path before disabling the port, otherwise we end * up getting interrupts from the sink upon detecting link loss. */ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, false); /* * BSpec 4287: disable DIP after the transcoder is disabled and before * the transcoder clock select is set to none. */ if (last_mst_stream) intel_dp_set_infoframes(&dig_port->base, false, old_crtc_state, NULL); /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" * * From older GENs spec: "Configure Transcoder Clock Select to direct * no clock to the transcoder" */ if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream) intel_ddi_disable_transcoder_clock(old_crtc_state); intel_mst->connector = NULL; if (last_mst_stream) dig_port->base.post_disable(state, &dig_port->base, old_crtc_state, NULL); drm_dbg_kms(&dev_priv->drm, "active links %d\n", intel_dp->active_mst_links); } static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; if (intel_dp->active_mst_links == 0 && dig_port->base.post_pll_disable) dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state); } static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; if (intel_dp->active_mst_links == 0) dig_port->base.pre_pll_enable(state, &dig_port->base, pipe_config, NULL); else /* * The port PLL state needs to get updated for secondary * streams as for the primary stream. 
*/ intel_ddi_update_active_dpll(state, &dig_port->base, to_intel_crtc(pipe_config->uapi.crtc)); } static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); int ret; bool first_mst_stream; /* MST encoders are bound to a crtc, not to a connector, * force the mapping here for get_hw_state. */ connector->encoder = encoder; intel_mst->connector = connector; first_mst_stream = intel_dp->active_mst_links == 0; drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 12 && first_mst_stream && !intel_dp_mst_is_master_trans(pipe_config)); drm_dbg_kms(&dev_priv->drm, "active links %d\n", intel_dp->active_mst_links); if (first_mst_stream) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); if (first_mst_stream) dig_port->base.pre_enable(state, &dig_port->base, pipe_config, NULL); intel_dp->active_mst_links++; ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state, drm_atomic_get_mst_payload_state(mst_state, connector->port)); if (ret < 0) drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n", connector->base.name, ret); /* * Before Gen 12 this is not done as part of * dig_port->base.pre_enable() and should be done here. For * Gen 12+ the step in which this should be done is different for the * first MST stream, so it's done on the DDI for the first stream and * here for the following ones. 
*/ if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) intel_ddi_enable_transcoder_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); } static void intel_mst_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); enum transcoder trans = pipe_config->cpu_transcoder; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); clear_act_sent(encoder, pipe_config); if (intel_dp_is_uhbr(pipe_config)) { const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock); intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder), TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24)); intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder), TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff)); } intel_ddi_enable_transcoder_func(encoder, pipe_config); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0, TRANS_DDI_DP_VC_PAYLOAD_ALLOC); drm_dbg_kms(&dev_priv->drm, "active links %d\n", intel_dp->active_mst_links); wait_for_act_sent(encoder, pipe_config); drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, drm_atomic_get_mst_payload_state(mst_state, connector->port)); if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0, FECSTALL_DIS_DPTSTREAM_DPTTG); else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, FECSTALL_DIS_DPTSTREAM_DPTTG); intel_enable_transcoder(pipe_config); intel_crtc_vblank_on(pipe_config); intel_audio_codec_enable(encoder, pipe_config, conn_state); /* Enable hdcp if it's desired */ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) intel_hdcp_enable(state, encoder, pipe_config, conn_state); } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); *pipe = intel_mst->pipe; if (intel_mst->connector) return true; return false; } static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; dig_port->base.get_config(&dig_port->base, pipe_config); } static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; return intel_dp_initial_fastset_check(&dig_port->base, crtc_state); } static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; const struct drm_edid *drm_edid; int ret; if (drm_connector_is_unregistered(connector)) return intel_connector_update_modes(connector, NULL); drm_edid = drm_dp_mst_edid_read(connector, 
&intel_dp->mst_mgr, intel_connector->port); ret = intel_connector_update_modes(connector, drm_edid); drm_edid_free(drm_edid); return ret; } static int intel_dp_mst_connector_late_register(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); int ret; ret = drm_dp_mst_connector_late_register(connector, intel_connector->port); if (ret < 0) return ret; ret = intel_connector_register(connector); if (ret < 0) drm_dp_mst_connector_early_unregister(connector, intel_connector->port); return ret; } static void intel_dp_mst_connector_early_unregister(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); intel_connector_unregister(connector); drm_dp_mst_connector_early_unregister(connector, intel_connector->port); } static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_dp_mst_connector_late_register, .early_unregister = intel_dp_mst_connector_early_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static int intel_dp_mst_get_modes(struct drm_connector *connector) { return intel_dp_mst_get_ddc_modes(connector); } static int intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, struct drm_display_mode *mode, struct drm_modeset_acquire_ctx *ctx, enum drm_mode_status *status) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr; struct drm_dp_mst_port *port = intel_connector->port; const int min_bpp = 18; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; int max_rate, mode_rate, max_lanes, max_link_clock; int ret; bool dsc = false, bigjoiner = false; u16 dsc_max_output_bpp = 0; u8 dsc_slice_count = 0; int target_clock = mode->clock; if (drm_connector_is_unregistered(connector)) { *status = MODE_ERROR; return 0; } if (mode->flags & DRM_MODE_FLAG_DBLSCAN) { *status = MODE_NO_DBLESCAN; return 0; } max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(mode->clock, min_bpp); ret = drm_modeset_lock(&mgr->base.lock, ctx); if (ret) return ret; if (mode_rate > max_rate || mode->clock > max_dotclk || drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) { *status = MODE_CLOCK_HIGH; return 0; } if (mode->clock < 10000) { *status = MODE_CLOCK_LOW; return 0; } if (mode->flags & DRM_MODE_FLAG_DBLCLK) { *status = MODE_H_ILLEGAL; return 0; } if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { bigjoiner = true; max_dotclk *= 2; } if (DISPLAY_VER(dev_priv) >= 10 && drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { /* * TBD pass the connector BPC, * for now U8_MAX so that max BPC on that platform would be picked */ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { dsc_max_output_bpp = intel_dp_dsc_get_output_bpp(dev_priv, max_link_clock, max_lanes, target_clock, mode->hdisplay, bigjoiner, pipe_bpp, 64) >> 4; 
dsc_slice_count = intel_dp_dsc_get_slice_count(intel_dp, target_clock, mode->hdisplay, bigjoiner); } dsc = dsc_max_output_bpp && dsc_slice_count; } /* * Big joiner configuration needs DSC for TGL which is not true for * XE_LPD where uncompressed joiner is supported. */ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) return MODE_CLOCK_HIGH; if (mode_rate > max_rate && !dsc) return MODE_CLOCK_HIGH; *status = intel_mode_valid_max_plane_size(dev_priv, mode, false); return 0; } static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); return &intel_dp->mst_encoders[crtc->pipe]->base.base; } static int intel_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; if (!INTEL_DISPLAY_ENABLED(i915)) return connector_status_disconnected; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, intel_connector->port); } static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid_ctx = intel_dp_mst_mode_valid_ctx, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, .detect_ctx = intel_dp_mst_detect, }; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder)); drm_encoder_cleanup(encoder); kfree(intel_mst); } static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { .destroy = intel_dp_mst_encoder_destroy, }; static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) { if (intel_attached_encoder(connector) && connector->base.state->crtc) { enum pipe pipe; if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe)) return false; return true; } return false; } static int intel_dp_mst_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector, const char *pathprop) { struct drm_i915_private *i915 = to_i915(connector->dev); drm_object_attach_property(&connector->base, i915->drm.mode_config.path_property, 0); drm_object_attach_property(&connector->base, i915->drm.mode_config.tile_property, 0); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); /* * Reuse the prop from the SST connector because we're * not allowed to create new props after device registration. 
*/ connector->max_bpc_property = intel_dp->attached_connector->base.max_bpc_property; if (connector->max_bpc_property) drm_connector_attach_max_bpc_property(connector, 6, 12); return drm_connector_set_path_property(connector, pathprop); } static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) { struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *intel_connector; struct drm_connector *connector; enum pipe pipe; int ret; intel_connector = intel_connector_alloc(); if (!intel_connector) return NULL; intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; drm_dp_mst_get_port_malloc(port); connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); if (ret) { drm_dp_mst_put_port_malloc(port); intel_connector_free(intel_connector); return NULL; } drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); for_each_pipe(dev_priv, pipe) { struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; ret = drm_connector_attach_encoder(&intel_connector->base, enc); if (ret) goto err; } ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop); if (ret) goto err; ret = intel_dp_hdcp_init(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n", connector->name, connector->base.id); return connector; err: drm_connector_cleanup(connector); return NULL; } static void intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr) { struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); intel_hpd_trigger_irq(dp_to_dig_port(intel_dp)); } static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, .poll_hpd_irq = intel_dp_mst_poll_hpd_irq, }; static struct intel_dp_mst_encoder * intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe) { struct intel_dp_mst_encoder *intel_mst; struct intel_encoder *intel_encoder; struct drm_device *dev = dig_port->base.base.dev; intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); if (!intel_mst) return NULL; intel_mst->pipe = pipe; intel_encoder = &intel_mst->base; intel_mst->primary = dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->power_domain = dig_port->base.power_domain; intel_encoder->port = dig_port->base.port; intel_encoder->cloneable = 0; /* * This is wrong, but broken userspace uses the intersection * of possible_crtcs of all the encoders of a given connector * to figure out which crtcs can drive said connector. What * should be used instead is the union of possible_crtcs. 
* To keep such userspace functioning we must misconfigure * this to make sure the intersection is not empty :( */ intel_encoder->pipe_mask = ~0; intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; intel_encoder->get_config = intel_dp_mst_enc_get_config; intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check; return intel_mst; } static bool intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port) { struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe); return true; } int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port) { return dig_port->dp.active_mst_links; } int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; enum port port = dig_port->base.port; int ret; if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) return 0; if (DISPLAY_VER(i915) < 12 && port == PORT_A) return 0; if (DISPLAY_VER(i915) < 11 && port == PORT_E) return 0; intel_dp->mst_mgr.cbs = &mst_cbs; /* create encoders */ intel_dp_create_fake_mst_encoders(dig_port); ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, &intel_dp->aux, 16, 3, conn_base_id); if (ret) { intel_dp->mst_mgr.cbs = NULL; return ret; } return 0; } bool intel_dp_mst_source_support(struct intel_dp *intel_dp) { return intel_dp->mst_mgr.cbs; } void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port) { struct intel_dp *intel_dp = &dig_port->dp; if (!intel_dp_mst_source_support(intel_dp)) return; drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); /* encoders will get killed by normal cleanup */ intel_dp->mst_mgr.cbs = NULL; } bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state) { return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder; } bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state) { return crtc_state->mst_master_transcoder != INVALID_TRANSCODER && crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder; } /** * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector * @state: atomic state * @connector: connector to add the state for * @crtc: the CRTC @connector is attached to * * Add the MST topology state for @connector to @state. * * Returns 0 on success, negative error code on failure. 
*/ static int intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state, struct intel_connector *connector, struct intel_crtc *crtc) { struct drm_dp_mst_topology_state *mst_state; if (!connector->mst_port) return 0; mst_state = drm_atomic_get_mst_topology_state(&state->base, &connector->mst_port->mst_mgr); if (IS_ERR(mst_state)) return PTR_ERR(mst_state); mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base); return 0; } /** * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC * @state: atomic state * @crtc: CRTC to add the state for * * Add the MST topology state for @crtc to @state. * * Returns 0 on success, negative error code on failure. */ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_connector *_connector; struct drm_connector_state *conn_state; int i; for_each_new_connector_in_state(&state->base, _connector, conn_state, i) { struct intel_connector *connector = to_intel_connector(_connector); int ret; if (conn_state->crtc != &crtc->base) continue; ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc); if (ret) return ret; } return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_dp_mst.c
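/*
 * Editor's note: an illustrative, self-contained sketch of the link-bandwidth
 * test performed by intel_dp_mst_mode_valid_ctx() above (mode_rate vs.
 * max_rate at a minimum of 18 bpp).  This is not the kernel's helper code:
 * the unit choices and the 8b/10b (80%) coding efficiency are simplifying
 * assumptions that only hold for pre-UHBR link rates, and the example mode
 * and link parameters are made up.
 */
#include <stdio.h>

/* Bandwidth a mode needs, in Mbit/s: pixel clock times bits per pixel. */
static double mode_rate_mbps(double pixel_clock_mhz, int bpp)
{
	return pixel_clock_mhz * bpp;
}

/* Payload bandwidth of the link, in Mbit/s, assuming 8b/10b channel coding. */
static double link_rate_mbps(double per_lane_gbps, int lanes)
{
	return per_lane_gbps * lanes * 0.8 * 1000.0;
}

int main(void)
{
	const int min_bpp = 18;			/* same floor the MST code uses for pruning */
	double pixel_clock_mhz = 533.25;	/* roughly a 3840x2160@60 mode */
	double need = mode_rate_mbps(pixel_clock_mhz, min_bpp);
	double have = link_rate_mbps(5.4, 4);	/* HBR2 x 4 lanes */

	printf("need %.1f Mbit/s, have %.1f Mbit/s -> %s\n",
	       need, have, need <= have ? "MODE_OK" : "MODE_CLOCK_HIGH");
	return 0;
}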
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation * */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_vrr.h" bool intel_vrr_is_capable(struct intel_connector *connector) { const struct drm_display_info *info = &connector->base.display_info; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp; /* * DP Sink is capable of VRR video timings if * Ignore MSA bit is set in DPCD. * EDID monitor range also should be atleast 10 for reasonable * Adaptive Sync or Variable Refresh Rate end user experience. */ switch (connector->base.connector_type) { case DRM_MODE_CONNECTOR_eDP: if (!connector->panel.vbt.vrr) return false; fallthrough; case DRM_MODE_CONNECTOR_DisplayPort: intel_dp = intel_attached_dp(connector); if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd)) return false; break; default: return false; } return HAS_VRR(i915) && info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10; } void intel_vrr_check_modeset(struct intel_atomic_state *state) { int i; struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->uapi.vrr_enabled != old_crtc_state->uapi.vrr_enabled) new_crtc_state->uapi.mode_changed = true; } } /* * Without VRR registers get latched at: * vblank_start * * With VRR the earliest registers can get latched is: * intel_vrr_vmin_vblank_start(), which if we want to maintain * the correct min vtotal is >=vblank_start+1 * * The latest point registers can get latched is the vmax decision boundary: * intel_vrr_vmax_vblank_start() * * Between those two points the vblank exit starts (and hence registers get * latched) ASAP after a push is sent. * * framestart_delay is programmable 1-4. 
*/ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (DISPLAY_VER(i915) >= 13) return crtc_state->vrr.guardband; else /* The hw imposes the extra scanline before frame start */ return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1; } int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state) { /* Min vblank actually determined by flipline that is always >=vmin+1 */ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state); } int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state) { return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state); } void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct drm_display_info *info = &connector->base.display_info; int vmin, vmax; if (!intel_vrr_is_capable(connector)) return; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return; vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); vmax = adjusted_mode->crtc_clock * 1000 / (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq); vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal); vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal); if (vmin >= vmax) return; /* * flipline determines the min vblank length the hardware will * generate, and flipline>=vmin+1, hence we reduce vmin by one * to make sure we can get the actual min vblank length. */ crtc_state->vrr.vmin = vmin - 1; crtc_state->vrr.vmax = vmax; crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1; /* * For XE_LPD+, we use guardband and pipeline override * is deprecated. */ if (DISPLAY_VER(i915) >= 13) { crtc_state->vrr.guardband = crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; } else { crtc_state->vrr.pipeline_full = min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - crtc_state->framestart_delay - 1); } if (crtc_state->uapi.vrr_enabled) { crtc_state->vrr.enable = true; crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } } static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (DISPLAY_VER(i915) >= 13) return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN | XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband); else return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN | VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) | VRR_CTL_PIPELINE_FULL_OVERRIDE; } void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* * TRANS_SET_CONTEXT_LATENCY with VRR enabled * requires this chicken bit on ADL/DG2. 
*/ if (DISPLAY_VER(dev_priv) == 13) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, PIPE_VBLANK_WITH_DELAY); if (!crtc_state->vrr.flipline) { intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0); return; } intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1); intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1); intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state)); intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1); } void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return; intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN | TRANS_PUSH_SEND); } bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return false; return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; } void intel_vrr_enable(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (!crtc_state->vrr.enable) return; intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN); intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); } void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; if (!old_crtc_state->vrr.enable) return; intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(old_crtc_state)); intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder), VRR_STATUS_VRR_EN_LIVE, 1000); intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0); } void intel_vrr_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 trans_vrr_ctl; trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder)); crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE; if (DISPLAY_VER(dev_priv) >= 13) crtc_state->vrr.guardband = REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl); else if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl); if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) { crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1; crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1; crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1; } if (crtc_state->vrr.enable) crtc_state->mode_flags |= I915_MODE_FLAG_VRR; }
linux-master
drivers/gpu/drm/i915/display/intel_vrr.c
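/*
 * Editor's note: a standalone sketch of the vmin/vmax derivation performed by
 * intel_vrr_compute_config() above.  The formulas mirror the driver's
 * (vtotal = pixel clock / (htotal * refresh rate)); the mode timings and the
 * 48-144 Hz monitor range below are hypothetical numbers chosen only so the
 * arithmetic is easy to follow.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	long crtc_clock_khz = 597312;		/* hypothetical 2560x1440@144 mode */
	long htotal = 2720, vtotal = 1525;
	long min_vfreq = 48, max_vfreq = 144;	/* EDID monitor range */

	/* Shortest frame (highest refresh) and longest frame (lowest refresh). */
	long vmin = DIV_ROUND_UP(crtc_clock_khz * 1000, htotal * max_vfreq);
	long vmax = crtc_clock_khz * 1000 / (htotal * min_vfreq);

	/* Never allow a frame shorter than the mode's own vtotal. */
	if (vmin < vtotal)
		vmin = vtotal;
	if (vmax < vtotal)
		vmax = vtotal;

	/*
	 * As in the driver: flipline must be >= vmin + 1, so vmin is lowered
	 * by one to keep the true minimum vblank length reachable.
	 */
	long vrr_vmin = vmin - 1;
	long flipline = vrr_vmin + 1;

	printf("vmin %ld vmax %ld flipline %ld\n", vrr_vmin, vmax, flipline);
	return 0;
}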
/* * Copyright (c) 2007 Dave Airlie <[email protected]> * Copyright (c) 2007, 2010 Intel Corporation * Jesse Barnes <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/i2c.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "intel_backlight.h" #include "intel_connector.h" #include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_panel.h" int intel_connector_init(struct intel_connector *connector) { struct intel_digital_connector_state *conn_state; /* * Allocate enough memory to hold intel_digital_connector_state, * This might be a few bytes too many, but for connectors that don't * need it we'll free the state and allocate a smaller one on the first * successful commit anyway. */ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); if (!conn_state) return -ENOMEM; __drm_atomic_helper_connector_reset(&connector->base, &conn_state->base); intel_panel_init_alloc(connector); return 0; } struct intel_connector *intel_connector_alloc(void) { struct intel_connector *connector; connector = kzalloc(sizeof(*connector), GFP_KERNEL); if (!connector) return NULL; if (intel_connector_init(connector) < 0) { kfree(connector); return NULL; } return connector; } /* * Free the bits allocated by intel_connector_alloc. * This should only be used after intel_connector_alloc has returned * successfully, and before drm_connector_init returns successfully. * Otherwise the destroy callbacks for the connector and the state should * take care of proper cleanup/free (see intel_connector_destroy). */ void intel_connector_free(struct intel_connector *connector) { kfree(to_intel_digital_connector_state(connector->base.state)); kfree(connector); } /* * Connector type independent destroy hook for drm_connector_funcs. 
*/ void intel_connector_destroy(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); drm_edid_free(intel_connector->detect_edid); intel_hdcp_cleanup(intel_connector); intel_panel_fini(intel_connector); drm_connector_cleanup(connector); if (intel_connector->port) drm_dp_mst_put_port_malloc(intel_connector->port); kfree(connector); } int intel_connector_register(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); int ret; ret = intel_backlight_device_register(intel_connector); if (ret) goto err; if (i915_inject_probe_failure(to_i915(connector->dev))) { ret = -EFAULT; goto err_backlight; } intel_connector_debugfs_add(intel_connector); return 0; err_backlight: intel_backlight_device_unregister(intel_connector); err: return ret; } void intel_connector_unregister(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); intel_backlight_device_unregister(intel_connector); } void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { connector->encoder = encoder; drm_connector_attach_encoder(&connector->base, &encoder->base); } /* * Simple connector->get_hw_state implementation for encoders that support only * one connector and no cloning and hence the encoder state determines the state * of the connector. */ bool intel_connector_get_hw_state(struct intel_connector *connector) { enum pipe pipe = 0; struct intel_encoder *encoder = intel_attached_encoder(connector); return encoder->get_hw_state(encoder, &pipe); } enum pipe intel_connector_get_pipe(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; drm_WARN_ON(dev, !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); if (!connector->base.state->crtc) return INVALID_PIPE; return to_intel_crtc(connector->base.state->crtc)->pipe; } /** * intel_connector_update_modes - update connector from edid * @connector: DRM connector device to use * @drm_edid: previously read EDID information */ int intel_connector_update_modes(struct drm_connector *connector, const struct drm_edid *drm_edid) { int ret; drm_edid_connector_update(connector, drm_edid); ret = drm_edid_connector_add_modes(connector); return ret; } /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use * @adapter: i2c adapter * * Fetch the EDID information from @connector using the DDC bus. 
*/ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { const struct drm_edid *drm_edid; int ret; drm_edid = drm_edid_read_ddc(connector, adapter); if (!drm_edid) return 0; ret = intel_connector_update_modes(connector, drm_edid); drm_edid_free(drm_edid); return ret; } static const struct drm_prop_enum_list force_audio_names[] = { { HDMI_AUDIO_OFF_DVI, "force-dvi" }, { HDMI_AUDIO_OFF, "off" }, { HDMI_AUDIO_AUTO, "auto" }, { HDMI_AUDIO_ON, "on" }, }; void intel_attach_force_audio_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->display.properties.force_audio; if (prop == NULL) { prop = drm_property_create_enum(dev, 0, "audio", force_audio_names, ARRAY_SIZE(force_audio_names)); if (prop == NULL) return; dev_priv->display.properties.force_audio = prop; } drm_object_attach_property(&connector->base, prop, 0); } static const struct drm_prop_enum_list broadcast_rgb_names[] = { { INTEL_BROADCAST_RGB_AUTO, "Automatic" }, { INTEL_BROADCAST_RGB_FULL, "Full" }, { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" }, }; void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; prop = dev_priv->display.properties.broadcast_rgb; if (prop == NULL) { prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Broadcast RGB", broadcast_rgb_names, ARRAY_SIZE(broadcast_rgb_names)); if (prop == NULL) return; dev_priv->display.properties.broadcast_rgb = prop; } drm_object_attach_property(&connector->base, prop, 0); } void intel_attach_aspect_ratio_property(struct drm_connector *connector) { if (!drm_mode_create_aspect_ratio_property(connector->dev)) drm_object_attach_property(&connector->base, connector->dev->mode_config.aspect_ratio_property, DRM_MODE_PICTURE_ASPECT_NONE); } void intel_attach_hdmi_colorspace_property(struct drm_connector *connector) { if (!drm_mode_create_hdmi_colorspace_property(connector, 0)) drm_connector_attach_colorspace_property(connector); } void intel_attach_dp_colorspace_property(struct drm_connector *connector) { if (!drm_mode_create_dp_colorspace_property(connector, 0)) drm_connector_attach_colorspace_property(connector); } void intel_attach_scaling_mode_property(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); u32 scaling_modes; scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); /* On GMCH platforms borders are only possible on the LVDS port */ if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS) scaling_modes |= BIT(DRM_MODE_SCALE_CENTER); drm_connector_attach_scaling_mode_property(connector, scaling_modes); connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; }
linux-master
drivers/gpu/drm/i915/display/intel_connector.c
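/*
 * Editor's note: a toy, self-contained sketch of the pattern used by
 * intel_attach_force_audio_property()/intel_attach_broadcast_rgb_property()
 * above: the enum property object is created lazily on first use, cached in a
 * device-wide struct, and then merely attached to each connector.  The types
 * and names below are invented for illustration and are not DRM API.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_property {
	const char *name;
	int default_value;
};

struct toy_device {
	struct toy_property *force_audio;	/* cached, shared by all connectors */
};

struct toy_connector {
	const char *name;
	struct toy_property *props[4];
	int nprops;
};

static void attach_force_audio(struct toy_device *dev, struct toy_connector *conn)
{
	/* Create the shared property only the first time any connector asks for it. */
	if (!dev->force_audio) {
		dev->force_audio = malloc(sizeof(*dev->force_audio));
		if (!dev->force_audio)
			return;
		dev->force_audio->name = "audio";
		dev->force_audio->default_value = 0;	/* "auto" */
	}
	conn->props[conn->nprops++] = dev->force_audio;
}

int main(void)
{
	struct toy_device dev = {0};
	struct toy_connector a = { .name = "HDMI-A-1" };
	struct toy_connector b = { .name = "DP-1" };

	attach_force_audio(&dev, &a);
	attach_force_audio(&dev, &b);

	/* Both connectors point at the one cached property object. */
	printf("shared: %s\n", a.props[0] == b.props[0] ? "yes" : "no");
	free(dev.force_audio);
	return 0;
}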
/* * Copyright © 2006-2007 Intel Corporation * Copyright (c) 2006 Dave Airlie <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <[email protected]> * Dave Airlie <[email protected]> * Jesse Barnes <[email protected]> */ #include <acpi/button.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/vga_switcheroo.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpll.h" #include "intel_fdi.h" #include "intel_gmbus.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_pps_regs.h" /* Private structure for the integrated LVDS support */ struct intel_lvds_pps { /* 100us units */ int t1_t2; int t3; int t4; int t5; int tx; int divider; int port; bool powerdown_on_reset; }; struct intel_lvds_encoder { struct intel_encoder base; bool is_dual_link; i915_reg_t reg; u32 a3_power; struct intel_lvds_pps init_pps; u32 init_lvds_val; struct intel_connector *attached_connector; }; static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder) { return container_of(encoder, struct intel_lvds_encoder, base); } bool intel_lvds_port_enabled(struct drm_i915_private *i915, i915_reg_t lvds_reg, enum pipe *pipe) { u32 val; val = intel_de_read(i915, lvds_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(i915)) *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val); else *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val); return val & LVDS_PORT_EN; } static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); intel_wakeref_t wakeref; bool ret; wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain); if (!wakeref) return false; ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe); intel_display_power_put(i915, encoder->power_domain, wakeref); return ret; } static void intel_lvds_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); u32 tmp, 
flags = 0; crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS); tmp = intel_de_read(dev_priv, lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) flags |= DRM_MODE_FLAG_NHSYNC; else flags |= DRM_MODE_FLAG_PHSYNC; if (tmp & LVDS_VSYNC_POLARITY) flags |= DRM_MODE_FLAG_NVSYNC; else flags |= DRM_MODE_FLAG_PVSYNC; crtc_state->hw.adjusted_mode.flags |= flags; if (DISPLAY_VER(dev_priv) < 5) crtc_state->gmch_pfit.lvds_border_bits = tmp & LVDS_BORDER_ENABLE; /* gen2/3 store dither state in pfit control, needs to match */ if (DISPLAY_VER(dev_priv) < 4) { tmp = intel_de_read(dev_priv, PFIT_CONTROL); crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE; } crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; } static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, struct intel_lvds_pps *pps) { u32 val; pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET; val = intel_de_read(dev_priv, PP_ON_DELAYS(0)); pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val); pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val); pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val); val = intel_de_read(dev_priv, PP_OFF_DELAYS(0)); pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val); pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val); val = intel_de_read(dev_priv, PP_DIVISOR(0)); pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val); val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val); /* * Remove the BSpec specified +1 (100ms) offset that accounts for a * too short power-cycle delay due to the asynchronous programming of * the register. */ if (val) val--; /* Convert from 100ms to 100us units */ pps->t4 = val * 1000; if (DISPLAY_VER(dev_priv) <= 4 && pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { drm_dbg_kms(&dev_priv->drm, "Panel power timings uninitialized, " "setting defaults\n"); /* Set T2 to 40ms and T5 to 200ms in 100 usec units */ pps->t1_t2 = 40 * 10; pps->t5 = 200 * 10; /* Set T3 to 35ms and Tx to 200ms in 100 usec units */ pps->t3 = 35 * 10; pps->tx = 200 * 10; } drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d " "divider %d port %d powerdown_on_reset %d\n", pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx, pps->divider, pps->port, pps->powerdown_on_reset); } static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, struct intel_lvds_pps *pps) { u32 val; val = intel_de_read(dev_priv, PP_CONTROL(0)); drm_WARN_ON(&dev_priv->drm, (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); if (pps->powerdown_on_reset) val |= PANEL_POWER_RESET; intel_de_write(dev_priv, PP_CONTROL(0), val); intel_de_write(dev_priv, PP_ON_DELAYS(0), REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); intel_de_write(dev_priv, PP_OFF_DELAYS(0), REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); intel_de_write(dev_priv, PP_DIVISOR(0), REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = 
to_intel_crtc(crtc_state->uapi.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; enum pipe pipe = crtc->pipe; u32 temp; if (HAS_PCH_SPLIT(i915)) { assert_fdi_rx_pll_disabled(i915, pipe); assert_shared_dpll_disabled(i915, crtc_state->shared_dpll); } else { assert_pll_disabled(i915, pipe); } intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps); temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (HAS_PCH_CPT(i915)) { temp &= ~LVDS_PIPE_SEL_MASK_CPT; temp |= LVDS_PIPE_SEL_CPT(pipe); } else { temp &= ~LVDS_PIPE_SEL_MASK; temp |= LVDS_PIPE_SEL(pipe); } /* set the corresponsding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; temp |= crtc_state->gmch_pfit.lvds_border_bits; /* * Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ if (lvds_encoder->is_dual_link) temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; else temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); /* * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. For now, let's just maintain the * value we got from the BIOS. */ temp &= ~LVDS_A3_POWER_MASK; temp |= lvds_encoder->a3_power; /* * Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the TRANSCONF reg. */ if (DISPLAY_VER(i915) == 4) { /* * Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. */ if (crtc_state->dither && crtc_state->pipe_bpp == 18) temp |= LVDS_ENABLE_DITHER; else temp &= ~LVDS_ENABLE_DITHER; } temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) temp |= LVDS_HSYNC_POLARITY; if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; intel_de_write(i915, lvds_encoder->reg, temp); } /* * Sets the power state for the panel. 
*/ static void intel_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN); intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON); intel_de_posting_read(dev_priv, lvds_encoder->reg); if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power on\n"); intel_backlight_enable(crtc_state, conn_state); } static void intel_disable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0); if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power off\n"); intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0); intel_de_posting_read(dev_priv, lvds_encoder->reg); } static void gmch_disable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_backlight_disable(old_conn_state); intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } static void pch_disable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_backlight_disable(old_conn_state); } static void pch_post_disable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } static void intel_lvds_shutdown(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_CYCLE_DELAY_ACTIVE, 5000)) drm_err(&dev_priv->drm, "timed out waiting for panel power cycle delay\n"); } static enum drm_mode_status intel_lvds_mode_valid(struct drm_connector *_connector, struct drm_display_mode *mode) { struct intel_connector *connector = to_intel_connector(_connector); const struct drm_display_mode *fixed_mode = intel_panel_fixed_mode(connector, mode); int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq; enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; status = intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; if (fixed_mode->clock > max_pixclk) return MODE_CLOCK_HIGH; return MODE_OK; } static int intel_lvds_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct intel_connector *connector = lvds_encoder->attached_connector; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); unsigned int lvds_bpp; int ret; /* Should never happen!! 
*/ if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) { drm_err(&i915->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } if (lvds_encoder->a3_power == LVDS_A3_POWER_UP) lvds_bpp = 8*3; else lvds_bpp = 6*3; if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) { drm_dbg_kms(&i915->drm, "forcing display bpp (was %d) to LVDS (%d)\n", crtc_state->pipe_bpp, lvds_bpp); crtc_state->pipe_bpp = lvds_bpp; } crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; /* * We have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ ret = intel_panel_compute_config(connector, adjusted_mode); if (ret) return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; if (HAS_PCH_SPLIT(i915)) crtc_state->has_pch_encoder = true; ret = intel_panel_fitting(crtc_state, conn_state); if (ret) return ret; /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the * user's requested refresh rate. */ return 0; } /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ static int intel_lvds_get_modes(struct drm_connector *_connector) { struct intel_connector *connector = to_intel_connector(_connector); const struct drm_edid *fixed_edid = connector->panel.fixed_edid; /* Use panel fixed edid if we have one */ if (!IS_ERR_OR_NULL(fixed_edid)) { drm_edid_connector_update(&connector->base, fixed_edid); return drm_edid_connector_add_modes(&connector->base); } return intel_panel_get_modes(connector); } static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, .atomic_check = intel_digital_connector_atomic_check, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { .detect = intel_panel_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_encoder_destroy, }; static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; } /* These systems claim to have LVDS, but really don't */ static const struct dmi_system_id intel_no_lvds[] = { { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core series)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core 2 series)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "MSI IM-945GSE-A", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MSI"), DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Dell Studio Hybrid", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), }, }, { 
.callback = intel_no_lvds_dmi_callback, .ident = "Dell OptiPlex FX170", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen Mini PC", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen Mini PC MP915", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen i915GMm-HFS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "AOpen i45GMx-I", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Clientron U800", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), DMI_MATCH(DMI_PRODUCT_NAME, "U800"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Clientron E830", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), DMI_MATCH(DMI_PRODUCT_NAME, "E830"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Asus EeeBox PC EB1007", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Asus AT5NM10T-I", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Hewlett-Packard HP t5740", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, " t5740"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Hewlett-Packard t5745", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Hewlett-Packard st5747", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "MSI Wind Box DC500", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Gigabyte GA-D525TUD", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Supermicro X7SPA-H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Fujitsu Esprimo Q900", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Intel D410PT", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), DMI_MATCH(DMI_BOARD_NAME, "D410PT"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Intel D425KT", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Intel D510MO", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Intel D525MW", .matches = 
{ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), }, }, { .callback = intel_no_lvds_dmi_callback, .ident = "Radiant P845", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "P845"), }, }, { } /* terminating entry */ }; static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) { DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); return 1; } static const struct dmi_system_id intel_dual_link_lvds[] = { { .callback = intel_dual_link_lvds_callback, .ident = "Apple MacBook Pro 15\" (2010)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), }, }, { .callback = intel_dual_link_lvds_callback, .ident = "Apple MacBook Pro 15\" (2011)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), }, }, { .callback = intel_dual_link_lvds_callback, .ident = "Apple MacBook Pro 15\" (2012)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), }, }, { } /* terminating entry */ }; struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) { struct intel_encoder *encoder; for_each_intel_encoder(&i915->drm, encoder) { if (encoder->type == INTEL_OUTPUT_LVDS) return encoder; } return NULL; } bool intel_is_dual_link_lvds(struct drm_i915_private *i915) { struct intel_encoder *encoder = intel_get_lvds_encoder(i915); return encoder && to_lvds_encoder(encoder)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev); struct intel_connector *connector = lvds_encoder->attached_connector; const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); unsigned int val; /* use the module option value if specified */ if (i915->params.lvds_channel_mode > 0) return i915->params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (fixed_mode->clock > 112999) return true; if (dmi_check_system(intel_dual_link_lvds)) return true; /* * BIOS should set the proper LVDS register value at boot, but * in reality, it doesn't set the value when the lid is closed; * we need to check "the value to be set" in VBT when LVDS * register is uninitialized. */ val = intel_de_read(i915, lvds_encoder->reg); if (HAS_PCH_CPT(i915)) val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); else val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); if (val == 0) val = connector->panel.vbt.bios_lvds_val; return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; } static void intel_lvds_add_properties(struct drm_connector *connector) { intel_attach_scaling_mode_property(connector); } /** * intel_lvds_init - setup LVDS connectors on this device * @i915: i915 device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ void intel_lvds_init(struct drm_i915_private *i915) { struct intel_lvds_encoder *lvds_encoder; struct intel_connector *connector; const struct drm_edid *drm_edid; struct intel_encoder *encoder; i915_reg_t lvds_reg; u32 lvds; u8 pin; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support, "Useless DMI match. 
Internal LVDS support disabled by VBT\n"); return; } if (!i915->display.vbt.int_lvds_support) { drm_dbg_kms(&i915->drm, "Internal LVDS support disabled by VBT\n"); return; } if (HAS_PCH_SPLIT(i915)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; lvds = intel_de_read(i915, lvds_reg); if (HAS_PCH_SPLIT(i915)) { if ((lvds & LVDS_DETECTED) == 0) return; } pin = GMBUS_PIN_PANEL; if (!intel_bios_is_lvds_present(i915, &pin)) { if ((lvds & LVDS_PORT_EN) == 0) { drm_dbg_kms(&i915->drm, "LVDS is not present in VBT\n"); return; } drm_dbg_kms(&i915->drm, "LVDS is not present in VBT, but enabled anyway\n"); } lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); if (!lvds_encoder) return; connector = intel_connector_alloc(); if (!connector) { kfree(lvds_encoder); return; } lvds_encoder->attached_connector = connector; encoder = &lvds_encoder->base; drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); encoder->enable = intel_enable_lvds; encoder->pre_enable = intel_pre_enable_lvds; encoder->compute_config = intel_lvds_compute_config; if (HAS_PCH_SPLIT(i915)) { encoder->disable = pch_disable_lvds; encoder->post_disable = pch_post_disable_lvds; } else { encoder->disable = gmch_disable_lvds; } encoder->get_hw_state = intel_lvds_get_hw_state; encoder->get_config = intel_lvds_get_config; encoder->update_pipe = intel_backlight_update; encoder->shutdown = intel_lvds_shutdown; connector->get_hw_state = intel_connector_get_hw_state; intel_connector_attach_encoder(connector, encoder); encoder->type = INTEL_OUTPUT_LVDS; encoder->power_domain = POWER_DOMAIN_PORT_OTHER; encoder->port = PORT_NONE; encoder->cloneable = 0; if (DISPLAY_VER(i915) < 4) encoder->pipe_mask = BIT(PIPE_B); else encoder->pipe_mask = ~0; drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs); connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; lvds_encoder->reg = lvds_reg; intel_lvds_add_properties(&connector->base); intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps); lvds_encoder->init_lvds_val = lvds; /* * LVDS discovery: * 1) check for EDID on DDC * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel */ /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ mutex_lock(&i915->drm.mode_config.mutex); if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { drm_edid = drm_edid_read_switcheroo(&connector->base, intel_gmbus_get_adapter(i915, pin)); } else { drm_edid = drm_edid_read_ddc(&connector->base, intel_gmbus_get_adapter(i915, pin)); } if (drm_edid) { if (drm_edid_connector_update(&connector->base, drm_edid) || !drm_edid_connector_add_modes(&connector->base)) { drm_edid_connector_update(&connector->base, NULL); drm_edid_free(drm_edid); drm_edid = ERR_PTR(-EINVAL); } } else { drm_edid = ERR_PTR(-ENOENT); } intel_bios_init_panel_late(i915, &connector->panel, NULL, IS_ERR(drm_edid) ? NULL : drm_edid); /* Try EDID first */ intel_panel_add_edid_fixed_modes(connector, true); /* Failed to get EDID, what about VBT? */ if (!intel_panel_preferred_fixed_mode(connector)) intel_panel_add_vbt_lfp_fixed_mode(connector); /* * If we didn't get a fixed mode from EDID or VBT, try checking * if the panel is already turned on. If so, assume that * whatever is currently programmed is the correct mode. 
*/ if (!intel_panel_preferred_fixed_mode(connector)) intel_panel_add_encoder_fixed_mode(connector, encoder); mutex_unlock(&i915->drm.mode_config.mutex); /* If we still don't have a mode after all that, give up. */ if (!intel_panel_preferred_fixed_mode(connector)) goto failed; intel_panel_init(connector, drm_edid); intel_backlight_setup(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; return; failed: drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n"); drm_connector_cleanup(&connector->base); drm_encoder_cleanup(&encoder->base); kfree(lvds_encoder); intel_connector_free(connector); return; }
linux-master
drivers/gpu/drm/i915/display/intel_lvds.c
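The dual-link decision in compute_is_dual_link_lvds() above follows a fixed precedence: the lvds_channel_mode module parameter wins, then the ~112 MHz single-channel pixel-clock limit, then the DMI force list, and finally the clock-B power bits read from the LVDS register (or the VBT fallback). The stand-alone sketch below mirrors that precedence outside the driver; struct lvds_probe, CLKB_POWER_UP and the sample clock values are illustrative assumptions, not driver API.

/* Hypothetical sketch of the dual-link LVDS decision order, assuming the
 * precedence shown in compute_is_dual_link_lvds() above. */
#include <stdbool.h>
#include <stdio.h>

struct lvds_probe {
	int module_channel_mode;   /* 0 = auto, 1 = force single, 2 = force dual */
	int fixed_mode_clock_khz;  /* pixel clock of the panel's fixed mode */
	bool dmi_forces_dual;      /* stand-in for dmi_check_system() matching */
	unsigned int lvds_reg_val; /* stand-in for the LVDS register / VBT value */
};

#define CLKB_POWER_UP 0x3u /* illustrative stand-in for the clock-B power bits */

static bool is_dual_link(const struct lvds_probe *p)
{
	if (p->module_channel_mode > 0)            /* module option overrides all */
		return p->module_channel_mode == 2;
	if (p->fixed_mode_clock_khz > 112999)      /* single channel tops out ~112 MHz */
		return true;
	if (p->dmi_forces_dual)                    /* DMI force list (MacBook Pros) */
		return true;
	return (p->lvds_reg_val & CLKB_POWER_UP) == CLKB_POWER_UP;
}

int main(void)
{
	struct lvds_probe high_clock = { .fixed_mode_clock_khz = 138500 };
	struct lvds_probe low_clock = { .fixed_mode_clock_khz = 68900 };
	struct lvds_probe forced_single = { .module_channel_mode = 1,
					    .fixed_mode_clock_khz = 138500 };

	printf("138.5 MHz panel:         %s-link\n",
	       is_dual_link(&high_clock) ? "dual" : "single");
	printf("68.9 MHz panel:          %s-link\n",
	       is_dual_link(&low_clock) ? "dual" : "single");
	printf("forced single via param: %s-link\n",
	       is_dual_link(&forced_single) ? "dual" : "single");
	return 0;
}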
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/i915_pciids.h> #include <drm/drm_color_mgmt.h> #include <linux/pci.h> #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_device.h" #include "intel_display_power.h" #include "intel_display_reg_defs.h" #include "intel_fbc.h" static const struct intel_display_device_info no_display = {}; #define PIPE_A_OFFSET 0x70000 #define PIPE_B_OFFSET 0x71000 #define PIPE_C_OFFSET 0x72000 #define PIPE_D_OFFSET 0x73000 #define CHV_PIPE_C_OFFSET 0x74000 /* * There's actually no pipe EDP. Some pipe registers have * simply shifted from the pipe to the transcoder, while * keeping their original offset. Thus we need PIPE_EDP_OFFSET * to access such registers in transcoder EDP. */ #define PIPE_EDP_OFFSET 0x7f000 /* ICL DSI 0 and 1 */ #define PIPE_DSI0_OFFSET 0x7b000 #define PIPE_DSI1_OFFSET 0x7b800 #define TRANSCODER_A_OFFSET 0x60000 #define TRANSCODER_B_OFFSET 0x61000 #define TRANSCODER_C_OFFSET 0x62000 #define CHV_TRANSCODER_C_OFFSET 0x63000 #define TRANSCODER_D_OFFSET 0x63000 #define TRANSCODER_EDP_OFFSET 0x6f000 #define TRANSCODER_DSI0_OFFSET 0x6b000 #define TRANSCODER_DSI1_OFFSET 0x6b800 #define CURSOR_A_OFFSET 0x70080 #define CURSOR_B_OFFSET 0x700c0 #define CHV_CURSOR_C_OFFSET 0x700e0 #define IVB_CURSOR_B_OFFSET 0x71080 #define IVB_CURSOR_C_OFFSET 0x72080 #define TGL_CURSOR_D_OFFSET 0x73080 #define I845_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ } #define I9XX_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ } #define IVB_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ } #define HSW_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ } #define CHV_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ } #define I845_CURSOR_OFFSETS \ .cursor_offsets = { \ [PIPE_A] = CURSOR_A_OFFSET, \ } #define I9XX_CURSOR_OFFSETS \ .cursor_offsets = { \ [PIPE_A] = CURSOR_A_OFFSET, \ [PIPE_B] = CURSOR_B_OFFSET, \ } #define CHV_CURSOR_OFFSETS \ .cursor_offsets = { \ [PIPE_A] = CURSOR_A_OFFSET, \ [PIPE_B] = CURSOR_B_OFFSET, \ [PIPE_C] = CHV_CURSOR_C_OFFSET, \ } #define IVB_CURSOR_OFFSETS \ .cursor_offsets = { \ [PIPE_A] = CURSOR_A_OFFSET, \ [PIPE_B] = IVB_CURSOR_B_OFFSET, \ [PIPE_C] = IVB_CURSOR_C_OFFSET, \ } #define TGL_CURSOR_OFFSETS \ .cursor_offsets = { \ [PIPE_A] = CURSOR_A_OFFSET, \ [PIPE_B] = IVB_CURSOR_B_OFFSET, \ [PIPE_C] = IVB_CURSOR_C_OFFSET, \ [PIPE_D] = TGL_CURSOR_D_OFFSET, \ } #define I845_COLORS \ .color = { .gamma_lut_size = 256 } #define 
I9XX_COLORS \ .color = { .gamma_lut_size = 129, \ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ } #define ILK_COLORS \ .color = { .gamma_lut_size = 1024 } #define IVB_COLORS \ .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 } #define CHV_COLORS \ .color = { \ .degamma_lut_size = 65, .gamma_lut_size = 257, \ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ } #define GLK_COLORS \ .color = { \ .degamma_lut_size = 33, .gamma_lut_size = 1024, \ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ } #define ICL_COLORS \ .color = { \ .degamma_lut_size = 33, .gamma_lut_size = 262145, \ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ } #define I830_DISPLAY \ .has_overlay = 1, \ .cursor_needs_physical = 1, \ .overlay_needs_physical = 1, \ .has_gmch = 1, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ \ .__runtime_defaults.ip.ver = 2, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) #define I845_DISPLAY \ .has_overlay = 1, \ .overlay_needs_physical = 1, \ .has_gmch = 1, \ I845_PIPE_OFFSETS, \ I845_CURSOR_OFFSETS, \ I845_COLORS, \ \ .__runtime_defaults.ip.ver = 2, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A), \ .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) static const struct intel_display_device_info i830_display = { I830_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C), /* DVO A/B/C */ }; static const struct intel_display_device_info i845_display = { I845_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ }; static const struct intel_display_device_info i85x_display = { I830_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info i865g_display = { I845_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_DISPLAY \ .has_gmch = 1, \ .has_overlay = 1, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ \ .__runtime_defaults.ip.ver = 3, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */ static const struct intel_display_device_info i915g_display = { GEN3_DISPLAY, I845_COLORS, .cursor_needs_physical = 1, .overlay_needs_physical = 1, }; static const struct intel_display_device_info i915gm_display = { GEN3_DISPLAY, I9XX_COLORS, .cursor_needs_physical = 1, .overlay_needs_physical = 1, .supports_tv = 1, .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info i945g_display = { GEN3_DISPLAY, I845_COLORS, .has_hotplug = 1, .cursor_needs_physical = 1, .overlay_needs_physical = 1, }; static const struct intel_display_device_info i945gm_display = { GEN3_DISPLAY, I9XX_COLORS, .has_hotplug = 1, .cursor_needs_physical = 1, .overlay_needs_physical = 1, .supports_tv = 1, .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info g33_display = { GEN3_DISPLAY, I845_COLORS, .has_hotplug = 1, }; static const struct intel_display_device_info pnv_display = { GEN3_DISPLAY, I9XX_COLORS, .has_hotplug 
= 1, }; #define GEN4_DISPLAY \ .has_hotplug = 1, \ .has_gmch = 1, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ \ .__runtime_defaults.ip.ver = 4, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) static const struct intel_display_device_info i965g_display = { GEN4_DISPLAY, .has_overlay = 1, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */ }; static const struct intel_display_device_info i965gm_display = { GEN4_DISPLAY, .has_overlay = 1, .supports_tv = 1, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info g45_display = { GEN4_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */ }; static const struct intel_display_device_info gm45_display = { GEN4_DISPLAY, .supports_tv = 1, .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; #define ILK_DISPLAY \ .has_hotplug = 1, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ \ .__runtime_defaults.ip.ver = 5, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ static const struct intel_display_device_info ilk_d_display = { ILK_DISPLAY, }; static const struct intel_display_device_info ilk_m_display = { ILK_DISPLAY, .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info snb_display = { .has_hotplug = 1, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, ILK_COLORS, .__runtime_defaults.ip.ver = 6, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info ivb_display = { .has_hotplug = 1, IVB_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, .__runtime_defaults.ip.ver = 7, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info vlv_display = { .has_gmch = 1, .has_hotplug = 1, .mmio_offset = VLV_DISPLAY_BASE, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, I9XX_COLORS, .__runtime_defaults.ip.ver = 7, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* HDMI/DP B/C */ }; static const struct intel_display_device_info hsw_display = { .has_ddi = 1, .has_dp_mst = 1, .has_fpga_dbg = 1, .has_hotplug = 1, .has_psr = 1, .has_psr_hw_tracking = 1, HSW_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, .__runtime_defaults.ip.ver = 7, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), 
.__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info bdw_display = { .has_ddi = 1, .has_dp_mst = 1, .has_fpga_dbg = 1, .has_hotplug = 1, .has_psr = 1, .has_psr_hw_tracking = 1, HSW_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, .__runtime_defaults.ip.ver = 8, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_display_device_info chv_display = { .has_hotplug = 1, .has_gmch = 1, .mmio_offset = VLV_DISPLAY_BASE, CHV_PIPE_OFFSETS, CHV_CURSOR_OFFSETS, CHV_COLORS, .__runtime_defaults.ip.ver = 8, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* HDMI/DP B/C/D */ }; static const struct intel_display_device_info skl_display = { .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ .dbuf.slice_mask = BIT(DBUF_S1), .has_ddi = 1, .has_dp_mst = 1, .has_fpga_dbg = 1, .has_hotplug = 1, .has_ipc = 1, .has_psr = 1, .has_psr_hw_tracking = 1, HSW_PIPE_OFFSETS, IVB_CURSOR_OFFSETS, IVB_COLORS, .__runtime_defaults.ip.ver = 9, .__runtime_defaults.has_dmc = 1, .__runtime_defaults.has_hdcp = 1, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN9_LP_DISPLAY \ .dbuf.slice_mask = BIT(DBUF_S1), \ .has_dp_mst = 1, \ .has_ddi = 1, \ .has_fpga_dbg = 1, \ .has_hotplug = 1, \ .has_ipc = 1, \ .has_psr = 1, \ .has_psr_hw_tracking = 1, \ HSW_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ IVB_COLORS, \ \ .__runtime_defaults.has_dmc = 1, \ .__runtime_defaults.has_hdcp = 1, \ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) static const struct intel_display_device_info bxt_display = { GEN9_LP_DISPLAY, .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */ .__runtime_defaults.ip.ver = 9, }; static const struct intel_display_device_info glk_display = { GEN9_LP_DISPLAY, .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ GLK_COLORS, .__runtime_defaults.ip.ver = 10, }; #define ICL_DISPLAY \ .abox_mask = BIT(0), \ .dbuf.size = 2048, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ .has_ddi = 1, \ .has_dp_mst = 1, \ .has_fpga_dbg = 1, \ .has_hotplug = 1, \ .has_ipc = 1, \ .has_psr = 1, \ .has_psr_hw_tracking = 1, \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ 
[TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ IVB_CURSOR_OFFSETS, \ ICL_COLORS, \ \ .__runtime_defaults.ip.ver = 11, \ .__runtime_defaults.has_dmc = 1, \ .__runtime_defaults.has_dsc = 1, \ .__runtime_defaults.has_hdcp = 1, \ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) static const struct intel_display_device_info icl_display = { ICL_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E), }; static const struct intel_display_device_info jsl_ehl_display = { ICL_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), }; #define XE_D_DISPLAY \ .abox_mask = GENMASK(2, 1), \ .dbuf.size = 2048, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ .has_ddi = 1, \ .has_dp_mst = 1, \ .has_dsb = 1, \ .has_fpga_dbg = 1, \ .has_hotplug = 1, \ .has_ipc = 1, \ .has_psr = 1, \ .has_psr_hw_tracking = 1, \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_D] = PIPE_D_OFFSET, \ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ TGL_CURSOR_OFFSETS, \ ICL_COLORS, \ \ .__runtime_defaults.ip.ver = 12, \ .__runtime_defaults.has_dmc = 1, \ .__runtime_defaults.has_dsc = 1, \ .__runtime_defaults.has_hdcp = 1, \ .__runtime_defaults.pipe_mask = \ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .__runtime_defaults.cpu_transcoder_mask = \ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) static const struct intel_display_device_info tgl_display = { XE_D_DISPLAY, /* * FIXME DDI C/combo PHY C missing due to combo PHY * code making a mess on SKUs where the PHY is missing. 
*/ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6), }; static const struct intel_display_device_info dg1_display = { XE_D_DISPLAY, .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2), }; static const struct intel_display_device_info rkl_display = { XE_D_DISPLAY, .abox_mask = BIT(0), .has_hti = 1, .has_psr_hw_tracking = 0, .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2), }; static const struct intel_display_device_info adl_s_display = { XE_D_DISPLAY, .has_hti = 1, .has_psr_hw_tracking = 0, .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; #define XE_LPD_FEATURES \ .abox_mask = GENMASK(1, 0), \ .color = { \ .degamma_lut_size = 129, .gamma_lut_size = 1024, \ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ }, \ .dbuf.size = 4096, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ .has_ddi = 1, \ .has_dp_mst = 1, \ .has_dsb = 1, \ .has_fpga_dbg = 1, \ .has_hotplug = 1, \ .has_ipc = 1, \ .has_psr = 1, \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_D] = PIPE_D_OFFSET, \ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ TGL_CURSOR_OFFSETS, \ \ .__runtime_defaults.ip.ver = 13, \ .__runtime_defaults.has_dmc = 1, \ .__runtime_defaults.has_dsc = 1, \ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A), \ .__runtime_defaults.has_hdcp = 1, \ .__runtime_defaults.pipe_mask = \ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D) static const struct intel_display_device_info xe_lpd_display = { XE_LPD_FEATURES, .has_cdclk_crawl = 1, .has_psr_hw_tracking = 0, .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; static const struct intel_display_device_info xe_hpd_display = { XE_LPD_FEATURES, .has_cdclk_squash = 1, .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D_XELPD) | BIT(PORT_TC1), }; static const struct intel_display_device_info xe_lpdp_display = { XE_LPD_FEATURES, .has_cdclk_crawl = 1, .has_cdclk_squash = 1, .__runtime_defaults.ip.ver = 14, .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B), .__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4), }; /* * Separate detection for no display cases to keep the display id array simple. 
* * IVB Q requires subvendor and subdevice matching to differentiate from IVB D * GT2 server. */ static bool has_no_display(struct pci_dev *pdev) { static const struct pci_device_id ids[] = { INTEL_IVB_Q_IDS(0), {} }; return pci_match_id(ids, pdev); } #undef INTEL_VGA_DEVICE #define INTEL_VGA_DEVICE(id, info) { id, info } static const struct { u32 devid; const struct intel_display_device_info *info; } intel_display_ids[] = { INTEL_I830_IDS(&i830_display), INTEL_I845G_IDS(&i845_display), INTEL_I85X_IDS(&i85x_display), INTEL_I865G_IDS(&i865g_display), INTEL_I915G_IDS(&i915g_display), INTEL_I915GM_IDS(&i915gm_display), INTEL_I945G_IDS(&i945g_display), INTEL_I945GM_IDS(&i945gm_display), INTEL_I965G_IDS(&i965g_display), INTEL_G33_IDS(&g33_display), INTEL_I965GM_IDS(&i965gm_display), INTEL_GM45_IDS(&gm45_display), INTEL_G45_IDS(&g45_display), INTEL_PINEVIEW_G_IDS(&pnv_display), INTEL_PINEVIEW_M_IDS(&pnv_display), INTEL_IRONLAKE_D_IDS(&ilk_d_display), INTEL_IRONLAKE_M_IDS(&ilk_m_display), INTEL_SNB_D_IDS(&snb_display), INTEL_SNB_M_IDS(&snb_display), INTEL_IVB_M_IDS(&ivb_display), INTEL_IVB_D_IDS(&ivb_display), INTEL_HSW_IDS(&hsw_display), INTEL_VLV_IDS(&vlv_display), INTEL_BDW_IDS(&bdw_display), INTEL_CHV_IDS(&chv_display), INTEL_SKL_IDS(&skl_display), INTEL_BXT_IDS(&bxt_display), INTEL_GLK_IDS(&glk_display), INTEL_KBL_IDS(&skl_display), INTEL_CFL_IDS(&skl_display), INTEL_ICL_11_IDS(&icl_display), INTEL_EHL_IDS(&jsl_ehl_display), INTEL_JSL_IDS(&jsl_ehl_display), INTEL_TGL_12_IDS(&tgl_display), INTEL_DG1_IDS(&dg1_display), INTEL_RKL_IDS(&rkl_display), INTEL_ADLS_IDS(&adl_s_display), INTEL_RPLS_IDS(&adl_s_display), INTEL_ADLP_IDS(&xe_lpd_display), INTEL_ADLN_IDS(&xe_lpd_display), INTEL_RPLP_IDS(&xe_lpd_display), INTEL_DG2_IDS(&xe_hpd_display), /* * Do not add any GMD_ID-based platforms to this list. They will * be probed automatically based on the IP version reported by * the hardware. */ }; static const struct { u16 ver; u16 rel; const struct intel_display_device_info *display; } gmdid_display_map[] = { { 14, 0, &xe_lpdp_display }, }; static const struct intel_display_device_info * probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); void __iomem *addr; u32 val; int i; /* The caller expects to ver, rel and step to be initialized * here, and there's no good way to check when there was a * failure and no_display was returned. So initialize all these * values here zero, to be sure. 
*/ *ver = 0; *rel = 0; *step = 0; addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32)); if (!addr) { drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n"); return &no_display; } val = ioread32(addr); pci_iounmap(pdev, addr); if (val == 0) { drm_dbg_kms(&i915->drm, "Device doesn't have display\n"); return &no_display; } *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val); *rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); *step = REG_FIELD_GET(GMD_ID_STEP, val); for (i = 0; i < ARRAY_SIZE(gmdid_display_map); i++) if (*ver == gmdid_display_map[i].ver && *rel == gmdid_display_map[i].rel) return gmdid_display_map[i].display; drm_err(&i915->drm, "Unrecognized display IP version %d.%02d; disabling display.\n", *ver, *rel); return &no_display; } const struct intel_display_device_info * intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid, u16 *gmdid_ver, u16 *gmdid_rel, u16 *gmdid_step) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int i; if (has_gmdid) return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step); if (has_no_display(pdev)) { drm_dbg_kms(&i915->drm, "Device doesn't have display\n"); return &no_display; } for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) { if (intel_display_ids[i].devid == pdev->device) return intel_display_ids[i].info; } drm_dbg(&i915->drm, "No display ID found for device ID %04x; disabling display.\n", pdev->device); return &no_display; } void intel_display_device_info_runtime_init(struct drm_i915_private *i915) { struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915); enum pipe pipe; BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES); BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->cpu_transcoder_mask) < I915_MAX_TRANSCODERS); BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS); /* Wa_14011765242: adl-s A0,A1 */ if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2)) for_each_pipe(i915, pipe) display_runtime->num_scalers[pipe] = 0; else if (DISPLAY_VER(i915) >= 11) { for_each_pipe(i915, pipe) display_runtime->num_scalers[pipe] = 2; } else if (DISPLAY_VER(i915) >= 9) { display_runtime->num_scalers[PIPE_A] = 2; display_runtime->num_scalers[PIPE_B] = 2; display_runtime->num_scalers[PIPE_C] = 1; } if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915)) for_each_pipe(i915, pipe) display_runtime->num_sprites[pipe] = 4; else if (DISPLAY_VER(i915) >= 11) for_each_pipe(i915, pipe) display_runtime->num_sprites[pipe] = 6; else if (DISPLAY_VER(i915) == 10) for_each_pipe(i915, pipe) display_runtime->num_sprites[pipe] = 3; else if (IS_BROXTON(i915)) { /* * Skylake and Broxton currently don't expose the topmost plane as its * use is exclusive with the legacy cursor and we only want to expose * one of those, not both. Until we can safely expose the topmost plane * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, * we don't expose the topmost plane at all to prevent ABI breakage * down the line. 
*/ display_runtime->num_sprites[PIPE_A] = 2; display_runtime->num_sprites[PIPE_B] = 2; display_runtime->num_sprites[PIPE_C] = 1; } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { for_each_pipe(i915, pipe) display_runtime->num_sprites[pipe] = 2; } else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) { for_each_pipe(i915, pipe) display_runtime->num_sprites[pipe] = 1; } if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) && !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) { drm_info(&i915->drm, "Display not present, disabling\n"); goto display_fused_off; } if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) { u32 fuse_strap = intel_de_read(i915, FUSE_STRAP); u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP); /* * SFUSE_STRAP is supposed to have a bit signalling the display * is fused off. Unfortunately it seems that, at least in * certain cases, fused off display means that PCH display * reads don't land anywhere. In that case, we read 0s. * * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK * should be set when taking over after the firmware. */ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || (HAS_PCH_CPT(i915) && !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&i915->drm, "Display fused off, disabling\n"); goto display_fused_off; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&i915->drm, "PipeC fused off\n"); display_runtime->pipe_mask &= ~BIT(PIPE_C); display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (DISPLAY_VER(i915) >= 9) { u32 dfsm = intel_de_read(i915, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { display_runtime->pipe_mask &= ~BIT(PIPE_A); display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { display_runtime->pipe_mask &= ~BIT(PIPE_B); display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { display_runtime->pipe_mask &= ~BIT(PIPE_C); display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(i915) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { display_runtime->pipe_mask &= ~BIT(PIPE_D); display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (!display_runtime->pipe_mask) goto display_fused_off; if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) display_runtime->has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) display_runtime->fbc_mask = 0; if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) display_runtime->has_dmc = 0; if (IS_DISPLAY_VER(i915, 10, 12) && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) display_runtime->has_dsc = 0; } return; display_fused_off: memset(display_runtime, 0, sizeof(*display_runtime)); } void intel_display_device_info_print(const struct intel_display_device_info *info, const struct intel_display_runtime_info *runtime, struct drm_printer *p) { if (runtime->ip.rel) drm_printf(p, "display version: %u.%02u\n", runtime->ip.ver, runtime->ip.rel); else drm_printf(p, "display version: %u\n", runtime->ip.ver); #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); }
linux-master
drivers/gpu/drm/i915/display/intel_display_device.c
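probe_gmdid_display() above reads the 32-bit GMD_ID register, treats an all-zero value as "no display", and otherwise splits it into version/release/step fields that are matched against gmdid_display_map. The sketch below shows that decoding in isolation; the exact bit layout used here (arch in 31:22, release in 21:14, step in 5:0) and the macro names are assumptions for illustration, not the authoritative register definition.

/* Hypothetical GMD_ID-style decoding, with an assumed field layout. */
#include <stdint.h>
#include <stdio.h>

#define GMDID_ARCH_MASK     0xffc00000u /* bits 31:22 (assumed) */
#define GMDID_ARCH_SHIFT    22
#define GMDID_RELEASE_MASK  0x003fc000u /* bits 21:14 (assumed) */
#define GMDID_RELEASE_SHIFT 14
#define GMDID_STEP_MASK     0x0000003fu /* bits 5:0 (assumed) */

int main(void)
{
	/* Made-up raw register value; its decoded version (14.00) lines up
	 * with the single gmdid_display_map entry above. */
	uint32_t gmdid = (uint32_t)14 << GMDID_ARCH_SHIFT;

	if (gmdid == 0) {
		printf("no display IP present\n");
		return 0;
	}

	printf("display IP version %u.%02u, stepping %u\n",
	       (unsigned)((gmdid & GMDID_ARCH_MASK) >> GMDID_ARCH_SHIFT),
	       (unsigned)((gmdid & GMDID_RELEASE_MASK) >> GMDID_RELEASE_SHIFT),
	       (unsigned)(gmdid & GMDID_STEP_MASK));
	return 0;
}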
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_panel.h" #include "intel_pch_refclk.h" #include "intel_sbi.h" static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) { intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS, 100)) drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); } /* WaMPhyProgramming:hsw */ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) { u32 tmp; lpt_fdi_reset_mphy(dev_priv); tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); tmp |= (0x12 << 24); intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); tmp &= ~(7 << 13); tmp |= (5 << 13); intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); tmp &= ~(7 << 13); tmp |= (5 << 13); intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); tmp &= ~0xFF; tmp |= 0x1C; intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); tmp &= ~0xFF; tmp |= 0x1C; intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); tmp &= ~(0xFF << 16); tmp |= (0x1C << 16); intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); tmp &= ~(0xFF << 16); tmp |= (0x1C << 16); intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); tmp |= (1 << 27); intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); tmp |= (1 << 27); intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); tmp &= ~(0xF << 28); tmp |= (4 << 28); intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); tmp &= ~(0xF << 28); tmp |= (4 << 28); intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); } void lpt_disable_iclkip(struct drm_i915_private *dev_priv) { u32 temp; intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); mutex_lock(&dev_priv->sb_lock); temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); temp |= SBI_SSCCTL_DISABLE; intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); mutex_unlock(&dev_priv->sb_lock); } struct iclkip_params { u32 iclk_virtual_root_freq; u32 iclk_pi_range; u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor; }; static void iclkip_params_init(struct iclkip_params *p) { memset(p, 0, sizeof(*p)); p->iclk_virtual_root_freq = 172800 * 1000; p->iclk_pi_range = 64; } static int lpt_iclkip_freq(struct iclkip_params 
*p) { return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq, p->desired_divisor << p->auxdiv); } static void lpt_compute_iclkip(struct iclkip_params *p, int clock) { iclkip_params_init(p); /* The iCLK virtual clock root frequency is in MHz, * but the adjusted_mode->crtc_clock in KHz. To get the * divisors, it is necessary to divide one by another, so we * convert the virtual clock precision to KHz here for higher * precision. */ for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) { p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq, clock << p->auxdiv); p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2; p->phaseinc = p->desired_divisor % p->iclk_pi_range; /* * Near 20MHz is a corner case which is * out of range for the 7-bit divisor */ if (p->divsel <= 0x7f) break; } } int lpt_iclkip(const struct intel_crtc_state *crtc_state) { struct iclkip_params p; lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock); return lpt_iclkip_freq(&p); } /* Program iCLKIP clock to the desired frequency */ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int clock = crtc_state->hw.adjusted_mode.crtc_clock; struct iclkip_params p; u32 temp; lpt_disable_iclkip(dev_priv); lpt_compute_iclkip(&p, clock); drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock); /* This should not happen with any sane values */ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) & ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) & ~SBI_SSCDIVINTPHASE_INCVAL_MASK); drm_dbg_kms(&dev_priv->drm, "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc); mutex_lock(&dev_priv->sb_lock); /* Program SSCDIVINTPHASE6 */ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel); temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc); temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir); temp |= SBI_SSCDIVINTPHASE_PROPAGATE; intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); /* Program SSCAUXDIV */ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv); intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); /* Enable modulator and associated divider */ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); temp &= ~SBI_SSCCTL_DISABLE; intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); mutex_unlock(&dev_priv->sb_lock); /* Wait for initialization time */ udelay(24); intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); } int lpt_get_iclkip(struct drm_i915_private *dev_priv) { struct iclkip_params p; u32 temp; if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) return 0; iclkip_params_init(&p); mutex_lock(&dev_priv->sb_lock); temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); if (temp & SBI_SSCCTL_DISABLE) { mutex_unlock(&dev_priv->sb_lock); return 0; } temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> SBI_SSCDIVINTPHASE_INCVAL_SHIFT; temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); p.auxdiv = (temp & 
SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; mutex_unlock(&dev_priv->sb_lock); p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc; return lpt_iclkip_freq(&p); } /* Implements 3 different sequences from BSpec chapter "Display iCLK * Programming" based on the parameters passed: * - Sequence to enable CLKOUT_DP * - Sequence to enable CLKOUT_DP without spread * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O */ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, bool with_spread, bool with_fdi) { u32 reg, tmp; if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, "FDI requires downspread\n")) with_spread = true; if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && with_fdi, "LP PCH doesn't have FDI\n")) with_fdi = false; mutex_lock(&dev_priv->sb_lock); tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); tmp &= ~SBI_SSCCTL_DISABLE; tmp |= SBI_SSCCTL_PATHALT; intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); udelay(24); if (with_spread) { tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); tmp &= ~SBI_SSCCTL_PATHALT; intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); if (with_fdi) lpt_fdi_program_mphy(dev_priv); } reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); mutex_unlock(&dev_priv->sb_lock); } /* Sequence to disable CLKOUT_DP */ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) { u32 reg, tmp; mutex_lock(&dev_priv->sb_lock); reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); if (!(tmp & SBI_SSCCTL_DISABLE)) { if (!(tmp & SBI_SSCCTL_PATHALT)) { tmp |= SBI_SSCCTL_PATHALT; intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); udelay(32); } tmp |= SBI_SSCCTL_DISABLE; intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); } mutex_unlock(&dev_priv->sb_lock); } #define BEND_IDX(steps) ((50 + (steps)) / 5) static const u16 sscdivintphase[] = { [BEND_IDX( 50)] = 0x3B23, [BEND_IDX( 45)] = 0x3B23, [BEND_IDX( 40)] = 0x3C23, [BEND_IDX( 35)] = 0x3C23, [BEND_IDX( 30)] = 0x3D23, [BEND_IDX( 25)] = 0x3D23, [BEND_IDX( 20)] = 0x3E23, [BEND_IDX( 15)] = 0x3E23, [BEND_IDX( 10)] = 0x3F23, [BEND_IDX( 5)] = 0x3F23, [BEND_IDX( 0)] = 0x0025, [BEND_IDX( -5)] = 0x0025, [BEND_IDX(-10)] = 0x0125, [BEND_IDX(-15)] = 0x0125, [BEND_IDX(-20)] = 0x0225, [BEND_IDX(-25)] = 0x0225, [BEND_IDX(-30)] = 0x0325, [BEND_IDX(-35)] = 0x0325, [BEND_IDX(-40)] = 0x0425, [BEND_IDX(-45)] = 0x0425, [BEND_IDX(-50)] = 0x0525, }; /* * Bend CLKOUT_DP * steps -50 to 50 inclusive, in steps of 5 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) * change in clock period = -(steps / 10) * 5.787 ps */ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) { u32 tmp; int idx = BEND_IDX(steps); if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) return; if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) return; mutex_lock(&dev_priv->sb_lock); if (steps % 10 != 0) tmp = 0xAAAAAAAB; else tmp = 0x00000000; intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); tmp &= 0xffff0000; tmp |= sscdivintphase[idx]; intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); mutex_unlock(&dev_priv->sb_lock); } 
#undef BEND_IDX static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) { u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); u32 ctl = intel_de_read(dev_priv, SPLL_CTL); if ((ctl & SPLL_PLL_ENABLE) == 0) return false; if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; if (IS_BROADWELL(dev_priv) && (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) return true; return false; } static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); if ((ctl & WRPLL_PLL_ENABLE) == 0) return false; if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) return true; if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) && (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; return false; } static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; bool has_fdi = false; for_each_intel_encoder(&dev_priv->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_ANALOG: has_fdi = true; break; default: break; } } /* * The BIOS may have decided to use the PCH SSC * reference so we must not disable it until the * relevant PLLs have stopped relying on it. We'll * just leave the PCH SSC reference enabled in case * any active PLL is using it. It will get disabled * after runtime suspend if we don't have FDI. * * TODO: Move the whole reference clock handling * to the modeset sequence proper so that we can * actually enable/disable/reconfigure these things * safely. To do that we need to introduce a real * clock hierarchy. That would also allow us to do * clock bending finally. */ dev_priv->display.dpll.pch_ssc_use = 0; if (spll_uses_pch_ssc(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2); } if (dev_priv->display.dpll.pch_ssc_use) return; if (has_fdi) { lpt_bend_clkout_dp(dev_priv, 0); lpt_enable_clkout_dp(dev_priv, true, true); } else { lpt_disable_clkout_dp(dev_priv); } } static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(&dev_priv->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: has_panel = true; has_lvds = true; break; case INTEL_OUTPUT_EDP: has_panel = true; if (encoder->port == PORT_A) has_cpu_edp = true; break; default: break; } } if (HAS_PCH_IBX(dev_priv)) { has_ck505 = dev_priv->display.vbt.display_clock_mode; can_ssc = has_ck505; } else { has_ck505 = false; can_ssc = true; } /* Check if any DPLLs are using the SSC source */ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); if (!(temp & DPLL_VCO_ENABLE)) continue; if ((temp & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { using_ssc_source = true; break; } } drm_dbg_kms(&dev_priv->drm, 
"has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. This is only under driver's control after * PCH B stepping, previous chipset stepping should be * ignoring this setting. */ val = intel_de_read(dev_priv, PCH_DREF_CONTROL); /* As we must carefully and slowly disable/enable each source in turn, * compute the final state we want first and check if we need to * make any changes at all. */ final = val; final &= ~DREF_NONSPREAD_SOURCE_MASK; if (has_ck505) final |= DREF_NONSPREAD_CK505_ENABLE; else final |= DREF_NONSPREAD_SOURCE_ENABLE; final &= ~DREF_SSC_SOURCE_MASK; final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; final &= ~DREF_SSC1_ENABLE; if (has_panel) { final |= DREF_SSC_SOURCE_ENABLE; if (intel_panel_use_ssc(dev_priv) && can_ssc) final |= DREF_SSC1_ENABLE; if (has_cpu_edp) { if (intel_panel_use_ssc(dev_priv) && can_ssc) final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; else final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else { final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; } } else if (using_ssc_source) { final |= DREF_SSC_SOURCE_ENABLE; final |= DREF_SSC1_ENABLE; } if (final == val) return; /* Always enable nonspread source */ val &= ~DREF_NONSPREAD_SOURCE_MASK; if (has_ck505) val |= DREF_NONSPREAD_CK505_ENABLE; else val |= DREF_NONSPREAD_SOURCE_ENABLE; if (has_panel) { val &= ~DREF_SSC_SOURCE_MASK; val |= DREF_SSC_SOURCE_ENABLE; /* SSC must be turned on before enabling the CPU output */ if (intel_panel_use_ssc(dev_priv) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); val |= DREF_SSC1_ENABLE; } else { val &= ~DREF_SSC1_ENABLE; } /* Get SSC going before enabling the outputs */ intel_de_write(dev_priv, PCH_DREF_CONTROL, val); intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { if (intel_panel_use_ssc(dev_priv) && can_ssc) { drm_dbg_kms(&dev_priv->drm, "Using SSC on eDP\n"); val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; } else { val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } } else { val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; } intel_de_write(dev_priv, PCH_DREF_CONTROL, val); intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); } else { drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; /* Turn off CPU output */ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; intel_de_write(dev_priv, PCH_DREF_CONTROL, val); intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); if (!using_ssc_source) { drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); /* Turn off the SSC source */ val &= ~DREF_SSC_SOURCE_MASK; val |= DREF_SSC_SOURCE_DISABLE; /* Turn off SSC1 */ val &= ~DREF_SSC1_ENABLE; intel_de_write(dev_priv, PCH_DREF_CONTROL, val); intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); } } drm_WARN_ON(&dev_priv->drm, val != final); } /* * Initialize reference clocks when the driver loads */ void intel_init_pch_refclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) ilk_init_pch_refclk(dev_priv); else if (HAS_PCH_LPT(dev_priv)) lpt_init_pch_refclk(dev_priv); }
linux-master
drivers/gpu/drm/i915/display/intel_pch_refclk.c
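lpt_compute_iclkip() above derives the iCLKIP settings by dividing the fixed 172.8 MHz virtual root clock by the target pixel clock: the rounded integer divisor splits into a 7-bit divsel plus a 64-step phaseinc, and auxdiv is bumped when divsel would overflow 7 bits (the low-clock corner case near 20 MHz noted in the code). A minimal worked example of that arithmetic, using the same constants but hypothetical helper names:

/* Sketch of the iCLKIP divisor math from lpt_compute_iclkip()/lpt_iclkip_freq(). */
#include <stdio.h>

#define ICLK_VIRTUAL_ROOT_FREQ (172800 * 1000) /* same constant as iclkip_params_init() */
#define ICLK_PI_RANGE 64

static long div_round_closest(long n, long d)
{
	/* positive-only equivalent of the kernel's DIV_ROUND_CLOSEST() */
	return (n + d / 2) / d;
}

int main(void)
{
	long clock = 108000; /* example pixel clock in kHz */
	unsigned int auxdiv, divsel = 0, phaseinc = 0, desired_divisor = 0;

	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		desired_divisor = (unsigned int)div_round_closest(ICLK_VIRTUAL_ROOT_FREQ,
								  clock << auxdiv);
		divsel = desired_divisor / ICLK_PI_RANGE - 2;
		phaseinc = desired_divisor % ICLK_PI_RANGE;
		if (divsel <= 0x7f) /* 7-bit divisor limit, as in the driver */
			break;
	}

	printf("clock %ld kHz -> auxdiv=%u divsel=%u phaseinc=%u (divisor %u)\n",
	       clock, auxdiv, divsel, phaseinc, desired_divisor);
	printf("reconstructed clock: %ld kHz\n",
	       div_round_closest(ICLK_VIRTUAL_ROOT_FREQ,
				 (long)desired_divisor << auxdiv));
	return 0;
}

For a 108000 kHz pixel clock this yields auxdiv=0, divsel=23, phaseinc=0, and the reconstructed frequency round-trips to 108000 kHz exactly, matching the drm_WARN_ON() consistency check in lpt_program_iclkip() above.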
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include "g4x_dp.h" #include "i915_reg.h" #include "intel_crt.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" #include "intel_pps.h" #include "intel_sdvo.h" bool intel_has_pch_trancoder(struct drm_i915_private *i915, enum pipe pch_transcoder) { return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) || (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A); } enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (HAS_PCH_LPT(i915)) return PIPE_A; else return crtc->pipe; } static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t dp_reg) { enum pipe port_pipe; bool state; state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); I915_STATE_WARN(dev_priv, state && port_pipe == pipe, "PCH DP %c enabled on transcoder %c, should be disabled\n", port_name(port), pipe_name(pipe)); I915_STATE_WARN(dev_priv, HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, "IBX PCH DP %c still using transcoder B\n", port_name(port)); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, enum port port, i915_reg_t hdmi_reg) { enum pipe port_pipe; bool state; state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); I915_STATE_WARN(dev_priv, state && port_pipe == pipe, "PCH HDMI %c enabled on transcoder %c, should be disabled\n", port_name(port), pipe_name(pipe)); I915_STATE_WARN(dev_priv, HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, "IBX PCH HDMI %c still using transcoder B\n", port_name(port)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { enum pipe port_pipe; assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); I915_STATE_WARN(dev_priv, intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && port_pipe == pipe, "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); I915_STATE_WARN(dev_priv, intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); /* PCH SDVOB multiplex with HDMIB */ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); } static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { u32 val; bool enabled; val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); enabled = !!(val & TRANS_ENABLE); I915_STATE_WARN(dev_priv, enabled, "transcoder assertion failed, should be off on pipe %c but is still active\n", pipe_name(pipe)); } static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, enum port port, i915_reg_t hdmi_reg) { u32 val = intel_de_read(dev_priv, hdmi_reg); if (val & SDVO_ENABLE || (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) return; drm_dbg_kms(&dev_priv->drm, "Sanitizing transcoder select for HDMI %c\n", port_name(port)); val &= ~SDVO_PIPE_SEL_MASK; val |= SDVO_PIPE_SEL(PIPE_A); intel_de_write(dev_priv, hdmi_reg, val); } static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, enum port port, 
i915_reg_t dp_reg) { u32 val = intel_de_read(dev_priv, dp_reg); if (val & DP_PORT_EN || (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) return; drm_dbg_kms(&dev_priv->drm, "Sanitizing transcoder select for DP %c\n", port_name(port)); val &= ~DP_PIPE_SEL_MASK; val |= DP_PIPE_SEL(PIPE_A); intel_de_write(dev_priv, dp_reg, val); } static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) { /* * The BIOS may select transcoder B on some of the PCH * ports even it doesn't enable the port. This would trip * assert_pch_dp_disabled() and assert_pch_hdmi_disabled(). * Sanitize the transcoder select bits to prevent that. We * assume that the BIOS never actually enabled the port, * because if it did we'd actually have to toggle the port * on and back off to make the transcoder A select stick * (see. intel_dp_link_down(), intel_disable_hdmi(), * intel_disable_sdvo()). */ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B); ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C); ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D); /* PCH SDVOB multiplex with HDMIB */ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB); ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC); ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID); } static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc, const struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_set_m_n(dev_priv, m_n, PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc, const struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_set_m_n(dev_priv, m_n, PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); } void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_get_m_n(dev_priv, m_n, PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; intel_get_m_n(dev_priv, m_n, PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); } static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder))); intel_de_write(dev_priv, 
PCH_TRANS_VSYNCSHIFT(pch_transcoder), intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder))); } static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 val, pipeconf_val; /* Make sure PCH DPLL is enabled */ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, pipe); assert_fdi_rx_enabled(dev_priv, pipe); if (HAS_PCH_CPT(dev_priv)) { reg = TRANS_CHICKEN2(pipe); val = intel_de_read(dev_priv, reg); /* * Workaround: Set the timing override bit * before enabling the pch transcoder. */ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; /* Configure frame start delay to match the CPU */ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1); intel_de_write(dev_priv, reg, val); } reg = PCH_TRANSCONF(pipe); val = intel_de_read(dev_priv, reg); pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe)); if (HAS_PCH_IBX(dev_priv)) { /* Configure frame start delay to match the CPU */ val &= ~TRANS_FRAME_START_DELAY_MASK; val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1); /* * Make the BPC in transcoder be consistent with * that in pipeconf reg. For HDMI we must use 8bpc * here for both 8bpc and 12bpc. */ val &= ~TRANSCONF_BPC_MASK; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val |= TRANSCONF_BPC_8; else val |= pipeconf_val & TRANSCONF_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; else val |= TRANS_INTERLACE_INTERLACED; } else { val |= TRANS_INTERLACE_PROGRESSIVE; } intel_de_write(dev_priv, reg, val | TRANS_ENABLE); if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", pipe_name(pipe)); } static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); /* Ports must be off as well */ assert_pch_ports_disabled(dev_priv, pipe); reg = PCH_TRANSCONF(pipe); intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", pipe_name(pipe)); if (HAS_PCH_CPT(dev_priv)) /* Workaround: Clear the timing override chicken bit again. */ intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void ilk_pch_pre_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); /* * Note: FDI PLL enabling _must_ be done before we enable the * cpu pipes, hence this is separate from all the other fdi/pch * enabling. 
*/ ilk_fdi_pll_enable(crtc_state); } /* * Enable PCH resources required for PCH ports: * - PCH PLLs * - FDI training & RX/TX * - update transcoder timings * - DP transcoding bits * - transcoder */ void ilk_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; u32 temp; assert_pch_transcoder_disabled(dev_priv, pipe); /* For PCH output, training FDI link */ intel_fdi_link_train(crtc, crtc_state); /* * We need to program the right clock selection * before writing the pixel multiplier into the DPLL. */ if (HAS_PCH_CPT(dev_priv)) { u32 sel; temp = intel_de_read(dev_priv, PCH_DPLL_SEL); temp |= TRANS_DPLL_ENABLE(pipe); sel = TRANS_DPLLB_SEL(pipe); if (crtc_state->shared_dpll == intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) temp |= sel; else temp &= ~sel; intel_de_write(dev_priv, PCH_DPLL_SEL, temp); } /* * XXX: pch pll's can be enabled any time before we enable the PCH * transcoder, and we actually should do this to not upset any PCH * transcoder that already use the clock when we share it. * * Note that enable_shared_dpll tries to do the right thing, but * get_shared_dpll unconditionally resets the pll - we need that * to have the right LVDS enable sequence. */ intel_enable_shared_dpll(crtc_state); /* set transcoder timing, panel must allow it */ assert_pps_unlocked(dev_priv, pipe); if (intel_crtc_has_dp_encoder(crtc_state)) { intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n); intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2); } ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev_priv) && intel_crtc_has_dp_encoder(crtc_state)) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); enum port port; temp = intel_de_read(dev_priv, reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_VSYNC_ACTIVE_HIGH | TRANS_DP_HSYNC_ACTIVE_HIGH | TRANS_DP_BPC_MASK); temp |= TRANS_DP_OUTPUT_ENABLE; temp |= bpc << 9; /* same format but at 11:9 */ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; port = intel_get_crtc_new_encoder(state, crtc_state)->port; drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D); temp |= TRANS_DP_PORT_SEL(port); intel_de_write(dev_priv, reg, temp); } ilk_enable_pch_transcoder(crtc_state); } void ilk_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { ilk_fdi_disable(crtc); } void ilk_pch_post_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; ilk_disable_pch_transcoder(crtc); if (HAS_PCH_CPT(dev_priv)) { /* disable TRANS_DP_CTL */ intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe), TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK, TRANS_DP_PORT_SEL_NONE); /* disable DPLL_SEL */ intel_de_rmw(dev_priv, PCH_DPLL_SEL, TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0); } ilk_fdi_pll_disable(crtc); } static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* read out 
port_clock from the DPLL */ i9xx_crtc_clock_get(crtc, crtc_state); /* * In case there is an active pipe without active ports, * we may need some idea for the dotclock anyway. * Calculate one based on the FDI configuration. */ crtc_state->hw.adjusted_mode.crtc_clock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state), &crtc_state->fdi_m_n); } void ilk_pch_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum pipe pipe = crtc->pipe; enum intel_dpll_id pll_id; bool pll_active; u32 tmp; if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0) return; crtc_state->has_pch_encoder = true; tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, &crtc_state->fdi_m_n); if (HAS_PCH_IBX(dev_priv)) { /* * The pipe->pch transcoder and pch transcoder->pll * mapping is fixed. */ pll_id = (enum intel_dpll_id) pipe; } else { tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); if (tmp & TRANS_DPLLB_SEL(pipe)) pll_id = DPLL_ID_PCH_PLL_B; else pll_id = DPLL_ID_PCH_PLL_A; } crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id); pll = crtc_state->shared_dpll; pll_active = intel_dpll_get_hw_state(dev_priv, pll, &crtc_state->dpll_hw_state); drm_WARN_ON(&dev_priv->drm, !pll_active); tmp = crtc_state->dpll_hw_state.dpll; crtc_state->pixel_multiplier = ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; ilk_pch_clock_get(crtc_state); } static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val, pipeconf_val; /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); assert_fdi_rx_enabled(dev_priv, PIPE_A); val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); /* Workaround: set timing override bit. */ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; /* Configure frame start delay to match the CPU */ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1); intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK) val |= TRANS_INTERLACE_INTERLACED; else val |= TRANS_INTERLACE_PROGRESSIVE; intel_de_write(dev_priv, LPT_TRANSCONF, val); if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 100)) drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); } static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) { intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. 
*/ intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void lpt_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); assert_pch_transcoder_disabled(dev_priv, PIPE_A); lpt_program_iclkip(crtc_state); /* Set transcoder timing. */ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); lpt_enable_pch_transcoder(crtc_state); } void lpt_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); lpt_disable_pch_transcoder(dev_priv); lpt_disable_iclkip(dev_priv); } void lpt_pch_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) return; crtc_state->has_pch_encoder = true; tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, &crtc_state->fdi_m_n); crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } void intel_pch_sanitize(struct drm_i915_private *i915) { if (HAS_PCH_IBX(i915)) ibx_sanitize_pch_ports(i915); }
linux-master
drivers/gpu/drm/i915/display/intel_pch_display.c
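A minimal standalone sketch of the pipe-to-PCH-transcoder mapping implemented by intel_crtc_pch_transcoder() and intel_has_pch_trancoder() in the file above: on LPT everything goes through the single PCH transcoder (reported as PIPE_A), while on IBX/CPT the PCH transcoder follows the pipe. The enums, the pch_type stand-in, and pch_transcoder_for_pipe() are simplified illustrations and not kernel API.

/* Standalone sketch (not kernel code): stand-in types approximating the
 * pipe -> PCH transcoder mapping used by intel_crtc_pch_transcoder().
 */
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };
enum pch_type { PCH_IBX, PCH_CPT, PCH_LPT }; /* simplified stand-ins */

static enum pipe pch_transcoder_for_pipe(enum pch_type pch, enum pipe pipe)
{
	/* LPT muxes all pipes through its single PCH transcoder ("A") */
	if (pch == PCH_LPT)
		return PIPE_A;

	/* IBX/CPT have one PCH transcoder per pipe */
	return pipe;
}

int main(void)
{
	printf("LPT, pipe B -> PCH transcoder %c\n",
	       'A' + pch_transcoder_for_pipe(PCH_LPT, PIPE_B));
	printf("CPT, pipe B -> PCH transcoder %c\n",
	       'A' + pch_transcoder_for_pipe(PCH_CPT, PIPE_B));
	return 0;
}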
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/display/drm_dsc.h> #include "i915_utils.h" #include "intel_qp_tables.h" /* from BPP 6 to 24 in steps of 0.5 */ #define RC_RANGE_QP444_8BPC_MAX_NUM_BPP 37 /* from BPP 6 to 30 in steps of 0.5 */ #define RC_RANGE_QP444_10BPC_MAX_NUM_BPP 49 /* from BPP 6 to 36 in steps of 0.5 */ #define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61 /* For YCbCr420 the bits_per_pixel sent in PPS params * is double the target bpp. The below values represent * the target bpp. */ /* from BPP 4 to 12 in steps of 0.5 */ #define RC_RANGE_QP420_8BPC_MAX_NUM_BPP 17 /* from BPP 4 to 15 in steps of 0.5 */ #define RC_RANGE_QP420_10BPC_MAX_NUM_BPP 23 /* from BPP 4 to 18 in steps of 0.5 */ #define RC_RANGE_QP420_12BPC_MAX_NUM_BPP 29 /* * These qp tables are as per the C model * and it has the rows pointing to bpps which increment * in steps of 0.5 * We do not support fractional bpps as of today, * hence we would skip the fractional bpps during * our references for qp calclulations. */ static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }, { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }, { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, { 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, { 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 }, { 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 }, { 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 }, { 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 }, { 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3 } }; static const u8 rc_range_maxqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = { { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 }, { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 }, { 
9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 }, { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 }, { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1 }, { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 }, { 12, 11, 11, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 }, { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1 }, { 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 }, { 12, 12, 12, 12, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 }, { 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 }, { 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4 } }; static const u8 rc_range_minqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0 }, { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0 }, { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0 }, { 10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0 }, { 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1 }, { 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 }, { 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 }, { 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 }, { 18, 18, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 13, 13, 13, 12, 12, 12, 11, 
11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3 } }; static const u8 rc_range_maxqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = { { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 12, 11, 11, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 }, { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0 }, { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1 }, { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 10, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1 }, { 15, 15, 14, 14, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1 }, { 16, 15, 15, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1 }, { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 12, 11, 11, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 }, { 16, 16, 16, 15, 15, 15, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 }, { 16, 16, 16, 16, 15, 15, 15, 15, 15, 14, 14, 13, 13, 13, 12, 12, 12, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 }, { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 }, { 19, 19, 18, 18, 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4 } }; static const u8 rc_range_minqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 11, 10, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 13, 12, 
12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 8, 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 }, { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0 }, { 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0 }, { 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1 }, { 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 }, { 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 }, { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1 }, { 22, 22, 21, 21, 20, 20, 20, 20, 19, 19, 18, 18, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3 } }; static const u8 rc_range_maxqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = { { 12, 12, 12, 12, 12, 12, 11, 11, 11, 10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 16, 15, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 10, 10, 9, 9, 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 11, 11, 10, 10, 10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11, 10, 10, 10, 10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 }, { 17, 16, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 13, 12, 12, 11, 11, 11, 11, 9, 9, 9, 9, 
8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0 }, { 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11, 11, 11, 11, 11, 10, 10, 10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0 }, { 18, 18, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 14, 13, 13, 12, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1 }, { 19, 19, 18, 18, 17, 17, 17, 17, 17, 17, 16, 16, 16, 15, 15, 14, 14, 13, 13, 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1 }, { 20, 19, 19, 18, 18, 18, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 14, 14, 13, 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1 }, { 20, 20, 19, 19, 18, 18, 18, 18, 18, 18, 17, 17, 17, 16, 16, 15, 15, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2 }, { 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 15, 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 }, { 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 }, { 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 18, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2 }, { 23, 23, 22, 22, 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 17, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 } }; static const u8 rc_range_minqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, { 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0 }, { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0 }, { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0 }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1 }, { 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 1, 1 }, { 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 2, 2, 1 }, { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 2, 1 }, { 9, 8, 8, 7, 7, 7, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 2 }, { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3 } }; static const u8 rc_range_maxqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = { { 4, 4, 3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 4, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, { 5, 5, 5, 5, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, { 6, 6, 6, 6, 6, 5, 4, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0 }, { 7, 7, 7, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 1, 1, 1, 0 }, { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 1, 1, 0 }, { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1 }, { 8, 8, 8, 8, 8, 7, 6, 5, 4, 4, 4, 3, 3, 2, 2, 
2, 1 }, { 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 }, { 10, 10, 9, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 3, 3, 2, 2 }, { 10, 10, 10, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 2, 2 }, { 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2 }, { 11, 11, 11, 10, 9, 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3, 2 }, { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4, 4, 4, 3 }, { 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4 } }; static const u8 rc_range_minqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0 }, { 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0 }, { 7, 7, 7, 7, 7, 6, 5, 5, 5, 5, 5, 4, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0 }, { 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 2, 2, 1, 1, 1, 0 }, { 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4, 3, 2, 2, 2, 1, 1, 1, 0 }, { 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 2, 1, 1 }, { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1 }, { 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 }, { 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 7, 6, 6, 5, 4, 4, 3, 3, 2, 1 }, { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 2, 1 }, { 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 3, 3, 2, 2 }, { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4 } }; static const u8 rc_range_maxqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = { { 8, 8, 7, 6, 4, 4, 3, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 8, 8, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }, { 9, 9, 9, 8, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0 }, { 10, 10, 10, 9, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 0, 0 }, { 11, 11, 11, 10, 10, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0 }, { 11, 11, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 1, 1 }, { 11, 11, 11, 11, 11, 10, 9, 8, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 3, 2, 2, 2, 1 }, { 12, 12, 12, 12, 12, 11, 10, 9, 8, 8, 8, 7, 6, 5, 5, 4, 3, 3, 3, 2, 2, 2, 1 }, { 13, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 3, 2, 2 }, { 14, 14, 13, 13, 13, 12, 11, 10, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2 }, { 14, 14, 14, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 4, 3, 3, 2 }, { 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3, 2 }, { 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 4, 3, 2 }, { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4, 4, 3, 3 }, { 18, 17, 17, 16, 15, 15, 14, 13, 13, 12, 11, 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5 } }; static const u8 rc_range_minqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 10, 9, 9, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 11, 10, 10, 10, 10, 9, 9, 8, 7, 6, 6, 6, 6, 5, 5, 4, 3, 3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0 }, { 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 7, 6, 5, 5, 4, 
4, 3, 3, 3, 2, 1, 1, 0, 0, 0, 0, 0 }, { 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 7, 6, 5, 5, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 1, 0 }, { 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 8, 8, 8, 7, 6, 6, 5, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1 }, { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 5, 4, 4, 2, 2, 1, 1, 1, 1 }, { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 2, 2, 1, 1, 1 }, { 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 4, 3, 3, 2, 2, 1, 1 }, { 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 11, 10, 10, 9, 8, 8, 7, 7, 6, 5, 4, 3, 3, 2, 2, 1 }, { 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 2, 2, 1 }, { 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 6, 6, 5, 4, 4, 3, 3, 2 }, { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 14, 13, 12, 12, 11, 10, 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4 } }; static const u8 rc_range_maxqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = { { 11, 10, 9, 8, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 12, 11, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 }, { 13, 12, 12, 11, 11, 10, 9, 8, 7, 6, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 }, { 14, 13, 13, 12, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0 }, { 15, 14, 14, 13, 13, 11, 10, 9, 8, 7, 7, 7, 7, 6, 6, 5, 4, 4, 4, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0 }, { 15, 15, 15, 14, 14, 13, 12, 11, 10, 10, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 0, 0, 0 }, { 15, 15, 15, 15, 15, 14, 13, 12, 11, 11, 11, 10, 9, 8, 7, 6, 6, 6, 6, 5, 4, 4, 3, 2, 2, 2, 1, 1, 0 }, { 16, 16, 16, 16, 16, 15, 14, 13, 12, 12, 12, 11, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 1 }, { 17, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 6, 6, 5, 5, 3, 3, 2, 2, 1, 1 }, { 18, 18, 17, 17, 17, 16, 15, 14, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 6, 5, 5, 4, 3, 3, 2, 2, 1 }, { 18, 18, 18, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 10, 9, 9, 8, 8, 7, 6, 5, 4, 4, 3, 3, 2, 2 }, { 19, 19, 18, 18, 17, 17, 16, 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 6, 5, 4, 4, 3, 3, 2 }, { 19, 19, 19, 18, 17, 17, 17, 16, 15, 15, 14, 13, 13, 12, 12, 11, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 3, 3, 2 }, { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3 }, { 22, 21, 21, 20, 19, 19, 18, 17, 17, 16, 15, 15, 15, 14, 13, 13, 12, 11, 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5 } }; #define PARAM_TABLE(_minmax, _bpc, _row, _col, _is_420) do { \ if (bpc == (_bpc)) { \ if (_is_420) \ return rc_range_##_minmax##qp420_##_bpc##bpc[_row][_col]; \ else \ return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \ } \ } while (0) u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i, bool is_420) { PARAM_TABLE(min, 8, buf_i, bpp_i, is_420); PARAM_TABLE(min, 10, buf_i, bpp_i, is_420); PARAM_TABLE(min, 12, buf_i, bpp_i, is_420); MISSING_CASE(bpc); return 0; } u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i, bool is_420) { PARAM_TABLE(max, 8, buf_i, bpp_i, is_420); PARAM_TABLE(max, 10, buf_i, bpp_i, is_420); PARAM_TABLE(max, 12, buf_i, bpp_i, is_420); MISSING_CASE(bpc); return 0; }
linux-master
drivers/gpu/drm/i915/display/intel_qp_tables.c
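A standalone sketch in the same spirit as the PARAM_TABLE() dispatch used by intel_lookup_range_min_qp()/intel_lookup_range_max_qp() above: index a per-range, per-bpp QP table with explicit bounds checking. The table dimensions and values here are made-up demo data (the real tables come from the DSC C model and have DSC_NUM_BUF_RANGES rows), and lookup_min_qp() is a hypothetical helper, not a kernel function.

/* Standalone sketch (not kernel code): bounds-checked min-QP lookup over a
 * tiny demo table, mirroring the row = buffer range, column = bpp layout.
 */
#include <stdio.h>

#define NUM_BUF_RANGES 3   /* stand-in for DSC_NUM_BUF_RANGES (15 in DSC) */
#define NUM_BPP_COLS   4   /* stand-in for the per-bpc column count */

static const unsigned char demo_minqp[NUM_BUF_RANGES][NUM_BPP_COLS] = {
	{ 0, 0, 0, 0 },
	{ 3, 2, 1, 0 },
	{ 5, 4, 3, 2 },
};

static int lookup_min_qp(int buf_i, int bpp_i)
{
	/* reject indices outside the table instead of reading garbage */
	if (buf_i < 0 || buf_i >= NUM_BUF_RANGES ||
	    bpp_i < 0 || bpp_i >= NUM_BPP_COLS)
		return -1;

	return demo_minqp[buf_i][bpp_i];
}

int main(void)
{
	printf("min qp for range 2, bpp column 1: %d\n", lookup_min_qp(2, 1));
	printf("out-of-range lookup: %d\n", lookup_min_qp(2, 10));
	return 0;
}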
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include "g4x_dp.h" #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_pps.h" #include "intel_pps_regs.h" #include "intel_quirks.h" static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, enum pipe pipe); static void pps_init_delays(struct intel_dp *intel_dp); static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd); static const char *pps_name(struct drm_i915_private *i915, struct intel_pps *pps) { if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { switch (pps->pps_pipe) { case INVALID_PIPE: /* * FIXME would be nice if we can guarantee * to always have a valid PPS when calling this. */ return "PPS <none>"; case PIPE_A: return "PPS A"; case PIPE_B: return "PPS B"; default: MISSING_CASE(pps->pps_pipe); break; } } else { switch (pps->pps_idx) { case 0: return "PPS 0"; case 1: return "PPS 1"; default: MISSING_CASE(pps->pps_idx); break; } } return "PPS <invalid>"; } intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_wakeref_t wakeref; /* * See intel_pps_reset_all() why we need a power domain reference here. */ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&dev_priv->display.pps.mutex); return wakeref; } intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->display.pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return 0; } static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps.pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); u32 DP; if (drm_WARN(&dev_priv->drm, intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, "skipping %s kick due to [ENCODER:%d:%s] being active\n", pps_name(dev_priv, &intel_dp->pps), dig_port->base.base.base.id, dig_port->base.base.name)) return; drm_dbg_kms(&dev_priv->drm, "kicking %s for [ENCODER:%d:%s]\n", pps_name(dev_priv, &intel_dp->pps), dig_port->base.base.base.id, dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; DP |= DP_PORT_WIDTH(1); DP |= DP_LINK_TRAIN_PAT_1; if (IS_CHERRYVIEW(dev_priv)) DP |= DP_PIPE_SEL_CHV(pipe); else DP |= DP_PIPE_SEL(pipe); pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; /* * The DPLL for the pipe must be enabled for this to work. * So enable temporarily it if it's not already enabled. */ if (!pll_enabled) { release_cl_override = IS_CHERRYVIEW(dev_priv) && !chv_phy_powergate_ch(dev_priv, phy, ch, true); if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) { drm_err(&dev_priv->drm, "Failed to force on PLL for pipe %c!\n", pipe_name(pipe)); return; } } /* * Similar magic as in intel_dp_enable_port(). 
* We _must_ do this port enable + disable trick * to make this power sequencer lock onto the port. * Otherwise even VDD force bit won't work. */ intel_de_write(dev_priv, intel_dp->output_reg, DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN); intel_de_posting_read(dev_priv, intel_dp->output_reg); if (!pll_enabled) { vlv_force_pll_off(dev_priv, pipe); if (release_cl_override) chv_phy_powergate_ch(dev_priv, phy, ch, false); } } static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); /* * We don't have power sequencer currently. * Pick one that's not used by other ports. */ for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (encoder->type == INTEL_OUTPUT_EDP) { drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE && intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); if (intel_dp->pps.pps_pipe != INVALID_PIPE) pipes &= ~(1 << intel_dp->pps.pps_pipe); } else { drm_WARN_ON(&dev_priv->drm, intel_dp->pps.pps_pipe != INVALID_PIPE); if (intel_dp->pps.active_pipe != INVALID_PIPE) pipes &= ~(1 << intel_dp->pps.active_pipe); } } if (pipes == 0) return INVALID_PIPE; return ffs(pipes) - 1; } static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE && intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe); if (intel_dp->pps.pps_pipe != INVALID_PIPE) return intel_dp->pps.pps_pipe; pipe = vlv_find_free_pps(dev_priv); /* * Didn't find one. This should not happen since there * are two power sequencers and up to two eDP ports. */ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) pipe = PIPE_A; vlv_steal_power_sequencer(dev_priv, pipe); intel_dp->pps.pps_pipe = pipe; drm_dbg_kms(&dev_priv->drm, "picked %s for [ENCODER:%d:%s]\n", pps_name(dev_priv, &intel_dp->pps), dig_port->base.base.base.id, dig_port->base.base.name); /* init power sequencer on this pipe and port */ pps_init_delays(intel_dp); pps_init_registers(intel_dp, true); /* * Even vdd force doesn't work until we've made * the power sequencer lock in on the port. */ vlv_power_sequencer_kick(intel_dp); return intel_dp->pps.pps_pipe; } static int bxt_power_sequencer_idx(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int pps_idx = intel_dp->pps.pps_idx; lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); if (!intel_dp->pps.pps_reset) return pps_idx; intel_dp->pps.pps_reset = false; /* * Only the HW needs to be reprogrammed, the SW state is fixed and * has been setup during connector init. 
*/ pps_init_registers(intel_dp, false); return pps_idx; } typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx); static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx) { return intel_de_read(dev_priv, PP_STATUS(pps_idx)) & PP_ON; } static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx) { return intel_de_read(dev_priv, PP_CONTROL(pps_idx)) & EDP_FORCE_VDD; } static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx) { return true; } static enum pipe vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, enum port port, pps_check check) { enum pipe pipe; for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) & PANEL_PORT_SELECT_MASK; if (port_sel != PANEL_PORT_SELECT_VLV(port)) continue; if (!check(dev_priv, pipe)) continue; return pipe; } return INVALID_PIPE; } static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum port port = dig_port->base.port; lockdep_assert_held(&dev_priv->display.pps.mutex); /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, pps_has_pp_on); /* didn't find one? pick one where vdd is on */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, pps_has_vdd_on); /* didn't find one? pick one with just the correct port */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port, pps_any); /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ if (intel_dp->pps.pps_pipe == INVALID_PIPE) { drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] no initial power sequencer\n", dig_port->base.base.base.id, dig_port->base.base.name); return; } drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] initial power sequencer: %s\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); } static int intel_num_pps(struct drm_i915_private *i915) { if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) return 2; if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) return 2; if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1; if (INTEL_PCH_TYPE(i915) >= PCH_ICP) return 2; return 1; } static bool intel_pps_is_valid(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (intel_dp->pps.pps_idx == 1 && INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) < PCH_MTP) return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; return true; } static int bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check) { int pps_idx, pps_num = intel_num_pps(i915); for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { if (check(i915, pps_idx)) return pps_idx; } return -1; } static bool pps_initial_setup(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = to_i915(encoder->base.dev); lockdep_assert_held(&i915->display.pps.mutex); if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { vlv_initial_power_sequencer_setup(intel_dp); return true; } /* first ask the VBT */ if (intel_num_pps(i915) > 1) intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller; else intel_dp->pps.pps_idx = 0; if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= 
intel_num_pps(i915))) intel_dp->pps.pps_idx = -1; /* VBT wasn't parsed yet? pick one where the panel is on */ if (intel_dp->pps.pps_idx < 0) intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on); /* didn't find one? pick one where vdd is on */ if (intel_dp->pps.pps_idx < 0) intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on); /* didn't find one? pick any */ if (intel_dp->pps.pps_idx < 0) { intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n", encoder->base.base.id, encoder->base.name, pps_name(i915, &intel_dp->pps)); } else { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] initial power sequencer: %s\n", encoder->base.base.id, encoder->base.name, pps_name(i915, &intel_dp->pps)); } return intel_pps_is_valid(intel_dp); } void intel_pps_reset_all(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv))) return; if (!HAS_DISPLAY(dev_priv)) return; /* * We can't grab pps_mutex here due to deadlock with power_domain * mutex when power_domain functions are called while holding pps_mutex. * That also means that in order to use pps_pipe the code needs to * hold both a power domain reference and pps_mutex, and the power domain * reference get/put must be done while _not_ holding pps_mutex. * pps_{lock,unlock}() do these steps in the correct order, so one * should use them always. */ for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); if (encoder->type != INTEL_OUTPUT_EDP) continue; if (DISPLAY_VER(dev_priv) >= 9) intel_dp->pps.pps_reset = true; else intel_dp->pps.pps_pipe = INVALID_PIPE; } } struct pps_registers { i915_reg_t pp_ctrl; i915_reg_t pp_stat; i915_reg_t pp_on; i915_reg_t pp_off; i915_reg_t pp_div; }; static void intel_pps_get_registers(struct intel_dp *intel_dp, struct pps_registers *regs) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int pps_idx; memset(regs, 0, sizeof(*regs)); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pps_idx = vlv_power_sequencer_pipe(intel_dp); else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pps_idx = bxt_power_sequencer_idx(intel_dp); else pps_idx = intel_dp->pps.pps_idx; regs->pp_ctrl = PP_CONTROL(pps_idx); regs->pp_stat = PP_STATUS(pps_idx); regs->pp_on = PP_ON_DELAYS(pps_idx); regs->pp_off = PP_OFF_DELAYS(pps_idx); /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) regs->pp_div = INVALID_MMIO_REG; else regs->pp_div = PP_DIVISOR(pps_idx); } static i915_reg_t _pp_ctrl_reg(struct intel_dp *intel_dp) { struct pps_registers regs; intel_pps_get_registers(intel_dp, &regs); return regs.pp_ctrl; } static i915_reg_t _pp_stat_reg(struct intel_dp *intel_dp) { struct pps_registers regs; intel_pps_get_registers(intel_dp, &regs); return regs.pp_stat; } static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) return false; return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0; } static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 
lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) return false; return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; } void intel_pps_check_power_unlocked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { drm_WARN(&dev_priv->drm, 1, "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps), intel_de_read(dev_priv, _pp_stat_reg(intel_dp)), intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp))); } } #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) static void intel_pps_verify_state(struct intel_dp *intel_dp); static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_verify_state(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps), mask, value, intel_de_read(dev_priv, pp_stat_reg), intel_de_read(dev_priv, pp_ctrl_reg)); if (intel_de_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) drm_err(&dev_priv->drm, "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps), intel_de_read(dev_priv, pp_stat_reg), intel_de_read(dev_priv, pp_ctrl_reg)); drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); } static void wait_panel_on(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(i915, &intel_dp->pps)); wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); } static void wait_panel_off(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(i915, &intel_dp->pps)); wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); } static void wait_panel_power_cycle(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct 
intel_digital_port *dig_port = dp_to_dig_port(intel_dp); ktime_t panel_power_on_time; s64 panel_power_off_duration; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(i915, &intel_dp->pps)); /* take the difference of current time and panel power off time * and then make panel wait for t11_t12 if needed. */ panel_power_on_time = ktime_get_boottime(); panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time); /* When we disable the VDD override bit last we have to do the manual * wait. */ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay) wait_remaining_ms_from_jiffies(jiffies, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration); wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); } void intel_pps_wait_power_cycle(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; with_intel_pps_lock(intel_dp, wakeref) wait_panel_power_cycle(intel_dp); } static void wait_backlight_on(struct intel_dp *intel_dp) { wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on, intel_dp->pps.backlight_on_delay); } static void edp_wait_backlight_off(struct intel_dp *intel_dp) { wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off, intel_dp->pps.backlight_off_delay); } /* Read the current pp_control value, unlocking the register if it * is locked */ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; lockdep_assert_held(&dev_priv->display.pps.mutex); control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; } return control; } /* * Must be paired with intel_pps_vdd_off_unlocked(). * Must hold pps_mutex around the whole on/off sequence. * Can be nested with intel_pps_vdd_{on,off}() calls. 
*/ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->pps.want_panel_vdd; lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return false; cancel_delayed_work(&intel_dp->pps.panel_vdd_work); intel_dp->pps.want_panel_vdd = true; if (edp_have_panel_vdd(intel_dp)) return need_to_disable; drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref); intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); pp = ilk_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps), intel_de_read(dev_priv, pp_stat_reg), intel_de_read(dev_priv, pp_ctrl_reg)); /* * If the panel wasn't on, delay before accessing aux channel */ if (!edp_have_panel_power(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s panel power wasn't enabled\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); msleep(intel_dp->pps.panel_power_up_delay); } return need_to_disable; } /* * Must be paired with intel_pps_off(). * Nested calls to these functions are not allowed since * we drop the lock. Caller must use some higher level * locking to prevent nested calls from other threads. 
*/ void intel_pps_vdd_on(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); intel_wakeref_t wakeref; bool vdd; if (!intel_dp_is_edp(intel_dp)) return; vdd = false; with_intel_pps_lock(intel_dp, wakeref) vdd = intel_pps_vdd_on_unlocked(intel_dp); I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(i915, &intel_dp->pps)); } static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd); if (!edp_have_panel_vdd(intel_dp)) return; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); /* Make sure sequencer is idle before allowing subsequent activity */ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps), intel_de_read(dev_priv, pp_stat_reg), intel_de_read(dev_priv, pp_ctrl_reg)); if ((pp & PANEL_POWER_ON) == 0) intel_dp->pps.panel_power_off_time = ktime_get_boottime(); intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), fetch_and_zero(&intel_dp->pps.vdd_wakeref)); } void intel_pps_vdd_off_sync(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work); /* * vdd might still be enabled due to the delayed vdd off. * Make sure vdd is actually turned off here. */ with_intel_pps_lock(intel_dp, wakeref) intel_pps_vdd_off_sync_unlocked(intel_dp); } static void edp_panel_vdd_work(struct work_struct *__work) { struct intel_pps *pps = container_of(to_delayed_work(__work), struct intel_pps, panel_vdd_work); struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps); intel_wakeref_t wakeref; with_intel_pps_lock(intel_dp, wakeref) { if (!intel_dp->pps.want_panel_vdd) intel_pps_vdd_off_sync_unlocked(intel_dp); } } static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); unsigned long delay; /* * We may not yet know the real power sequencing delays, * so keep VDD enabled until we're done with init. */ if (intel_dp->pps.initializing) return; /* * Queue the timer to fire a long time from now (relative to the power * down delay) to keep the panel power up across a sequence of * operations. */ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5); queue_delayed_work(i915->unordered_wq, &intel_dp->pps.panel_vdd_work, delay); } /* * Must be paired with edp_panel_vdd_on(). * Must hold pps_mutex around the whole on/off sequence. * Can be nested with intel_pps_vdd_{on,off}() calls. 
*/ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(dev_priv, &intel_dp->pps)); intel_dp->pps.want_panel_vdd = false; if (sync) intel_pps_vdd_off_sync_unlocked(intel_dp); else edp_panel_vdd_schedule_off(intel_dp); } void intel_pps_on_unlocked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(dev_priv, &intel_dp->pps)); if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), "[ENCODER:%d:%s] %s panel power already on\n", dp_to_dig_port(intel_dp)->base.base.base.id, dp_to_dig_port(intel_dp)->base.base.name, pps_name(dev_priv, &intel_dp->pps))) return; wait_panel_power_cycle(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ilk_get_pp_control(intel_dp); if (IS_IRONLAKE(dev_priv)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); } pp |= PANEL_POWER_ON; if (!IS_IRONLAKE(dev_priv)) pp |= PANEL_POWER_RESET; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); wait_panel_on(intel_dp); intel_dp->pps.last_power_on = jiffies; if (IS_IRONLAKE(dev_priv)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); } } void intel_pps_on(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; with_intel_pps_lock(intel_dp, wakeref) intel_pps_on_unlocked(intel_dp); } void intel_pps_off_unlocked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s need VDD to turn off panel\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); pp = ilk_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. */ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | EDP_BLC_ENABLE); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); intel_dp->pps.want_panel_vdd = false; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); wait_panel_off(intel_dp); intel_dp->pps.panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. 
*/ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port), fetch_and_zero(&intel_dp->pps.vdd_wakeref)); } void intel_pps_off(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; with_intel_pps_lock(intel_dp, wakeref) intel_pps_off_unlocked(intel_dp); } /* Enable backlight in the panel power control. */ void intel_pps_backlight_on(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_wakeref_t wakeref; /* * If we enable the backlight right away following a panel power * on, we may see slight flicker as the panel syncs with the eDP * link. So delay a bit to make sure the image is solid before * allowing it to appear. */ wait_backlight_on(intel_dp); with_intel_pps_lock(intel_dp, wakeref) { i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; pp = ilk_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); } } /* Disable backlight in the panel power control. */ void intel_pps_backlight_off(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; with_intel_pps_lock(intel_dp, wakeref) { i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); u32 pp; pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; intel_de_write(dev_priv, pp_ctrl_reg, pp); intel_de_posting_read(dev_priv, pp_ctrl_reg); } intel_dp->pps.last_backlight_off = jiffies; edp_wait_backlight_off(intel_dp); } /* * Hook for controlling the panel power control backlight through the bl_power * sysfs attribute. Take care to handle multiple calls. */ void intel_pps_backlight_power(struct intel_connector *connector, bool enable) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(connector); intel_wakeref_t wakeref; bool is_enabled; is_enabled = false; with_intel_pps_lock(intel_dp, wakeref) is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; if (is_enabled == enable) return; drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", enable ? "enable" : "disable"); if (enable) intel_pps_backlight_on(intel_dp); else intel_pps_backlight_off(intel_dp); } static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum pipe pipe = intel_dp->pps.pps_pipe; i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) return; intel_pps_vdd_off_sync_unlocked(intel_dp); /* * VLV seems to get confused when multiple power sequencers * have the same port selected (even if only one has power/vdd * enabled). The failure manifests as vlv_wait_port_ready() failing * CHV on the other hand doesn't seem to mind having the same port * selected in multiple power sequencers, but let's clear the * port select always when logically disconnecting a power sequencer * from a port. 
*/ drm_dbg_kms(&dev_priv->drm, "detaching %s from [ENCODER:%d:%s]\n", pps_name(dev_priv, &intel_dp->pps), dig_port->base.base.base.id, dig_port->base.base.name); intel_de_write(dev_priv, pp_on_reg, 0); intel_de_posting_read(dev_priv, pp_on_reg); intel_dp->pps.pps_pipe = INVALID_PIPE; } static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_encoder *encoder; lockdep_assert_held(&dev_priv->display.pps.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe, "stealing PPS %c from active [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); if (intel_dp->pps.pps_pipe != pipe) continue; drm_dbg_kms(&dev_priv->drm, "stealing PPS %c from [ENCODER:%d:%s]\n", pipe_name(pipe), encoder->base.base.id, encoder->base.name); /* make sure vdd is off before we steal it */ vlv_detach_power_sequencer(intel_dp); } } void vlv_pps_init(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); if (intel_dp->pps.pps_pipe != INVALID_PIPE && intel_dp->pps.pps_pipe != crtc->pipe) { /* * If another power sequencer was being used on this * port previously make sure to turn off vdd there while * we still have control of it. */ vlv_detach_power_sequencer(intel_dp); } /* * We may be stealing the power * sequencer from another port. */ vlv_steal_power_sequencer(dev_priv, crtc->pipe); intel_dp->pps.active_pipe = crtc->pipe; if (!intel_dp_is_edp(intel_dp)) return; /* now it's all ours */ intel_dp->pps.pps_pipe = crtc->pipe; drm_dbg_kms(&dev_priv->drm, "initializing %s for [ENCODER:%d:%s]\n", pps_name(dev_priv, &intel_dp->pps), encoder->base.base.id, encoder->base.name); /* init power sequencer on this pipe and port */ pps_init_delays(intel_dp); pps_init_registers(intel_dp, true); } static void pps_vdd_init(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); lockdep_assert_held(&dev_priv->display.pps.mutex); if (!edp_have_panel_vdd(intel_dp)) return; /* * The VDD bit needs a power domain reference, so if the bit is * already enabled when we boot or resume, grab this reference and * schedule a vdd off, so we don't hold on to the reference * indefinitely. 
*/ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(dev_priv, &intel_dp->pps)); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref); intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); } bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; bool have_power = false; with_intel_pps_lock(intel_dp, wakeref) { have_power = edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp); } return have_power; } static void pps_init_timestamps(struct intel_dp *intel_dp) { /* * Initialize panel power off time to 0, assuming panel power could have * been toggled between kernel boot and now only by a previously loaded * and removed i915, which has already ensured sufficient power off * delay at module remove. */ intel_dp->pps.panel_power_off_time = 0; intel_dp->pps.last_power_on = jiffies; intel_dp->pps.last_backlight_off = jiffies; } static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, pp_ctl; struct pps_registers regs; intel_pps_get_registers(intel_dp, &regs); pp_ctl = ilk_get_pp_control(intel_dp); /* Ensure PPS is unlocked */ if (!HAS_DDI(dev_priv)) intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); pp_on = intel_de_read(dev_priv, regs.pp_on); pp_off = intel_de_read(dev_priv, regs.pp_off); /* Pull timing values out of registers */ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); if (i915_mmio_reg_valid(regs.pp_div)) { u32 pp_div; pp_div = intel_de_read(dev_priv, regs.pp_div); seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; } else { seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; } } static void intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, const struct edp_power_seq *seq) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", state_name, seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); } static void intel_pps_verify_state(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct edp_power_seq hw; struct edp_power_seq *sw = &intel_dp->pps.pps_delays; intel_pps_readout_hw_state(intel_dp, &hw); if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { drm_err(&i915->drm, "PPS state mismatch\n"); intel_pps_dump_state(intel_dp, "sw", sw); intel_pps_dump_state(intel_dp, "hw", &hw); } } static bool pps_delays_valid(struct edp_power_seq *delays) { return delays->t1_t3 || delays->t8 || delays->t9 || delays->t10 || delays->t11_t12; } static void pps_init_delays_bios(struct intel_dp *intel_dp, struct edp_power_seq *bios) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->display.pps.mutex); if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays)) intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays); *bios = intel_dp->pps.bios_pps_delays; intel_pps_dump_state(intel_dp, "bios", bios); } static void pps_init_delays_vbt(struct intel_dp *intel_dp, struct edp_power_seq *vbt) { struct drm_i915_private *dev_priv = 
dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; *vbt = connector->panel.vbt.edp.pps; if (!pps_delays_valid(vbt)) return; /* On Toshiba Satellite P50-C-18C system the VBT T12 delay * of 500ms appears to be too short. Ocassionally the panel * just fails to power back on. Increasing the delay to 800ms * seems sufficient to avoid this problem. */ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) { vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10); drm_dbg_kms(&dev_priv->drm, "Increasing T12 panel delay as per the quirk to %d\n", vbt->t11_t12); } /* T11_T12 delay is special and actually in units of 100ms, but zero * based in the hw (so we need to add 100 ms). But the sw vbt * table multiplies it with 1000 to make it in units of 100usec, * too. */ vbt->t11_t12 += 100 * 10; intel_pps_dump_state(intel_dp, "vbt", vbt); } static void pps_init_delays_spec(struct intel_dp *intel_dp, struct edp_power_seq *spec) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->display.pps.mutex); /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ spec->t1_t3 = 210 * 10; spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ spec->t10 = 500 * 10; /* This one is special and actually in units of 100ms, but zero * based in the hw (so we need to add 100 ms). But the sw vbt * table multiplies it with 1000 to make it in units of 100usec, * too. */ spec->t11_t12 = (510 + 100) * 10; intel_pps_dump_state(intel_dp, "spec", spec); } static void pps_init_delays(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps.pps_delays; lockdep_assert_held(&dev_priv->display.pps.mutex); /* already initialized? */ if (pps_delays_valid(final)) return; pps_init_delays_bios(intel_dp, &cur); pps_init_delays_vbt(intel_dp, &vbt); pps_init_delays_spec(intel_dp, &spec); /* Use the max of the register settings and vbt. If both are * unset, fall back to the spec limits. */ #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ spec.field : \ max(cur.field, vbt.field)) assign_final(t1_t3); assign_final(t8); assign_final(t9); assign_final(t10); assign_final(t11_t12); #undef assign_final #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) intel_dp->pps.panel_power_up_delay = get_delay(t1_t3); intel_dp->pps.backlight_on_delay = get_delay(t8); intel_dp->pps.backlight_off_delay = get_delay(t9); intel_dp->pps.panel_power_down_delay = get_delay(t10); intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12); #undef get_delay drm_dbg_kms(&dev_priv->drm, "panel power up delay %d, power down delay %d, power cycle delay %d\n", intel_dp->pps.panel_power_up_delay, intel_dp->pps.panel_power_down_delay, intel_dp->pps.panel_power_cycle_delay); drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", intel_dp->pps.backlight_on_delay, intel_dp->pps.backlight_off_delay); /* * We override the HW backlight delays to 1 because we do manual waits * on them. For T8, even BSpec recommends doing it. For T9, if we * don't do this, we'll end up waiting for the backlight off delay * twice: once when we do the manual sleep, and once when we disable * the panel and wait for the PP_STATUS bit to become zero. */ final->t8 = 1; final->t9 = 1; /* * HW has only a 100msec granularity for t11_t12 so round it up * accordingly. 
*/ final->t11_t12 = roundup(final->t11_t12, 100 * 10); } static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, port_sel = 0; int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->base.port; const struct edp_power_seq *seq = &intel_dp->pps.pps_delays; lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_get_registers(intel_dp, &regs); /* * On some VLV machines the BIOS can leave the VDD * enabled even on power sequencers which aren't * hooked up to any port. This would mess up the * power domain tracking the first time we pick * one of these power sequencers for use since * intel_pps_vdd_on_unlocked() would notice that the VDD was * already on and therefore wouldn't grab the power * domain reference. Disable VDD first to avoid this. * This also avoids spuriously turning the VDD on as * soon as the new power sequencer gets initialized. */ if (force_disable_vdd) { u32 pp = ilk_get_pp_control(intel_dp); drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, "Panel power already on\n"); if (pp & EDP_FORCE_VDD) drm_dbg_kms(&dev_priv->drm, "VDD already on, disabling first\n"); pp &= ~EDP_FORCE_VDD; intel_de_write(dev_priv, regs.pp_ctrl, pp); } pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { port_sel = PANEL_PORT_SELECT_VLV(port); } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { switch (port) { case PORT_A: port_sel = PANEL_PORT_SELECT_DPA; break; case PORT_C: port_sel = PANEL_PORT_SELECT_DPC; break; case PORT_D: port_sel = PANEL_PORT_SELECT_DPD; break; default: MISSING_CASE(port); break; } } pp_on |= port_sel; intel_de_write(dev_priv, regs.pp_on, pp_on); intel_de_write(dev_priv, regs.pp_off, pp_off); /* * Compute the divisor for the pp clock, simply match the Bspec formula. */ if (i915_mmio_reg_valid(regs.pp_div)) intel_de_write(dev_priv, regs.pp_div, REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); else intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); drm_dbg_kms(&dev_priv->drm, "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", intel_de_read(dev_priv, regs.pp_on), intel_de_read(dev_priv, regs.pp_off), i915_mmio_reg_valid(regs.pp_div) ? intel_de_read(dev_priv, regs.pp_div) : (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); } void intel_pps_encoder_reset(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); intel_wakeref_t wakeref; if (!intel_dp_is_edp(intel_dp)) return; with_intel_pps_lock(intel_dp, wakeref) { /* * Reinit the power sequencer also on the resume path, in case * BIOS did something nasty with it. 
*/ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) vlv_initial_power_sequencer_setup(intel_dp); pps_init_delays(intel_dp); pps_init_registers(intel_dp, false); pps_vdd_init(intel_dp); if (edp_have_panel_vdd(intel_dp)) edp_panel_vdd_schedule_off(intel_dp); } } bool intel_pps_init(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; bool ret; intel_dp->pps.initializing = true; INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work); pps_init_timestamps(intel_dp); with_intel_pps_lock(intel_dp, wakeref) { ret = pps_initial_setup(intel_dp); pps_init_delays(intel_dp); pps_init_registers(intel_dp, false); pps_vdd_init(intel_dp); } return ret; } static void pps_init_late(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_connector *connector = intel_dp->attached_connector; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) return; if (intel_num_pps(i915) < 2) return; drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 && intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller, "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n", encoder->base.base.id, encoder->base.name, intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller); if (connector->panel.vbt.backlight.controller >= 0) intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller; } void intel_pps_init_late(struct intel_dp *intel_dp) { intel_wakeref_t wakeref; with_intel_pps_lock(intel_dp, wakeref) { /* Reinit delays after per-panel info has been parsed from VBT */ pps_init_late(intel_dp); memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays)); pps_init_delays(intel_dp); pps_init_registers(intel_dp, false); intel_dp->pps.initializing = false; if (edp_have_panel_vdd(intel_dp)) edp_panel_vdd_schedule_off(intel_dp); } } void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) { int pps_num; int pps_idx; if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv)) return; /* * This w/a is needed at least on CPT/PPT, but to be sure apply it * everywhere where registers can be write protected. 
*/ pps_num = intel_num_pps(dev_priv); for (pps_idx = 0; pps_idx < pps_num; pps_idx++) intel_de_rmw(dev_priv, PP_CONTROL(pps_idx), PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS); } void intel_pps_setup(struct drm_i915_private *i915) { if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) i915->display.pps.mmio_base = PCH_PPS_BASE; else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) i915->display.pps.mmio_base = VLV_PPS_BASE; else i915->display.pps.mmio_base = PPS_BASE; } void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = INVALID_PIPE; bool locked = true; if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) return; if (HAS_PCH_SPLIT(dev_priv)) { u32 port_sel; pp_reg = PP_CONTROL(0); port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; switch (port_sel) { case PANEL_PORT_SELECT_LVDS: intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); break; case PANEL_PORT_SELECT_DPA: g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe); break; case PANEL_PORT_SELECT_DPC: g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe); break; case PANEL_PORT_SELECT_DPD: g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe); break; default: MISSING_CASE(port_sel); break; } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* presumably write lock depends on pipe, not port select */ pp_reg = PP_CONTROL(pipe); panel_pipe = pipe; } else { u32 port_sel; pp_reg = PP_CONTROL(0); port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; drm_WARN_ON(&dev_priv->drm, port_sel != PANEL_PORT_SELECT_LVDS); intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); } val = intel_de_read(dev_priv, pp_reg); if (!(val & PANEL_POWER_ON) || ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked, "panel assertion failure, pipe %c regs locked\n", pipe_name(pipe)); }
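/*
 * Illustrative sketch only (not part of the original driver): how a caller
 * is expected to pair intel_pps_vdd_on_unlocked() with
 * intel_pps_vdd_off_unlocked() under the PPS lock, e.g. around AUX/DPCD
 * accesses that need panel VDD. The function name example_edp_vdd_user()
 * is hypothetical; the helpers it calls are the ones defined above.
 */
#if 0
/* example_edp_vdd_user() is a hypothetical caller, for illustration only */
static void example_edp_vdd_user(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd = false;

	with_intel_pps_lock(intel_dp, wakeref) {
		/* Force VDD on; remember whether this call enabled it. */
		vdd = intel_pps_vdd_on_unlocked(intel_dp);

		/* ... do work that requires panel VDD here ... */

		/*
		 * Drop our VDD request only if we were the ones who made it.
		 * With sync == false the actual power-down is deferred via
		 * edp_panel_vdd_schedule_off().
		 */
		if (vdd)
			intel_pps_vdd_off_unlocked(intel_dp, false);
	}
}
#endif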
linux-master
drivers/gpu/drm/i915/display/intel_pps.c
/* * * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter * * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_display_types.h" #include "intel_dvo_dev.h" #define NS2501_VID 0x1305 #define NS2501_DID 0x6726 #define NS2501_VID_LO 0x00 #define NS2501_VID_HI 0x01 #define NS2501_DID_LO 0x02 #define NS2501_DID_HI 0x03 #define NS2501_REV 0x04 #define NS2501_RSVD 0x05 #define NS2501_FREQ_LO 0x06 #define NS2501_FREQ_HI 0x07 #define NS2501_REG8 0x08 #define NS2501_8_VEN (1<<5) #define NS2501_8_HEN (1<<4) #define NS2501_8_DSEL (1<<3) #define NS2501_8_BPAS (1<<2) #define NS2501_8_RSVD (1<<1) #define NS2501_8_PD (1<<0) #define NS2501_REG9 0x09 #define NS2501_9_VLOW (1<<7) #define NS2501_9_MSEL_MASK (0x7<<4) #define NS2501_9_TSEL (1<<3) #define NS2501_9_RSEN (1<<2) #define NS2501_9_RSVD (1<<1) #define NS2501_9_MDI (1<<0) #define NS2501_REGC 0x0c /* * The following registers are not part of the official datasheet * and are the result of reverse engineering. */ /* * Register c0 controls how the DVO synchronizes with * its input. */ #define NS2501_REGC0 0xc0 #define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */ #define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */ #define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */ #define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */ /* * Register 41 is somehow related to the sync register and sync * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off) * and 0x00 otherwise. */ #define NS2501_REG41 0x41 /* * this register controls the dithering of the DVO * One bit enables it, the other define the dithering depth. * The higher the value, the lower the dithering depth. */ #define NS2501_F9_REG 0xf9 #define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */ #define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */ #define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */ /* * PLL configuration register. This is a pair of registers, * one single byte register at 1B, and a pair at 1C,1D. * These registers are counters/dividers. */ #define NS2501_REG1B 0x1b /* one byte PLL control register */ #define NS2501_REG1C 0x1c /* low-part of the second register */ #define NS2501_REG1D 0x1d /* high-part of the second register */ /* * Scaler control registers. Horizontal at b8,b9, * vertical at 10,11. The scale factor is computed as * 2^16/control-value. The low-byte comes first. 
*/ #define NS2501_REG10 0x10 /* low-byte vertical scaler */ #define NS2501_REG11 0x11 /* high-byte vertical scaler */ #define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */ #define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */ /* * Display window definition. This consists of four registers * per dimension. One register pair defines the start of the * display, one the end. * As far as I understand, this defines the window within which * the scaler samples the input. */ #define NS2501_REGC1 0xc1 /* low-byte horizontal display start */ #define NS2501_REGC2 0xc2 /* high-byte horizontal display start */ #define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */ #define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */ #define NS2501_REGC5 0xc5 /* low-byte vertical display start */ #define NS2501_REGC6 0xc6 /* high-byte vertical display start */ #define NS2501_REGC7 0xc7 /* low-byte vertical display stop */ #define NS2501_REGC8 0xc8 /* high-byte vertical display stop */ /* * The following register pair seems to define the start of * the vertical sync. If automatic syncing is enabled, and the * register value defines a sync pulse that is later than the * incoming sync, then the register value is ignored and the * external hsync triggers the synchronization. */ #define NS2501_REG80 0x80 /* low-byte vsync-start */ #define NS2501_REG81 0x81 /* high-byte vsync-start */ /* * The following register pair seems to define the total number * of lines created at the output side of the scaler. * This is again a low-high register pair. */ #define NS2501_REG82 0x82 /* output display height, low byte */ #define NS2501_REG83 0x83 /* output display height, high byte */ /* * The following registers define the end of the front-porch * in horizontal and vertical position and hence allow to shift * the image left/right or up/down. */ #define NS2501_REG98 0x98 /* horizontal start of display + 256, low */ #define NS2501_REG99 0x99 /* horizontal start of display + 256, high */ #define NS2501_REG8E 0x8e /* vertical start of the display, low byte */ #define NS2501_REG8F 0x8f /* vertical start of the display, high byte */ /* * The following register pair control the function of the * backlight and the DVO output. To enable the corresponding * function, the corresponding bit must be set in both registers. */ #define NS2501_REG34 0x34 /* DVO enable functions, first register */ #define NS2501_REG35 0x35 /* DVO enable functions, second register */ #define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */ #define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */ /* * Registers 9C and 9D define the vertical output offset * of the visible region. */ #define NS2501_REG9C 0x9c #define NS2501_REG9D 0x9d /* * The register 9F defines the dithering. This requires the * scaler to be ON. Bit 0 enables dithering, the remaining * bits control the depth of the dither. The higher the value, * the LOWER the dithering amplitude. A good value seems to be * 15 (total register value). */ #define NS2501_REGF9 0xf9 #define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */ #define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */ #define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */ enum { MODE_640x480, MODE_800x600, MODE_1024x768, }; struct ns2501_reg { u8 offset; u8 value; }; /* * The following structure keeps the complete configuration of * the DVO, given a specific output configuration. * This is pretty much guess-work from reverse-engineering, so * read all this with a grain of salt. 
*/ struct ns2501_configuration { u8 sync; /* configuration of the C0 register */ u8 conf; /* configuration register 8 */ u8 syncb; /* configuration register 41 */ u8 dither; /* configuration of the dithering */ u8 pll_a; /* PLL configuration, register A, 1B */ u16 pll_b; /* PLL configuration, register B, 1C/1D */ u16 hstart; /* horizontal start, registers C1/C2 */ u16 hstop; /* horizontal total, registers C3/C4 */ u16 vstart; /* vertical start, registers C5/C6 */ u16 vstop; /* vertical total, registers C7/C8 */ u16 vsync; /* manual vertical sync start, 80/81 */ u16 vtotal; /* number of lines generated, 82/83 */ u16 hpos; /* horizontal position + 256, 98/99 */ u16 vpos; /* vertical position, 8e/8f */ u16 voffs; /* vertical output offset, 9c/9d */ u16 hscale; /* horizontal scaling factor, b8/b9 */ u16 vscale; /* vertical scaling factor, 10/11 */ }; /* * DVO configuration values, partially based on what the BIOS * of the Fujitsu Lifebook S6010 writes into registers, * partially found by manual tweaking. These configurations assume * a 1024x768 panel. */ static const struct ns2501_configuration ns2501_modes[] = { [MODE_640x480] = { .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x32, .dither = 0x0f, .pll_a = 17, .pll_b = 852, .hstart = 144, .hstop = 783, .vstart = 22, .vstop = 514, .vsync = 2047, /* actually, ignored with this config */ .vtotal = 1341, .hpos = 0, .vpos = 16, .voffs = 36, .hscale = 40960, .vscale = 40960 }, [MODE_800x600] = { .sync = NS2501_C0_ENABLE | NS2501_C0_HSYNC | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x00, .dither = 0x0f, .pll_a = 25, .pll_b = 612, .hstart = 215, .hstop = 1016, .vstart = 26, .vstop = 627, .vsync = 807, .vtotal = 1341, .hpos = 0, .vpos = 4, .voffs = 35, .hscale = 51248, .vscale = 51232 }, [MODE_1024x768] = { .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, .syncb = 0x32, .dither = 0x0f, .pll_a = 11, .pll_b = 1350, .hstart = 276, .hstop = 1299, .vstart = 15, .vstop = 1056, .vsync = 2047, .vtotal = 1341, .hpos = 0, .vpos = 7, .voffs = 27, .hscale = 65535, .vscale = 65535 } }; /* * Other configuration values left by the BIOS of the * Fujitsu S6010 in the DVO control registers. Their * value does not depend on the BIOS and their meaning * is unknown. 
*/ static const struct ns2501_reg mode_agnostic_values[] = { /* 08 is mode specific */ [0] = { .offset = 0x0a, .value = 0x81, }, /* 10,11 are part of the mode specific configuration */ [1] = { .offset = 0x12, .value = 0x02, }, [2] = { .offset = 0x18, .value = 0x07, }, [3] = { .offset = 0x19, .value = 0x00, }, [4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */ /* 1b,1c,1d are part of the mode specific configuration */ [5] = { .offset = 0x1e, .value = 0x02, }, [6] = { .offset = 0x1f, .value = 0x40, }, [7] = { .offset = 0x20, .value = 0x00, }, [8] = { .offset = 0x21, .value = 0x00, }, [9] = { .offset = 0x22, .value = 0x00, }, [10] = { .offset = 0x23, .value = 0x00, }, [11] = { .offset = 0x24, .value = 0x00, }, [12] = { .offset = 0x25, .value = 0x00, }, [13] = { .offset = 0x26, .value = 0x00, }, [14] = { .offset = 0x27, .value = 0x00, }, [15] = { .offset = 0x7e, .value = 0x18, }, /* 80-84 are part of the mode-specific configuration */ [16] = { .offset = 0x84, .value = 0x00, }, [17] = { .offset = 0x85, .value = 0x00, }, [18] = { .offset = 0x86, .value = 0x00, }, [19] = { .offset = 0x87, .value = 0x00, }, [20] = { .offset = 0x88, .value = 0x00, }, [21] = { .offset = 0x89, .value = 0x00, }, [22] = { .offset = 0x8a, .value = 0x00, }, [23] = { .offset = 0x8b, .value = 0x00, }, [24] = { .offset = 0x8c, .value = 0x10, }, [25] = { .offset = 0x8d, .value = 0x02, }, /* 8e,8f are part of the mode-specific configuration */ [26] = { .offset = 0x90, .value = 0xff, }, [27] = { .offset = 0x91, .value = 0x07, }, [28] = { .offset = 0x92, .value = 0xa0, }, [29] = { .offset = 0x93, .value = 0x02, }, [30] = { .offset = 0x94, .value = 0x00, }, [31] = { .offset = 0x95, .value = 0x00, }, [32] = { .offset = 0x96, .value = 0x05, }, [33] = { .offset = 0x97, .value = 0x00, }, /* 98,99 are part of the mode-specific configuration */ [34] = { .offset = 0x9a, .value = 0x88, }, [35] = { .offset = 0x9b, .value = 0x00, }, /* 9c,9d are part of the mode-specific configuration */ [36] = { .offset = 0x9e, .value = 0x25, }, [37] = { .offset = 0x9f, .value = 0x03, }, [38] = { .offset = 0xa0, .value = 0x28, }, [39] = { .offset = 0xa1, .value = 0x01, }, [40] = { .offset = 0xa2, .value = 0x28, }, [41] = { .offset = 0xa3, .value = 0x05, }, /* register 0xa4 is mode specific, but 0x80..0x84 works always */ [42] = { .offset = 0xa4, .value = 0x84, }, [43] = { .offset = 0xa5, .value = 0x00, }, [44] = { .offset = 0xa6, .value = 0x00, }, [45] = { .offset = 0xa7, .value = 0x00, }, [46] = { .offset = 0xa8, .value = 0x00, }, /* 0xa9 to 0xab are mode specific, but have no visible effect */ [47] = { .offset = 0xa9, .value = 0x04, }, [48] = { .offset = 0xaa, .value = 0x70, }, [49] = { .offset = 0xab, .value = 0x4f, }, [50] = { .offset = 0xac, .value = 0x00, }, [51] = { .offset = 0xad, .value = 0x00, }, [52] = { .offset = 0xb6, .value = 0x09, }, [53] = { .offset = 0xb7, .value = 0x03, }, /* b8,b9 are part of the mode-specific configuration */ [54] = { .offset = 0xba, .value = 0x00, }, [55] = { .offset = 0xbb, .value = 0x20, }, [56] = { .offset = 0xf3, .value = 0x90, }, [57] = { .offset = 0xf4, .value = 0x00, }, [58] = { .offset = 0xf7, .value = 0x88, }, /* f8 is mode specific, but the value does not matter */ [59] = { .offset = 0xf8, .value = 0x0a, }, [60] = { .offset = 0xf9, .value = 0x00, } }; static const struct ns2501_reg regs_init[] = { [0] = { .offset = 0x35, .value = 0xff, }, [1] = { .offset = 0x34, .value = 0x00, }, [2] = { .offset = 0x08, .value = 0x30, }, }; struct ns2501_priv { bool quiet; const struct ns2501_configuration *conf; 
}; #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) /* ** Read a register from the ns2501. ** Returns true if successful, false otherwise. ** If it returns false, it might be wise to enable the ** DVO with the above function. */ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!ns->quiet) { DRM_DEBUG_KMS ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* ** Write a register to the ns2501. ** Returns true if successful, false otherwise. ** If it returns false, it might be wise to enable the ** DVO with the above function. */ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) { return true; } if (!ns->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", addr, adapter->name, dvo->slave_addr); } return false; } /* National Semiconductor 2501 driver for chip on i2c bus * scan for the chip on the bus. * Hope the VBIOS initialized the PLL correctly so we can * talk to it. If not, it will not be seen and not detected. * Bummer! */ static bool ns2501_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the NS2501 chip on the specified i2c bus */ struct ns2501_priv *ns; unsigned char ch; ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); if (ns == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = ns; ns->quiet = true; if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) goto out; if (ch != (NS2501_VID & 0xff)) { DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) goto out; if (ch != (NS2501_DID & 0xff)) { DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } ns->quiet = false; DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); return true; out: kfree(ns); return false; } static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) { /* * This is a Laptop display, it doesn't have hotplugging. * Even if not, the detection bit of the 2501 is unreliable as * it only works for some display types. * It is even more unreliable as the PLL must be active for * allowing reading from the chiop. */ return connector_status_connected; } static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { DRM_DEBUG_KMS ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); /* * Currently, these are all the modes I have data from. * More might exist. Unclear how to find the native resolution * of the panel in here so we could always accept it * by disabling the scaler. 
*/ if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) || (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) || (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) { return MODE_OK; } else { return MODE_ONE_SIZE; /* Is this a reasonable error? */ } } static void ns2501_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { const struct ns2501_configuration *conf; struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); int mode_idx, i; DRM_DEBUG_KMS ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); DRM_DEBUG_KMS("Detailed requested mode settings are:\n" "clock : %d kHz\n" "hdisplay : %d\n" "hblank start : %d\n" "hblank end : %d\n" "hsync start : %d\n" "hsync end : %d\n" "htotal : %d\n" "hskew : %d\n" "vdisplay : %d\n" "vblank start : %d\n" "vblank end : %d\n" "vsync start : %d\n" "vsync end : %d\n" "vtotal : %d\n", adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_hblank_start, adjusted_mode->crtc_hblank_end, adjusted_mode->crtc_hsync_start, adjusted_mode->crtc_hsync_end, adjusted_mode->crtc_htotal, adjusted_mode->crtc_hskew, adjusted_mode->crtc_vdisplay, adjusted_mode->crtc_vblank_start, adjusted_mode->crtc_vblank_end, adjusted_mode->crtc_vsync_start, adjusted_mode->crtc_vsync_end, adjusted_mode->crtc_vtotal); if (mode->hdisplay == 640 && mode->vdisplay == 480) mode_idx = MODE_640x480; else if (mode->hdisplay == 800 && mode->vdisplay == 600) mode_idx = MODE_800x600; else if (mode->hdisplay == 1024 && mode->vdisplay == 768) mode_idx = MODE_1024x768; else return; /* Hopefully doing it every time won't hurt... 
*/ for (i = 0; i < ARRAY_SIZE(regs_init); i++) ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value); /* Write the mode-agnostic values */ for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++) ns2501_writeb(dvo, mode_agnostic_values[i].offset, mode_agnostic_values[i].value); /* Write now the mode-specific configuration */ conf = ns2501_modes + mode_idx; ns->conf = conf; ns2501_writeb(dvo, NS2501_REG8, conf->conf); ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a); ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff); ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8); ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff); ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8); ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff); ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8); ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff); ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8); ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff); ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8); ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff); ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8); ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff); ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8); ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff); ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8); ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff); ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8); ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff); ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8); ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff); ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8); ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff); ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8); ns2501_writeb(dvo, NS2501_REGF9, conf->dither); ns2501_writeb(dvo, NS2501_REG41, conf->syncb); ns2501_writeb(dvo, NS2501_REGC0, conf->sync); } /* set the NS2501 power state */ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) { unsigned char ch; if (!ns2501_readb(dvo, NS2501_REG8, &ch)) return false; return ch & NS2501_8_PD; } /* set the NS2501 power state */ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) { struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); if (enable) { ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08); ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb); ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); msleep(15); ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf | NS2501_8_BPAS); if (!(ns->conf->conf & NS2501_8_BPAS)) ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf); msleep(200); ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT); ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync); } else { ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); msleep(200); ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN | NS2501_8_BPAS); msleep(15); ns2501_writeb(dvo, NS2501_REG34, 0x00); } } static void ns2501_destroy(struct intel_dvo_device *dvo) { struct ns2501_priv *ns = dvo->dev_priv; if (ns) { kfree(ns); dvo->dev_priv = NULL; } } const struct intel_dvo_dev_ops ns2501_ops = { .init = ns2501_init, .detect = ns2501_detect, .mode_valid = ns2501_mode_valid, .mode_set = ns2501_mode_set, .dpms = ns2501_dpms, .get_hw_state = ns2501_get_hw_state, .destroy = ns2501_destroy, };
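/*
 * Illustrative sketch only (not part of the original driver): the hscale and
 * vscale values in ns2501_modes[] appear to follow 2^16 * input / native
 * (assuming a 1024x768 native panel), clamped to 16 bits, e.g.
 * 640 * 65536 / 1024 == 40960. ns2501_compute_scale() is a hypothetical
 * helper showing that relationship; the BIOS-derived 800x600 entries above
 * use slightly tweaked values.
 */
#if 0
/* hypothetical helper, illustration only; native assumed non-zero */
static u16 ns2501_compute_scale(int input, int native)
{
	u32 scale = (u32)input * 65536 / native;

	return min_t(u32, scale, 0xffff);
}
#endif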
linux-master
drivers/gpu/drm/i915/display/dvo_ns2501.c
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_driver.h" #include "intel_display_power_map.h" #include "intel_display_types.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_mg_phy_regs.h" #include "intel_modeset_lock.h" #include "intel_tc.h" #define DP_PIN_ASSIGNMENT_C 0x3 #define DP_PIN_ASSIGNMENT_D 0x4 #define DP_PIN_ASSIGNMENT_E 0x5 enum tc_port_mode { TC_PORT_DISCONNECTED, TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY, }; struct intel_tc_port; struct intel_tc_phy_ops { enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc); u32 (*hpd_live_status)(struct intel_tc_port *tc); bool (*is_ready)(struct intel_tc_port *tc); bool (*is_owned)(struct intel_tc_port *tc); void (*get_hw_state)(struct intel_tc_port *tc); bool (*connect)(struct intel_tc_port *tc, int required_lanes); void (*disconnect)(struct intel_tc_port *tc); void (*init)(struct intel_tc_port *tc); }; struct intel_tc_port { struct intel_digital_port *dig_port; const struct intel_tc_phy_ops *phy_ops; struct mutex lock; /* protects the TypeC port mode */ intel_wakeref_t lock_wakeref; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) enum intel_display_power_domain lock_power_domain; #endif struct delayed_work disconnect_phy_work; struct delayed_work link_reset_work; int link_refcount; bool legacy_port:1; char port_name[8]; enum tc_port_mode mode; enum tc_port_mode init_mode; enum phy_fia phy_fia; u8 phy_fia_idx; }; static enum intel_display_power_domain tc_phy_cold_off_domain(struct intel_tc_port *); static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc); static bool tc_phy_is_ready(struct intel_tc_port *tc); static bool tc_phy_wait_for_ready(struct intel_tc_port *tc); static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc); static const char *tc_port_mode_name(enum tc_port_mode mode) { static const char * const names[] = { [TC_PORT_DISCONNECTED] = "disconnected", [TC_PORT_TBT_ALT] = "tbt-alt", [TC_PORT_DP_ALT] = "dp-alt", [TC_PORT_LEGACY] = "legacy", }; if (WARN_ON(mode >= ARRAY_SIZE(names))) mode = TC_PORT_DISCONNECTED; return names[mode]; } static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port) { return dig_port->tc; } static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) { return to_i915(tc->dig_port->base.base.dev); } static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); struct intel_tc_port *tc = to_tc_port(dig_port); return intel_phy_is_tc(i915, phy) && tc->mode == mode; } bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port) { return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT); } bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port) { return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT); } bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port) { return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY); } /* * The display power domains used for TC ports depending on the * platform and TC mode (legacy, DP-alt, TBT): * * POWER_DOMAIN_DISPLAY_CORE: * -------------------------- * ADLP/all modes: * - TCSS/IOM access for PHY ready state. 
* ADLP+/all modes: * - DE/north-,south-HPD ISR access for HPD live state. * * POWER_DOMAIN_PORT_DDI_LANES_<port>: * ----------------------------------- * ICL+/all modes: * - DE/DDI_BUF access for port enabled state. * ADLP/all modes: * - DE/DDI_BUF access for PHY owned state. * * POWER_DOMAIN_AUX_USBC<TC port index>: * ------------------------------------- * ICL/legacy mode: * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state * - TCSS/PHY: block TC-cold power state for using the PHY AUX and * main lanes. * ADLP/legacy, DP-alt modes: * - TCSS/PHY: block TC-cold power state for using the PHY AUX and * main lanes. * * POWER_DOMAIN_TC_COLD_OFF: * ------------------------- * ICL/DP-alt, TBT mode: * - TCSS/TBT: block TC-cold power state for using the (direct or * TBT DP-IN) AUX and main lanes. * * TGL/all modes: * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state * - TCSS/PHY: block TC-cold power state for using the (direct or * TBT DP-IN) AUX and main lanes. * * ADLP/TBT mode: * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN) * AUX and main lanes. * * XELPDP+/all modes: * - TCSS/IOM,FIA access for PHY ready, owned state * - TCSS/PHY: block TC-cold power state for using the (direct or * TBT DP-IN) AUX and main lanes. */ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); return tc_phy_cold_off_domain(tc) == intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } static intel_wakeref_t __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain) { struct drm_i915_private *i915 = tc_to_i915(tc); *domain = tc_phy_cold_off_domain(tc); return intel_display_power_get(i915, *domain); } static intel_wakeref_t tc_cold_block(struct intel_tc_port *tc) { enum intel_display_power_domain domain; intel_wakeref_t wakeref; wakeref = __tc_cold_block(tc, &domain); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) tc->lock_power_domain = domain; #endif return wakeref; } static void __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { struct drm_i915_private *i915 = tc_to_i915(tc); intel_display_power_put(i915, domain, wakeref); } static void tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) { enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); #endif __tc_cold_unblock(tc, domain, wakeref); } static void assert_display_core_power_enabled(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE)); } static void assert_tc_cold_blocked(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); bool enabled; enabled = intel_display_power_is_enabled(i915, tc_phy_cold_off_domain(tc)); drm_WARN_ON(&i915->drm, !enabled); } static enum intel_display_power_domain tc_port_power_domain(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; } static void assert_tc_port_power_enabled(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, tc_port_power_domain(tc))); } 
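/*
 * Illustrative sketch only (not part of the original driver): FIA register
 * accesses are expected to run with the TC-cold power state blocked, which
 * is what assert_tc_cold_blocked() verifies in the accessors that follow.
 * example_read_fia_reg() is a hypothetical wrapper showing the
 * tc_cold_block()/tc_cold_unblock() pairing around such an access.
 */
#if 0
/* hypothetical wrapper, illustration only */
static u32 example_read_fia_reg(struct intel_tc_port *tc, i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = tc_cold_block(tc);	/* keep the PHY out of TC-cold */
	val = intel_de_read(tc_to_i915(tc), reg);
	tc_cold_unblock(tc, wakeref);	/* drop the power domain reference */

	return val;
}
#endif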
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); u32 lane_mask; lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); assert_tc_cold_blocked(tc); lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); u32 pin_mask; pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); assert_tc_cold_blocked(tc); return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } static int mtl_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref; u32 pin_mask; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port); switch (pin_mask) { default: MISSING_CASE(pin_mask); fallthrough; case DP_PIN_ASSIGNMENT_D: return 2; case DP_PIN_ASSIGNMENT_C: case DP_PIN_ASSIGNMENT_E: return 4; } } int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_wakeref_t wakeref; u32 lane_mask; if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT) return 4; assert_tc_cold_blocked(tc); if (DISPLAY_VER(i915) >= 14) return mtl_tc_port_get_pin_assignment_mask(dig_port); lane_mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) lane_mask = intel_tc_port_get_lane_mask(dig_port); switch (lane_mask) { default: MISSING_CASE(lane_mask); fallthrough; case 0x1: case 0x2: case 0x4: case 0x8: return 1; case 0x3: case 0xc: return 2; case 0xf: return 4; } } void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; u32 val; drm_WARN_ON(&i915->drm, lane_reversal && tc->mode != TC_PORT_LEGACY); assert_tc_cold_blocked(tc); val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); switch (required_lanes) { case 1: val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) : DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx); break; case 2: val |= lane_reversal ? 
DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) : DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx); break; case 4: val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx); break; default: MISSING_CASE(required_lanes); } intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); } static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, u32 live_status_mask) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 valid_hpd_mask; drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); if (hweight32(live_status_mask) != 1) return; if (tc->legacy_port) valid_hpd_mask = BIT(TC_PORT_LEGACY); else valid_hpd_mask = BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_TBT_ALT); if (!(live_status_mask & ~valid_hpd_mask)) return; /* If live status mismatches the VBT flag, trust the live status. */ drm_dbg_kms(&i915->drm, "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n", tc->port_name, live_status_mask, valid_hpd_mask); tc->legacy_port = !tc->legacy_port; } static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(i915, port); /* * Each Modular FIA instance houses 2 TC ports. In SOC that has more * than two TC ports, there are multiple instances of Modular FIA. */ if (modular_fia) { tc->phy_fia = tc_port / 2; tc->phy_fia_idx = tc_port % 2; } else { tc->phy_fia = FIA1; tc->phy_fia_idx = tc_port; } } /* * ICL TC PHY handlers * ------------------- */ static enum intel_display_power_domain icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; if (tc->legacy_port) return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); return POWER_DOMAIN_TC_COLD_OFF; } static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; intel_wakeref_t wakeref; u32 fia_isr; u32 pch_isr; u32 mask = 0; with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) { fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); pch_isr = intel_de_read(i915, SDEISR); } if (fia_isr == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, nothing connected\n", tc->port_name); return mask; } if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx)) mask |= BIT(TC_PORT_TBT_ALT); if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx)) mask |= BIT(TC_PORT_DP_ALT); if (pch_isr & isr_bit) mask |= BIT(TC_PORT_LEGACY); return mask; } /* * Return the PHY status complete flag indicating that display can acquire the * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink * is connected and it's ready to switch the ownership to display. The flag * will be left cleared when a TBT-alt sink is connected, where the PHY is * owned by the TBT subsystem and so switching the ownership to display is not * required. 
*/ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; assert_tc_cold_blocked(tc); val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not ready\n", tc->port_name); return false; } return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx); } static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; assert_tc_cold_blocked(tc); val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, can't %s ownership\n", tc->port_name, take ? "take" : "release"); return false; } val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); if (take) val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); return true; } static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; assert_tc_cold_blocked(tc); val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assume not owned\n", tc->port_name); return false; } return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); } static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) { enum intel_display_power_domain domain; intel_wakeref_t tc_cold_wref; tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); if (tc->mode != TC_PORT_DISCONNECTED) tc->lock_wakeref = tc_cold_block(tc); __tc_cold_unblock(tc, domain, tc_cold_wref); } /* * This function implements the first part of the Connect Flow described by our * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading * lanes, EDID, etc) is done as needed in the typical places. * * Unlike the other ports, type-C ports are not available to use as soon as we * get a hotplug. The type-C PHYs can be shared between multiple controllers: * display, USB, etc. As a result, handshaking through FIA is required around * connect and disconnect to cleanly transfer ownership with the controller and * set the type-C power state. */ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, int required_lanes) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; int max_lanes; max_lanes = intel_tc_port_fia_max_lane_count(dig_port); if (tc->mode == TC_PORT_LEGACY) { drm_WARN_ON(&i915->drm, max_lanes != 4); return true; } drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); /* * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. 
*/ if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n", tc->port_name); return false; } if (max_lanes < required_lanes) { drm_dbg_kms(&i915->drm, "Port %s: PHY max lanes %d < required lanes %d\n", tc->port_name, max_lanes, required_lanes); return false; } return true; } static bool icl_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { struct drm_i915_private *i915 = tc_to_i915(tc); tc->lock_wakeref = tc_cold_block(tc); if (tc->mode == TC_PORT_TBT_ALT) return true; if ((!tc_phy_is_ready(tc) || !icl_tc_phy_take_ownership(tc, true)) && !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n", tc->port_name, str_yes_no(tc_phy_is_ready(tc))); goto out_unblock_tc_cold; } if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; return true; out_release_phy: icl_tc_phy_take_ownership(tc, false); out_unblock_tc_cold: tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); return false; } /* * See the comment at the connect function. This implements the Disconnect * Flow. */ static void icl_tc_phy_disconnect(struct intel_tc_port *tc) { switch (tc->mode) { case TC_PORT_LEGACY: case TC_PORT_DP_ALT: icl_tc_phy_take_ownership(tc, false); fallthrough; case TC_PORT_TBT_ALT: tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); break; default: MISSING_CASE(tc->mode); } } static void icl_tc_phy_init(struct intel_tc_port *tc) { tc_phy_load_fia_params(tc, false); } static const struct intel_tc_phy_ops icl_tc_phy_ops = { .cold_off_domain = icl_tc_phy_cold_off_domain, .hpd_live_status = icl_tc_phy_hpd_live_status, .is_ready = icl_tc_phy_is_ready, .is_owned = icl_tc_phy_is_owned, .get_hw_state = icl_tc_phy_get_hw_state, .connect = icl_tc_phy_connect, .disconnect = icl_tc_phy_disconnect, .init = icl_tc_phy_init, }; /* * TGL TC PHY handlers * ------------------- */ static enum intel_display_power_domain tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) { return POWER_DOMAIN_TC_COLD_OFF; } static void tgl_tc_phy_init(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); intel_wakeref_t wakeref; u32 val; with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1)); drm_WARN_ON(&i915->drm, val == 0xffffffff); tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK); } static const struct intel_tc_phy_ops tgl_tc_phy_ops = { .cold_off_domain = tgl_tc_phy_cold_off_domain, .hpd_live_status = icl_tc_phy_hpd_live_status, .is_ready = icl_tc_phy_is_ready, .is_owned = icl_tc_phy_is_owned, .get_hw_state = icl_tc_phy_get_hw_state, .connect = icl_tc_phy_connect, .disconnect = icl_tc_phy_disconnect, .init = tgl_tc_phy_init, }; /* * ADLP TC PHY handlers * -------------------- */ static enum intel_display_power_domain adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; if (tc->mode != TC_PORT_TBT_ALT) return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); return POWER_DOMAIN_TC_COLD_OFF; } static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum hpd_pin hpd_pin = dig_port->base.hpd_pin; u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin]; u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; intel_wakeref_t wakeref; u32 cpu_isr; u32 pch_isr; 
u32 mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) { cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR); pch_isr = intel_de_read(i915, SDEISR); } if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK)) mask |= BIT(TC_PORT_DP_ALT); if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK)) mask |= BIT(TC_PORT_TBT_ALT); if (pch_isr & pch_isr_bit) mask |= BIT(TC_PORT_LEGACY); return mask; } /* * Return the PHY status complete flag indicating that display can acquire the * PHY ownership. The IOM firmware sets this flag when it's ready to switch * the ownership to display, regardless of what sink is connected (TBT-alt, * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT * subsystem and so switching the ownership to display is not required. */ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); u32 val; assert_display_core_power_enabled(tc); val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not ready\n", tc->port_name); return false; } return val & TCSS_DDI_STATUS_READY; } static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; assert_tc_port_power_enabled(tc); intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); return true; } static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; u32 val; assert_tc_port_power_enabled(tc); val = intel_de_read(i915, DDI_BUF_CTL(port)); return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; } static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum intel_display_power_domain port_power_domain = tc_port_power_domain(tc); intel_wakeref_t port_wakeref; port_wakeref = intel_display_power_get(i915, port_power_domain); tc->mode = tc_phy_get_current_mode(tc); if (tc->mode != TC_PORT_DISCONNECTED) tc->lock_wakeref = tc_cold_block(tc); intel_display_power_put(i915, port_power_domain, port_wakeref); } static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { struct drm_i915_private *i915 = tc_to_i915(tc); enum intel_display_power_domain port_power_domain = tc_port_power_domain(tc); intel_wakeref_t port_wakeref; if (tc->mode == TC_PORT_TBT_ALT) { tc->lock_wakeref = tc_cold_block(tc); return true; } port_wakeref = intel_display_power_get(i915, port_power_domain); if (!adlp_tc_phy_take_ownership(tc, true) && !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n", tc->port_name); goto out_put_port_power; } if (!tc_phy_is_ready(tc) && !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", tc->port_name); goto out_release_phy; } tc->lock_wakeref = tc_cold_block(tc); if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_unblock_tc_cold; intel_display_power_put(i915, port_power_domain, port_wakeref); return true; out_unblock_tc_cold: tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); out_release_phy: adlp_tc_phy_take_ownership(tc, false); out_put_port_power: intel_display_power_put(i915, port_power_domain, port_wakeref); return false; } static void 
adlp_tc_phy_disconnect(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum intel_display_power_domain port_power_domain = tc_port_power_domain(tc); intel_wakeref_t port_wakeref; port_wakeref = intel_display_power_get(i915, port_power_domain); tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); switch (tc->mode) { case TC_PORT_LEGACY: case TC_PORT_DP_ALT: adlp_tc_phy_take_ownership(tc, false); fallthrough; case TC_PORT_TBT_ALT: break; default: MISSING_CASE(tc->mode); } intel_display_power_put(i915, port_power_domain, port_wakeref); } static void adlp_tc_phy_init(struct intel_tc_port *tc) { tc_phy_load_fia_params(tc, true); } static const struct intel_tc_phy_ops adlp_tc_phy_ops = { .cold_off_domain = adlp_tc_phy_cold_off_domain, .hpd_live_status = adlp_tc_phy_hpd_live_status, .is_ready = adlp_tc_phy_is_ready, .is_owned = adlp_tc_phy_is_owned, .get_hw_state = adlp_tc_phy_get_hw_state, .connect = adlp_tc_phy_connect, .disconnect = adlp_tc_phy_disconnect, .init = adlp_tc_phy_init, }; /* * XELPDP TC PHY handlers * ---------------------- */ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum hpd_pin hpd_pin = dig_port->base.hpd_pin; u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin]; u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; intel_wakeref_t wakeref; u32 pica_isr; u32 pch_isr; u32 mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) { pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR); pch_isr = intel_de_read(i915, SDEISR); } if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK)) mask |= BIT(TC_PORT_DP_ALT); if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK)) mask |= BIT(TC_PORT_TBT_ALT); if (tc->legacy_port && (pch_isr & pch_isr_bit)) mask |= BIT(TC_PORT_LEGACY); return mask; } static bool xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; assert_tc_cold_blocked(tc); return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE; } static bool xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) { struct drm_i915_private *i915 = tc_to_i915(tc); if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { drm_dbg_kms(&i915->drm, "Port %s: timeout waiting for TCSS power to get %s\n", enabled ? 
"enabled" : "disabled", tc->port_name); return false; } return true; } static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; u32 val; assert_tc_cold_blocked(tc); val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); if (enable) val |= XELPDP_TCSS_POWER_REQUEST; else val &= ~XELPDP_TCSS_POWER_REQUEST; intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); } static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) { struct drm_i915_private *i915 = tc_to_i915(tc); __xelpdp_tc_phy_enable_tcss_power(tc, enable); if ((!tc_phy_wait_for_ready(tc) || !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) && !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { if (enable) { __xelpdp_tc_phy_enable_tcss_power(tc, false); xelpdp_tc_phy_wait_for_tcss_power(tc, false); } return false; } return true; } static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; u32 val; assert_tc_cold_blocked(tc); val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)); if (take) val |= XELPDP_TC_PHY_OWNERSHIP; else val &= ~XELPDP_TC_PHY_OWNERSHIP; intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val); } static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum port port = tc->dig_port->base.port; assert_tc_cold_blocked(tc); return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP; } static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); intel_wakeref_t tc_cold_wref; enum intel_display_power_domain domain; tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); if (tc->mode != TC_PORT_DISCONNECTED) tc->lock_wakeref = tc_cold_block(tc); drm_WARN_ON(&i915->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); __tc_cold_unblock(tc, domain, tc_cold_wref); } static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { tc->lock_wakeref = tc_cold_block(tc); if (tc->mode == TC_PORT_TBT_ALT) return true; if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) goto out_unblock_tccold; xelpdp_tc_phy_take_ownership(tc, true); if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; return true; out_release_phy: xelpdp_tc_phy_take_ownership(tc, false); xelpdp_tc_phy_wait_for_tcss_power(tc, false); out_unblock_tccold: tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); return false; } static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc) { switch (tc->mode) { case TC_PORT_LEGACY: case TC_PORT_DP_ALT: xelpdp_tc_phy_take_ownership(tc, false); xelpdp_tc_phy_enable_tcss_power(tc, false); fallthrough; case TC_PORT_TBT_ALT: tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); break; default: MISSING_CASE(tc->mode); } } static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = { .cold_off_domain = tgl_tc_phy_cold_off_domain, .hpd_live_status = xelpdp_tc_phy_hpd_live_status, .is_ready = adlp_tc_phy_is_ready, .is_owned = xelpdp_tc_phy_is_owned, .get_hw_state = xelpdp_tc_phy_get_hw_state, .connect = xelpdp_tc_phy_connect, .disconnect = xelpdp_tc_phy_disconnect, .init = adlp_tc_phy_init, }; /* * Generic TC PHY handlers * ----------------------- */ static enum intel_display_power_domain tc_phy_cold_off_domain(struct 
intel_tc_port *tc) { return tc->phy_ops->cold_off_domain(tc); } static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 mask; mask = tc->phy_ops->hpd_live_status(tc); /* The sink can be connected only in a single mode. */ drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1); return mask; } static bool tc_phy_is_ready(struct intel_tc_port *tc) { return tc->phy_ops->is_ready(tc); } static bool tc_phy_is_owned(struct intel_tc_port *tc) { return tc->phy_ops->is_owned(tc); } static void tc_phy_get_hw_state(struct intel_tc_port *tc) { tc->phy_ops->get_hw_state(tc); } static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, bool phy_is_ready, bool phy_is_owned) { struct drm_i915_private *i915 = tc_to_i915(tc); drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); return phy_is_ready && phy_is_owned; } static bool tc_phy_is_connected(struct intel_tc_port *tc, enum icl_port_dpll_id port_pll_type) { struct intel_encoder *encoder = &tc->dig_port->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); bool phy_is_ready = tc_phy_is_ready(tc); bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; drm_dbg_kms(&i915->drm, "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", tc->port_name, str_yes_no(is_connected), str_yes_no(phy_is_ready), str_yes_no(phy_is_owned), port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt"); return is_connected; } static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); if (wait_for(tc_phy_is_ready(tc), 500)) { drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", tc->port_name); return false; } return true; } static enum tc_port_mode hpd_mask_to_tc_mode(u32 live_status_mask) { if (live_status_mask) return fls(live_status_mask) - 1; return TC_PORT_DISCONNECTED; } static enum tc_port_mode tc_phy_hpd_live_mode(struct intel_tc_port *tc) { u32 live_status_mask = tc_phy_hpd_live_status(tc); return hpd_mask_to_tc_mode(live_status_mask); } static enum tc_port_mode get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc, enum tc_port_mode live_mode) { switch (live_mode) { case TC_PORT_LEGACY: case TC_PORT_DP_ALT: return live_mode; default: MISSING_CASE(live_mode); fallthrough; case TC_PORT_TBT_ALT: case TC_PORT_DISCONNECTED: if (tc->legacy_port) return TC_PORT_LEGACY; else return TC_PORT_DP_ALT; } } static enum tc_port_mode get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc, enum tc_port_mode live_mode) { switch (live_mode) { case TC_PORT_LEGACY: return TC_PORT_DISCONNECTED; case TC_PORT_DP_ALT: case TC_PORT_TBT_ALT: return TC_PORT_TBT_ALT; default: MISSING_CASE(live_mode); fallthrough; case TC_PORT_DISCONNECTED: if (tc->legacy_port) return TC_PORT_DISCONNECTED; else return TC_PORT_TBT_ALT; } } static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); bool phy_is_ready; bool phy_is_owned; enum tc_port_mode mode; /* * For legacy ports the IOM firmware initializes the PHY during boot-up * and system resume whether or not a sink is connected. Wait here for * the initialization to get ready. 
*/ if (tc->legacy_port) tc_phy_wait_for_ready(tc); phy_is_ready = tc_phy_is_ready(tc); phy_is_owned = tc_phy_is_owned(tc); if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); mode = get_tc_mode_in_phy_owned_state(tc, live_mode); } drm_dbg_kms(&i915->drm, "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", tc->port_name, tc_port_mode_name(mode), str_yes_no(phy_is_ready), str_yes_no(phy_is_owned), tc_port_mode_name(live_mode)); return mode; } static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc) { if (tc->legacy_port) return TC_PORT_LEGACY; return TC_PORT_TBT_ALT; } static enum tc_port_mode hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask) { enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask); if (mode != TC_PORT_DISCONNECTED) return mode; return default_tc_mode(tc); } static enum tc_port_mode tc_phy_get_target_mode(struct intel_tc_port *tc) { u32 live_status_mask = tc_phy_hpd_live_status(tc); return hpd_mask_to_target_mode(tc, live_status_mask); } static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { struct drm_i915_private *i915 = tc_to_i915(tc); u32 live_status_mask = tc_phy_hpd_live_status(tc); bool connected; tc_port_fixup_legacy_flag(tc, live_status_mask); tc->mode = hpd_mask_to_target_mode(tc, live_status_mask); connected = tc->phy_ops->connect(tc, required_lanes); if (!connected && tc->mode != default_tc_mode(tc)) { tc->mode = default_tc_mode(tc); connected = tc->phy_ops->connect(tc, required_lanes); } drm_WARN_ON(&i915->drm, !connected); } static void tc_phy_disconnect(struct intel_tc_port *tc) { if (tc->mode != TC_PORT_DISCONNECTED) { tc->phy_ops->disconnect(tc); tc->mode = TC_PORT_DISCONNECTED; } } static void tc_phy_init(struct intel_tc_port *tc) { mutex_lock(&tc->lock); tc->phy_ops->init(tc); mutex_unlock(&tc->lock); } static void intel_tc_port_reset_mode(struct intel_tc_port *tc, int required_lanes, bool force_disconnect) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum tc_port_mode old_tc_mode = tc->mode; intel_display_power_flush_work(i915); if (!intel_tc_cold_requires_aux_pw(dig_port)) { enum intel_display_power_domain aux_domain; bool aux_powered; aux_domain = intel_aux_power_domain(dig_port); aux_powered = intel_display_power_is_enabled(i915, aux_domain); drm_WARN_ON(&i915->drm, aux_powered); } tc_phy_disconnect(tc); if (!force_disconnect) tc_phy_connect(tc, required_lanes); drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", tc->port_name, tc_port_mode_name(old_tc_mode), tc_port_mode_name(tc->mode)); } static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) { return tc_phy_get_target_mode(tc) != tc->mode; } static void intel_tc_port_update_mode(struct intel_tc_port *tc, int required_lanes, bool force_disconnect) { if (force_disconnect || intel_tc_port_needs_reset(tc)) intel_tc_port_reset_mode(tc, required_lanes, force_disconnect); } static void __intel_tc_port_get_link(struct intel_tc_port *tc) { tc->link_refcount++; } static void __intel_tc_port_put_link(struct intel_tc_port *tc) { tc->link_refcount--; } static bool tc_port_is_enabled(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; assert_tc_port_power_enabled(tc); return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & DDI_BUF_CTL_ENABLE; } /** * 
intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode * @dig_port: digital port * * Read out the HW state and initialize the TypeC mode of @dig_port. The mode * will be locked until intel_tc_port_sanitize_mode() is called. */ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); bool update_mode = false; mutex_lock(&tc->lock); drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); drm_WARN_ON(&i915->drm, tc->lock_wakeref); drm_WARN_ON(&i915->drm, tc->link_refcount); tc_phy_get_hw_state(tc); /* * Save the initial mode for the state check in * intel_tc_port_sanitize_mode(). */ tc->init_mode = tc->mode; /* * The PHY needs to be connected for AUX to work during HW readout and * MST topology resume, but the PHY mode can only be changed if the * port is disabled. * * An exception is the case where BIOS leaves the PHY incorrectly * disconnected on an enabled legacy port. Work around that by * connecting the PHY even though the port is enabled. This doesn't * cause a problem as the PHY ownership state is ignored by the * IOM/TCSS firmware (only display can own the PHY in that case). */ if (!tc_port_is_enabled(tc)) { update_mode = true; } else if (tc->mode == TC_PORT_DISCONNECTED) { drm_WARN_ON(&i915->drm, !tc->legacy_port); drm_err(&i915->drm, "Port %s: PHY disconnected on enabled port, connecting it\n", tc->port_name); update_mode = true; } if (update_mode) intel_tc_port_update_mode(tc, 1, false); /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */ __intel_tc_port_get_link(tc); mutex_unlock(&tc->lock); } static bool tc_port_has_active_links(struct intel_tc_port *tc, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; int active_links = 0; if (dig_port->dp.is_mst) { /* TODO: get the PLL type for MST, once HW readout is done for it. */ active_links = intel_dp_mst_encoder_active_links(dig_port); } else if (crtc_state && crtc_state->hw.active) { pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state); active_links = 1; } if (active_links && !tc_phy_is_connected(tc, pll_type)) drm_err(&i915->drm, "Port %s: PHY disconnected with %d active link(s)\n", tc->port_name, active_links); return active_links; } /** * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode * @dig_port: digital port * @crtc_state: atomic state of CRTC connected to @dig_port * * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver * loading and system resume: * If the encoder is enabled keep the TypeC mode/PHY connected state locked until * the encoder is disabled. * If the encoder is disabled make sure the PHY is disconnected. * @crtc_state is valid if @dig_port is enabled, NULL otherwise. */ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); mutex_lock(&tc->lock); drm_WARN_ON(&i915->drm, tc->link_refcount != 1); if (!tc_port_has_active_links(tc, crtc_state)) { /* * TBT-alt is the default mode in any case the PHY ownership is not * held (regardless of the sink's connected live state), so * we'll just switch to disconnected mode from it here without * a note. 
*/ if (tc->init_mode != TC_PORT_TBT_ALT && tc->init_mode != TC_PORT_DISCONNECTED) drm_dbg_kms(&i915->drm, "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", tc->port_name, tc_port_mode_name(tc->init_mode)); tc_phy_disconnect(tc); __intel_tc_port_put_link(tc); } drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", tc->port_name, tc_port_mode_name(tc->mode)); mutex_unlock(&tc->lock); } /* * The type-C ports are different because even when they are connected, they may * not be available/usable by the graphics driver: see the comment on * icl_tc_phy_connect(). So in our driver instead of adding the additional * concept of "usable" and make everything check for "connected and usable" we * define a port as "connected" when it is not only connected, but also when it * is usable by the rest of the driver. That maintains the old assumption that * connected ports are usable, and avoids exposing to the users objects they * can't really use. */ bool intel_tc_port_connected_locked(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); u32 mask = ~0; drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); if (tc->mode != TC_PORT_DISCONNECTED) mask = BIT(tc->mode); return tc_phy_hpd_live_status(tc) & mask; } bool intel_tc_port_connected(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_tc_port *tc = to_tc_port(dig_port); bool is_connected; mutex_lock(&tc->lock); is_connected = intel_tc_port_connected_locked(encoder); mutex_unlock(&tc->lock); return is_connected; } static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) { bool ret; mutex_lock(&tc->lock); ret = tc->link_refcount && tc->mode == TC_PORT_DP_ALT && intel_tc_port_needs_reset(tc); mutex_unlock(&tc->lock); return ret; } bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); if (!intel_phy_is_tc(i915, phy)) return false; return __intel_tc_port_link_needs_reset(to_tc_port(dig_port)); } static int reset_link_commit(struct intel_tc_port *tc, struct intel_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base); struct intel_crtc *crtc; u8 pipe_mask; int ret; ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx); if (ret) return ret; ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); if (ret) return ret; if (!pipe_mask) return 0; for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { struct intel_crtc_state *crtc_state; crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); crtc_state->uapi.connectors_changed = true; } if (!__intel_tc_port_link_needs_reset(tc)) return 0; return drm_atomic_commit(&state->base); } static int reset_link(struct intel_tc_port *tc) { struct drm_i915_private *i915 = tc_to_i915(tc); struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *_state; struct intel_atomic_state *state; int ret; _state = drm_atomic_state_alloc(&i915->drm); if (!_state) return -ENOMEM; state = to_intel_atomic_state(_state); state->internal = true; intel_modeset_lock_ctx_retry(&ctx, state, 0, ret) 
ret = reset_link_commit(tc, state, &ctx); drm_atomic_state_put(&state->base); return ret; } static void intel_tc_port_link_reset_work(struct work_struct *work) { struct intel_tc_port *tc = container_of(work, struct intel_tc_port, link_reset_work.work); struct drm_i915_private *i915 = tc_to_i915(tc); int ret; if (!__intel_tc_port_link_needs_reset(tc)) return; mutex_lock(&i915->drm.mode_config.mutex); drm_dbg_kms(&i915->drm, "Port %s: TypeC DP-alt sink disconnected, resetting link\n", tc->port_name); ret = reset_link(tc); drm_WARN_ON(&i915->drm, ret); mutex_unlock(&i915->drm.mode_config.mutex); } bool intel_tc_port_link_reset(struct intel_digital_port *dig_port) { if (!intel_tc_port_link_needs_reset(dig_port)) return false; queue_delayed_work(system_unbound_wq, &to_tc_port(dig_port)->link_reset_work, msecs_to_jiffies(2000)); return true; } void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); struct intel_tc_port *tc = to_tc_port(dig_port); if (!intel_phy_is_tc(i915, phy)) return; cancel_delayed_work(&tc->link_reset_work); } static void __intel_tc_port_lock(struct intel_tc_port *tc, int required_lanes) { struct drm_i915_private *i915 = tc_to_i915(tc); mutex_lock(&tc->lock); cancel_delayed_work(&tc->disconnect_phy_work); if (!tc->link_refcount) intel_tc_port_update_mode(tc, required_lanes, false); drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc)); } void intel_tc_port_lock(struct intel_digital_port *dig_port) { __intel_tc_port_lock(to_tc_port(dig_port), 1); } /* * Disconnect the given digital port from its TypeC PHY (handing back the * control of the PHY to the TypeC subsystem). This will happen in a delayed * manner after each AUX transaction and modeset disable. */ static void intel_tc_port_disconnect_phy_work(struct work_struct *work) { struct intel_tc_port *tc = container_of(work, struct intel_tc_port, disconnect_phy_work.work); mutex_lock(&tc->lock); if (!tc->link_refcount) intel_tc_port_update_mode(tc, 1, true); mutex_unlock(&tc->lock); } /** * intel_tc_port_flush_work: flush the work disconnecting the PHY * @dig_port: digital port * * Flush the delayed work disconnecting an idle PHY.
*/ static void intel_tc_port_flush_work(struct intel_digital_port *dig_port) { flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work); } void intel_tc_port_suspend(struct intel_digital_port *dig_port) { struct intel_tc_port *tc = to_tc_port(dig_port); cancel_delayed_work_sync(&tc->link_reset_work); intel_tc_port_flush_work(dig_port); } void intel_tc_port_unlock(struct intel_digital_port *dig_port) { struct intel_tc_port *tc = to_tc_port(dig_port); if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED) queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work, msecs_to_jiffies(1000)); mutex_unlock(&tc->lock); } bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) { struct intel_tc_port *tc = to_tc_port(dig_port); return mutex_is_locked(&tc->lock) || tc->link_refcount; } void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { struct intel_tc_port *tc = to_tc_port(dig_port); __intel_tc_port_lock(tc, required_lanes); __intel_tc_port_get_link(tc); intel_tc_port_unlock(dig_port); } void intel_tc_port_put_link(struct intel_digital_port *dig_port) { struct intel_tc_port *tc = to_tc_port(dig_port); intel_tc_port_lock(dig_port); __intel_tc_port_put_link(tc); intel_tc_port_unlock(dig_port); /* * The firmware will not update the HPD status of other TypeC ports * that are active in DP-alt mode with their sink disconnected, until * this port is disabled and its PHY gets disconnected. Make sure this * happens in a timely manner by disconnecting the PHY synchronously. */ intel_tc_port_flush_work(dig_port); } int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc; enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(i915, port); if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) return -EINVAL; tc = kzalloc(sizeof(*tc), GFP_KERNEL); if (!tc) return -ENOMEM; dig_port->tc = tc; tc->dig_port = dig_port; if (DISPLAY_VER(i915) >= 14) tc->phy_ops = &xelpdp_tc_phy_ops; else if (DISPLAY_VER(i915) >= 13) tc->phy_ops = &adlp_tc_phy_ops; else if (DISPLAY_VER(i915) >= 12) tc->phy_ops = &tgl_tc_phy_ops; else tc->phy_ops = &icl_tc_phy_ops; snprintf(tc->port_name, sizeof(tc->port_name), "%c/TC#%d", port_name(port), tc_port + 1); mutex_init(&tc->lock); /* TODO: Combine the two works */ INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work); tc->legacy_port = is_legacy; tc->mode = TC_PORT_DISCONNECTED; tc->link_refcount = 0; tc_phy_init(tc); intel_tc_port_init_mode(dig_port); return 0; } void intel_tc_port_cleanup(struct intel_digital_port *dig_port) { intel_tc_port_suspend(dig_port); kfree(dig_port->tc); dig_port->tc = NULL; }
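/*
 * Illustrative usage sketch, not part of the upstream driver: callers such
 * as the AUX transfer and hotplug probe paths are expected to bracket any
 * access that depends on the PHY connection with the lock/unlock helpers
 * above, which connect the PHY on demand and defer the disconnect of an
 * idle PHY to the delayed work. A hypothetical caller could look like:
 */
static int __maybe_unused
tc_port_query_max_lanes_sketch(struct intel_digital_port *dig_port)
{
	int max_lanes;

	/* Connects the PHY if needed and keeps TC-cold blocked while held. */
	intel_tc_port_lock(dig_port);
	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	/* Unlocking schedules the delayed PHY disconnect for an idle port. */
	intel_tc_port_unlock(dig_port);

	return max_lanes;
}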
linux-master
drivers/gpu/drm/i915/display/intel_tc.c
// SPDX-License-Identifier: MIT /* * Copyright © 2022 Intel Corporation * * Read out the current hardware modeset state, and sanitize it to the current * state. */ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_state_helper.h> #include "i915_drv.h" #include "i915_reg.h" #include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_crtc_state_dump.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_fifo_underrun.h" #include "intel_modeset_setup.h" #include "intel_pch_display.h" #include "intel_pmdemand.h" #include "intel_tc.h" #include "intel_vblank.h" #include "intel_wm.h" #include "skl_watermark.h" static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; struct drm_atomic_state *state; struct intel_crtc *temp_crtc; enum pipe pipe = crtc->pipe; if (!crtc_state->hw.active) return; for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); if (plane_state->uapi.visible) intel_plane_disable_noatomic(crtc, plane); } state = drm_atomic_state_alloc(&i915->drm); if (!state) { drm_dbg_kms(&i915->drm, "failed to disable [CRTC:%d:%s], out of memory", crtc->base.base.id, crtc->base.name); return; } state->acquire_ctx = ctx; to_intel_atomic_state(state)->internal = true; /* Everything's already locked, -EDEADLK can't happen. */ for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, BIT(pipe) | intel_crtc_bigjoiner_slave_pipes(crtc_state)) { struct intel_crtc_state *temp_crtc_state = intel_atomic_get_crtc_state(state, temp_crtc); int ret; ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base); drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); } i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", crtc->base.base.id, crtc->base.name); crtc->active = false; crtc->base.enabled = false; if (crtc_state->shared_dpll) intel_unreference_shared_dpll_crtc(crtc, crtc_state->shared_dpll, &crtc_state->shared_dpll->state); } static void set_encoder_for_connector(struct intel_connector *connector, struct intel_encoder *encoder) { struct drm_connector_state *conn_state = connector->base.state; if (conn_state->crtc) drm_connector_put(&connector->base); if (encoder) { conn_state->best_encoder = &encoder->base; conn_state->crtc = encoder->base.crtc; drm_connector_get(&connector->base); } else { conn_state->best_encoder = NULL; conn_state->crtc = NULL; } } static void reset_encoder_connector_state(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_pmdemand_state *pmdemand_state = to_intel_pmdemand_state(i915->display.pmdemand.obj.state); struct intel_connector *connector; struct drm_connector_list_iter conn_iter; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.encoder != &encoder->base) continue; /* Clear the corresponding bit in pmdemand active phys mask */ intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, false); 
set_encoder_for_connector(connector, NULL); connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } drm_connector_list_iter_end(&conn_iter); } static void reset_crtc_encoder_state(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_encoder *encoder; for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) { reset_encoder_connector_state(encoder); encoder->base.crtc = NULL; } } static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_bw_state *bw_state = to_intel_bw_state(i915->display.bw.obj.state); struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_pmdemand_state *pmdemand_state = to_intel_pmdemand_state(i915->display.pmdemand.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum pipe pipe = crtc->pipe; __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); intel_crtc_state_reset(crtc_state, crtc); reset_crtc_encoder_state(crtc); intel_fbc_disable(crtc); intel_update_watermarks(i915); intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains); cdclk_state->min_cdclk[pipe] = 0; cdclk_state->min_voltage_level[pipe] = 0; cdclk_state->active_pipes &= ~BIT(pipe); dbuf_state->active_pipes &= ~BIT(pipe); bw_state->data_rate[pipe] = 0; bw_state->num_active_planes[pipe] = 0; intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0); } /* * Return all the pipes using a transcoder in @transcoder_mask. * For bigjoiner configs return only the bigjoiner master. */ static u8 get_transcoder_pipes(struct drm_i915_private *i915, u8 transcoder_mask) { struct intel_crtc *temp_crtc; u8 pipes = 0; for_each_intel_crtc(&i915->drm, temp_crtc) { struct intel_crtc_state *temp_crtc_state = to_intel_crtc_state(temp_crtc->base.state); if (temp_crtc_state->cpu_transcoder == INVALID_TRANSCODER) continue; if (intel_crtc_is_bigjoiner_slave(temp_crtc_state)) continue; if (transcoder_mask & BIT(temp_crtc_state->cpu_transcoder)) pipes |= BIT(temp_crtc->pipe); } return pipes; } /* * Return the port sync master and slave pipes linked to @crtc. * For bigjoiner configs return only the bigjoiner master pipes. 
*/ static void get_portsync_pipes(struct intel_crtc *crtc, u8 *master_pipe_mask, u8 *slave_pipes_mask) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_crtc *master_crtc; struct intel_crtc_state *master_crtc_state; enum transcoder master_transcoder; if (!is_trans_port_sync_mode(crtc_state)) { *master_pipe_mask = BIT(crtc->pipe); *slave_pipes_mask = 0; return; } if (is_trans_port_sync_master(crtc_state)) master_transcoder = crtc_state->cpu_transcoder; else master_transcoder = crtc_state->master_transcoder; *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder)); drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask)); master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1); master_crtc_state = to_intel_crtc_state(master_crtc->base.state); *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask); } static u8 get_bigjoiner_slave_pipes(struct drm_i915_private *i915, u8 master_pipes_mask) { struct intel_crtc *master_crtc; u8 pipes = 0; for_each_intel_crtc_in_pipe_mask(&i915->drm, master_crtc, master_pipes_mask) { struct intel_crtc_state *master_crtc_state = to_intel_crtc_state(master_crtc->base.state); pipes |= intel_crtc_bigjoiner_slave_pipes(master_crtc_state); } return pipes; } static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); u8 portsync_master_mask; u8 portsync_slaves_mask; u8 bigjoiner_slaves_mask; struct intel_crtc *temp_crtc; /* TODO: Add support for MST */ get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask); bigjoiner_slaves_mask = get_bigjoiner_slave_pipes(i915, portsync_master_mask | portsync_slaves_mask); drm_WARN_ON(&i915->drm, portsync_master_mask & portsync_slaves_mask || portsync_master_mask & bigjoiner_slaves_mask || portsync_slaves_mask & bigjoiner_slaves_mask); for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, bigjoiner_slaves_mask) intel_crtc_disable_noatomic_begin(temp_crtc, ctx); for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask) intel_crtc_disable_noatomic_begin(temp_crtc, ctx); for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask) intel_crtc_disable_noatomic_begin(temp_crtc, ctx); for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, bigjoiner_slaves_mask | portsync_slaves_mask | portsync_master_mask) intel_crtc_disable_noatomic_complete(temp_crtc); } static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state = connector->base.state; struct intel_encoder *encoder = to_intel_encoder(connector->base.encoder); set_encoder_for_connector(connector, encoder); if (encoder) { struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; } } drm_connector_list_iter_end(&conn_iter); } static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) { if (intel_crtc_is_bigjoiner_slave(crtc_state)) return; crtc_state->uapi.enable = crtc_state->hw.enable; crtc_state->uapi.active = crtc_state->hw.active; 
drm_WARN_ON(crtc_state->uapi.crtc->dev, drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter; /* assume 1:1 mapping */ drm_property_replace_blob(&crtc_state->hw.degamma_lut, crtc_state->pre_csc_lut); drm_property_replace_blob(&crtc_state->hw.gamma_lut, crtc_state->post_csc_lut); drm_property_replace_blob(&crtc_state->uapi.degamma_lut, crtc_state->hw.degamma_lut); drm_property_replace_blob(&crtc_state->uapi.gamma_lut, crtc_state->hw.gamma_lut); drm_property_replace_blob(&crtc_state->uapi.ctm, crtc_state->hw.ctm); } static void intel_sanitize_plane_mapping(struct drm_i915_private *i915) { struct intel_crtc *crtc; if (DISPLAY_VER(i915) >= 4) return; for_each_intel_crtc(&i915->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_crtc *plane_crtc; enum pipe pipe; if (!plane->get_hw_state(plane, &pipe)) continue; if (pipe == crtc->pipe) continue; drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); plane_crtc = intel_crtc_for_pipe(i915, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } static bool intel_crtc_has_encoders(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct intel_encoder *encoder; for_each_encoder_on_crtc(dev, &crtc->base, encoder) return true; return false; } static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct intel_encoder *encoder; for_each_encoder_on_crtc(dev, &crtc->base, encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); if (dig_port && intel_tc_port_link_needs_reset(dig_port)) return true; } return false; } static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; struct intel_connector *found_connector = NULL; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (&encoder->base == connector->base.encoder) { found_connector = connector; break; } } drm_connector_list_iter_end(&conn_iter); return found_connector; } static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * We start out with underrun reporting disabled on active * pipes to avoid races. * * Also on gmch platforms we dont have any hardware bits to * disable the underrun reporting. Which means we need to start * out with underrun reporting disabled also on inactive pipes, * since otherwise we'll complain about the garbage we read when * e.g. coming up after runtime pm. * * No protection against concurrent access is required - at * worst a fifo underrun happens which also sets this to false. 
*/ intel_init_fifo_underrun_reporting(i915, crtc, !crtc_state->hw.active && !HAS_GMCH(i915)); } static bool intel_sanitize_crtc(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); bool needs_link_reset; if (crtc_state->hw.active) { struct intel_plane *plane; /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); if (plane_state->uapi.visible && plane->base.type != DRM_PLANE_TYPE_PRIMARY) intel_plane_disable_noatomic(crtc, plane); } /* Disable any background color/etc. set by the BIOS */ intel_color_commit_noarm(crtc_state); intel_color_commit_arm(crtc_state); } if (!crtc_state->hw.active || intel_crtc_is_bigjoiner_slave(crtc_state)) return false; needs_link_reset = intel_crtc_needs_link_reset(crtc); /* * Adjust the state of the output pipe according to whether we have * active connectors/encoders. */ if (!needs_link_reset && intel_crtc_has_encoders(crtc)) return false; intel_crtc_disable_noatomic(crtc, ctx); /* * The HPD state on other active/disconnected TC ports may be stuck in * the connected state until this port is disabled and a ~10ms delay has * passed, wait here for that so that sanitizing other CRTCs will see the * up-to-date HPD state. */ if (needs_link_reset) msleep(20); return true; } static void intel_sanitize_all_crtcs(struct drm_i915_private *i915, struct drm_modeset_acquire_ctx *ctx) { struct intel_crtc *crtc; u32 crtcs_forced_off = 0; /* * An active and disconnected TypeC port prevents the HPD live state * to get updated on other active/disconnected TypeC ports, so after * a port gets disabled the CRTCs using other TypeC ports must be * rechecked wrt. their link status. */ for (;;) { u32 old_mask = crtcs_forced_off; for_each_intel_crtc(&i915->drm, crtc) { u32 crtc_mask = drm_crtc_mask(&crtc->base); if (crtcs_forced_off & crtc_mask) continue; if (intel_sanitize_crtc(crtc, ctx)) crtcs_forced_off |= crtc_mask; } if (crtcs_forced_off == old_mask) break; } for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state"); } } static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram * the hardware when a high res displays plugged in. DPLL P * divider is zero, and the pipe timings are bonkers. We'll * try to disable everything in that case. * * FIXME would be nice to be able to sanitize this state * without several WARNs, but for now let's take the easy * road. */ return IS_SANDYBRIDGE(i915) && crtc_state->hw.active && crtc_state->shared_dpll && crtc_state->port_clock == 0; } static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_connector *connector; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc_state *crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; struct intel_pmdemand_state *pmdemand_state = to_intel_pmdemand_state(i915->display.pmdemand.obj.state); /* * We need to check both for a crtc link (meaning that the encoder is * active and trying to read from a pipe) and the pipe itself being * active. 
*/ bool has_active_crtc = crtc_state && crtc_state->hw.active; if (crtc_state && has_bogus_dpll_config(crtc_state)) { drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the hardware. Disabling pipe %c\n", pipe_name(crtc->pipe)); has_active_crtc = false; } connector = intel_encoder_find_connector(encoder); if (connector && !has_active_crtc) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] has active connectors but no active pipe!\n", encoder->base.base.id, encoder->base.name); /* Clear the corresponding bit in pmdemand active phys mask */ intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, false); /* * Connector is active, but has no active pipe. This is fallout * from our resume register restoring. Disable the encoder * manually again. */ if (crtc_state) { struct drm_encoder *best_encoder; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); /* avoid oopsing in case the hooks consult best_encoder */ best_encoder = connector->base.state->best_encoder; connector->base.state->best_encoder = &encoder->base; /* FIXME NULL atomic state passed! */ if (encoder->disable) encoder->disable(NULL, encoder, crtc_state, connector->base.state); if (encoder->post_disable) encoder->post_disable(NULL, encoder, crtc_state, connector->base.state); connector->base.state->best_encoder = best_encoder; } encoder->base.crtc = NULL; /* * Inconsistent output/port/pipe state happens presumably due to * a bug in one of the get_hw_state functions. Or someplace else * in our code, like the register restore mess on resume. Clamp * things to off as a safer default. */ connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } /* notify opregion of the sanitized encoder state */ intel_opregion_notify_encoder(encoder, connector && has_active_crtc); if (HAS_DDI(i915)) intel_ddi_sanitize_encoder_pll_mapping(encoder); } /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct drm_i915_private *i915) { struct intel_plane *plane; struct intel_crtc *crtc; for_each_intel_plane(&i915->drm, plane) { struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); struct intel_crtc_state *crtc_state; enum pipe pipe = PIPE_A; bool visible; visible = plane->get_hw_state(plane, &pipe); crtc = intel_crtc_for_pipe(i915, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] hw state readout: %s, pipe %c\n", plane->base.base.id, plane->base.name, str_enabled_disabled(visible), pipe_name(pipe)); } for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); intel_plane_fixup_bitmasks(crtc_state); } } static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) { struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_pmdemand_state *pmdemand_state = to_intel_pmdemand_state(i915->display.pmdemand.obj.state); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; u8 active_pipes = 0; for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); 
intel_crtc_state_reset(crtc_state, crtc); intel_crtc_get_pipe_config(crtc_state); crtc_state->hw.enable = crtc_state->hw.active; crtc->base.enabled = crtc_state->hw.enable; crtc->active = crtc_state->hw.active; if (crtc_state->hw.active) active_pipes |= BIT(crtc->pipe); drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, str_enabled_disabled(crtc_state->hw.active)); } cdclk_state->active_pipes = active_pipes; dbuf_state->active_pipes = active_pipes; readout_plane_state(i915); for_each_intel_encoder(&i915->drm, encoder) { struct intel_crtc_state *crtc_state = NULL; pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { crtc = intel_crtc_for_pipe(i915, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; intel_encoder_get_config(encoder, crtc_state); /* read out to slave crtc as well for bigjoiner */ if (crtc_state->bigjoiner_pipes) { struct intel_crtc *slave_crtc; /* encoder should read be linked to bigjoiner master */ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, intel_crtc_bigjoiner_slave_pipes(crtc_state)) { struct intel_crtc_state *slave_crtc_state; slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state); intel_encoder_get_config(encoder, slave_crtc_state); } } intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, true); } else { intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state, false); encoder->base.crtc = NULL; } if (encoder->sync_state) encoder->sync_state(encoder, crtc_state); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", encoder->base.base.id, encoder->base.name, str_enabled_disabled(encoder->base.crtc), pipe_name(pipe)); } intel_dpll_readout_hw_state(i915); drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->get_hw_state(connector)) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; connector->base.dpms = DRM_MODE_DPMS_ON; encoder = intel_attached_encoder(connector); connector->base.encoder = &encoder->base; crtc = to_intel_crtc(encoder->base.crtc); crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; if (crtc_state && crtc_state->hw.active) { /* * This has to be done during hardware readout * because anything calling .crtc_disable may * rely on the connector_mask being accurate. */ crtc_state->uapi.connector_mask |= drm_connector_mask(&connector->base); crtc_state->uapi.encoder_mask |= drm_encoder_mask(&encoder->base); } } else { connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] hw state readout: %s\n", connector->base.base.id, connector->base.name, str_enabled_disabled(connector->base.encoder)); } drm_connector_list_iter_end(&conn_iter); for_each_intel_crtc(&i915->drm, crtc) { struct intel_bw_state *bw_state = to_intel_bw_state(i915->display.bw.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; int min_cdclk = 0; if (crtc_state->hw.active) { /* * The initial mode needs to be set in order to keep * the atomic core happy. It wants a valid mode if the * crtc's enabled, so we do the above call. * * But we don't set all the derived state fully, hence * set a flag to indicate that a full recalculation is * needed on the next commit. 
*/ crtc_state->inherited = true; intel_crtc_update_active_timings(crtc_state, crtc_state->vrr.enable); intel_crtc_copy_hw_to_uapi_state(crtc_state); } for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); /* * FIXME don't have the fb yet, so can't * use intel_plane_data_rate() :( */ if (plane_state->uapi.visible) crtc_state->data_rate[plane->id] = 4 * crtc_state->pixel_rate; /* * FIXME don't have the fb yet, so can't * use plane->min_cdclk() :( */ if (plane_state->uapi.visible && plane->min_cdclk) { if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10) crtc_state->min_cdclk[plane->id] = DIV_ROUND_UP(crtc_state->pixel_rate, 2); else crtc_state->min_cdclk[plane->id] = crtc_state->pixel_rate; } drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] min_cdclk %d kHz\n", plane->base.base.id, plane->base.name, crtc_state->min_cdclk[plane->id]); } if (crtc_state->hw.active) { min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); if (drm_WARN_ON(&i915->drm, min_cdclk < 0)) min_cdclk = 0; } cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; cdclk_state->min_voltage_level[crtc->pipe] = crtc_state->min_voltage_level; intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, crtc_state->port_clock); intel_bw_crtc_update(bw_state, crtc_state); } intel_pmdemand_init_pmdemand_params(i915, pmdemand_state); } static void get_encoder_power_domains(struct drm_i915_private *i915) { struct intel_encoder *encoder; for_each_intel_encoder(&i915->drm, encoder) { struct intel_crtc_state *crtc_state; if (!encoder->get_power_domains) continue; /* * MST-primary and inactive encoders don't have a crtc state * and neither of these require any power domain references. */ if (!encoder->base.crtc) continue; crtc_state = to_intel_crtc_state(encoder->base.crtc->state); encoder->get_power_domains(encoder, crtc_state); } } static void intel_early_display_was(struct drm_i915_private *i915) { /* * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl * Also known as Wa_14010480278. */ if (IS_DISPLAY_VER(i915, 10, 12)) intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS); /* * WaRsPkgCStateDisplayPMReq:hsw * System hang if this isn't done before disabling all planes! */ if (IS_HASWELL(i915)) intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES); if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { /* Display WA #1142:kbl,cfl,cml */ intel_de_rmw(i915, CHICKEN_PAR1_1, KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22); intel_de_rmw(i915, CHICKEN_MISC_2, KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14, KBL_ARB_FILL_SPARE_14); } } void intel_modeset_setup_hw_state(struct drm_i915_private *i915, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); intel_early_display_was(i915); intel_modeset_readout_hw_state(i915); /* HW state is read out, now we need to sanitize this mess. */ get_encoder_power_domains(i915); intel_pch_sanitize(i915); /* * intel_sanitize_plane_mapping() may need to do vblank * waits, so we need vblank interrupts restored beforehand. 
*/ for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); intel_sanitize_fifo_underrun_reporting(crtc_state); drm_crtc_vblank_reset(&crtc->base); if (crtc_state->hw.active) { intel_dmc_enable_pipe(i915, crtc->pipe); intel_crtc_vblank_on(crtc_state); } } intel_fbc_sanitize(i915); intel_sanitize_plane_mapping(i915); for_each_intel_encoder(&i915->drm, encoder) intel_sanitize_encoder(encoder); /* * Sanitizing CRTCs needs their connector atomic state to be * up-to-date, so ensure that already here. */ intel_modeset_update_connector_atomic_state(i915); intel_sanitize_all_crtcs(i915, ctx); intel_dpll_sanitize_state(i915); intel_wm_get_hw_state(i915); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_power_domain_mask put_domains; intel_modeset_get_crtc_power_domains(crtc_state, &put_domains); if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM))) intel_modeset_put_crtc_power_domains(crtc, &put_domains); } intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); intel_power_domains_sanitize_state(i915); }
linux-master
drivers/gpu/drm/i915/display/intel_modeset_setup.c
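A note on the CRTC sanitization pass in the file above: intel_sanitize_all_crtcs() cannot settle in a single walk, because forcing one CRTC off (and thereby disabling its TypeC port) can change the HPD live state that other CRTCs observe, so the code repeats the walk until a pass forces nothing further off. The standalone sketch below illustrates only that fixed-point loop shape; item_needs_off(), N_ITEMS and the seeded mask are invented for the demo and are not part of the driver.

/*
 * Illustrative sketch (not kernel code): re-walk all items until a pass
 * changes nothing, mirroring the crtcs_forced_off loop above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_ITEMS 4

/* Hypothetical dependency: item i must be forced off once item (i + 1) is. */
static bool item_needs_off(int i, uint32_t off_mask)
{
	return i < N_ITEMS - 1 && (off_mask & (1u << (i + 1)));
}

int main(void)
{
	uint32_t off_mask = 1u << (N_ITEMS - 1);	/* seed: last item starts off */

	for (;;) {
		uint32_t old_mask = off_mask;

		for (int i = 0; i < N_ITEMS; i++) {
			if (off_mask & (1u << i))
				continue;	/* already handled */
			if (item_needs_off(i, off_mask))
				off_mask |= 1u << i;
		}

		if (off_mask == old_mask)
			break;	/* no change in this pass: fixed point reached */
	}

	printf("final off mask: 0x%x\n", off_mask);
	return 0;
}

The loop is guaranteed to terminate because the mask only ever gains bits and is bounded by the number of items, which is the same argument that bounds the kernel's crtcs_forced_off loop.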
// SPDX-License-Identifier: MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/drm_crtc.h> #include <drm/drm_vblank.h> #include "gt/intel_rps.h" #include "i915_drv.h" #include "intel_display_rps.h" #include "intel_display_types.h" struct wait_rps_boost { struct wait_queue_entry wait; struct drm_crtc *crtc; struct i915_request *request; }; static int do_rps_boost(struct wait_queue_entry *_wait, unsigned mode, int sync, void *key) { struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); struct i915_request *rq = wait->request; /* * If we missed the vblank, but the request is already running it * is reasonable to assume that it will complete before the next * vblank without our intervention, so leave RPS alone. */ if (!i915_request_started(rq)) intel_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); list_del(&wait->wait.entry); kfree(wait); return 1; } void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, struct dma_fence *fence) { struct wait_rps_boost *wait; if (!dma_fence_is_i915(fence)) return; if (DISPLAY_VER(to_i915(crtc->dev)) < 6) return; if (drm_crtc_vblank_get(crtc)) return; wait = kmalloc(sizeof(*wait), GFP_KERNEL); if (!wait) { drm_crtc_vblank_put(crtc); return; } wait->request = to_request(dma_fence_get(fence)); wait->crtc = crtc; wait->wait.func = do_rps_boost; wait->wait.flags = 0; add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); } void intel_display_rps_mark_interactive(struct drm_i915_private *i915, struct intel_atomic_state *state, bool interactive) { if (state->rps_interactive == interactive) return; intel_rps_mark_interactive(&to_gt(i915)->rps, interactive); state->rps_interactive = interactive; }
linux-master
drivers/gpu/drm/i915/display/intel_display_rps.c
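The wait_rps_boost machinery in the file above hangs a one-shot entry on the CRTC's vblank waitqueue: do_rps_boost() fires at the next vblank wakeup, boosts the GPU only if the flip's request has not started yet, then unlinks and frees its own entry. The sketch below is a plain userspace analogy of that "self-removing one-shot callback" shape, not kernel code; struct one_shot, add_one_shot() and fire_event() are names invented for the demo.

/*
 * Userspace analogy (not part of intel_display_rps.c): register one-shot
 * callbacks on a list, fire them once, let each callback free its node.
 */
#include <stdio.h>
#include <stdlib.h>

struct one_shot {
	struct one_shot *next;
	int (*func)(struct one_shot *self, void *key);
	void *data;
};

static struct one_shot *event_list;

/* Analogue of do_rps_boost(): act once, then free our own node. */
static int boost_cb(struct one_shot *self, void *key)
{
	(void)key;
	printf("boost for request %s\n", (const char *)self->data);
	free(self);
	return 1;
}

/* Analogue of add_wait_queue(): push a node onto the event list. */
static void add_one_shot(int (*func)(struct one_shot *, void *), void *data)
{
	struct one_shot *node = malloc(sizeof(*node));

	if (!node)
		return;
	node->func = func;
	node->data = data;
	node->next = event_list;
	event_list = node;
}

/* Analogue of the vblank wakeup: fire every registered callback once. */
static void fire_event(void *key)
{
	struct one_shot *node = event_list;

	event_list = NULL;
	while (node) {
		struct one_shot *next = node->next;

		node->func(node, key);	/* callback frees its own node */
		node = next;
	}
}

int main(void)
{
	add_one_shot(boost_cb, "rq-1");
	fire_event(NULL);
	return 0;
}

Freeing the node inside its own callback is safe here because fire_event() saves ->next before invoking it, much as do_rps_boost() does list_del() before kfree().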
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation * * DisplayPort support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code). */ #include <linux/string_helpers.h> #include "g4x_dp.h" #include "i915_reg.h" #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_pch_display.h" #include "intel_pps.h" #include "vlv_sideband.h" static const struct dpll g4x_dpll[] = { { .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, }, { .dot = 270000, .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2, }, }; static const struct dpll pch_dpll[] = { { .dot = 162000, .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9, }, { .dot = 270000, .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8, }, }; static const struct dpll vlv_dpll[] = { { .dot = 162000, .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81, }, { .dot = 270000, .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27, }, }; static const struct dpll chv_dpll[] = { /* m2 is .22 binary fixed point */ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ }, }; const struct dpll *vlv_get_dpll(struct drm_i915_private *i915) { return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0]; } void g4x_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct dpll *divisor = NULL; int i, count = 0; if (IS_G4X(dev_priv)) { divisor = g4x_dpll; count = ARRAY_SIZE(g4x_dpll); } else if (HAS_PCH_SPLIT(dev_priv)) { divisor = pch_dpll; count = ARRAY_SIZE(pch_dpll); } else if (IS_CHERRYVIEW(dev_priv)) { divisor = chv_dpll; count = ARRAY_SIZE(chv_dpll); } else if (IS_VALLEYVIEW(dev_priv)) { divisor = vlv_dpll; count = ARRAY_SIZE(vlv_dpll); } if (divisor && count) { for (i = 0; i < count; i++) { if (pipe_config->port_clock == divisor[i].dot) { pipe_config->dpll = divisor[i]; pipe_config->clock_set = true; break; } } } } static void intel_dp_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; intel_dp_set_link_params(intel_dp, pipe_config->port_clock, pipe_config->lane_count); /* * There are four kinds of DP registers: * IBX PCH * SNB CPU * IVB CPU * CPT PCH * * IBX PCH and CPU are the same for almost everything, * except that the CPU DP PLL is configured in this * register * * CPT PCH is quite different, having many bits moved * to the TRANS_DP_CTL register instead. That * configuration happens (oddly) in ilk_pch_enable */ /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. 
*/ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; /* Handle DP bits in common between all three register formats */ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); /* Split out the IBX/CPU vs CPT settings */ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) intel_dp->DP |= DP_ENHANCED_FRAMING; intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), TRANS_DP_ENH_FRAMING, drm_dp_enhanced_frame_cap(intel_dp->dpcd) ? TRANS_DP_ENH_FRAMING : 0); } else { if (IS_G4X(dev_priv) && pipe_config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; intel_dp->DP |= DP_LINK_TRAIN_OFF; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) intel_dp->DP |= DP_ENHANCED_FRAMING; if (IS_CHERRYVIEW(dev_priv)) intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); else intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); } } static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; I915_STATE_WARN(dev_priv, cur_state != state, "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", dig_port->base.base.base.id, dig_port->base.base.name, str_on_off(state), str_on_off(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) { bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; I915_STATE_WARN(dev_priv, cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", str_on_off(state), str_on_off(cur_state)); } #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) static void ilk_edp_pll_on(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", pipe_config->port_clock); intel_dp->DP &= ~DP_PLL_FREQ_MASK; if (pipe_config->port_clock == 162000) intel_dp->DP |= DP_PLL_FREQ_162MHZ; else intel_dp->DP |= DP_PLL_FREQ_270MHZ; intel_de_write(dev_priv, DP_A, intel_dp->DP); intel_de_posting_read(dev_priv, DP_A); udelay(500); /* * [DevILK] Work around required when enabling DP PLL * while a pipe is enabled going to FDI: * 1. Wait for the start of vertical blank on the enabled pipe going to FDI * 2. 
Program DP PLL enable */ if (IS_IRONLAKE(dev_priv)) intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); intel_dp->DP |= DP_PLL_ENABLE; intel_de_write(dev_priv, DP_A, intel_dp->DP); intel_de_posting_read(dev_priv, DP_A); udelay(200); } static void ilk_edp_pll_off(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); intel_dp->DP &= ~DP_PLL_ENABLE; intel_de_write(dev_priv, DP_A, intel_dp->DP); intel_de_posting_read(dev_priv, DP_A); udelay(200); } static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, enum port port, enum pipe *pipe) { enum pipe p; for_each_pipe(dev_priv, p) { u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { *pipe = p; return true; } } drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", port_name(port)); /* must initialize pipe to something for the asserts */ *pipe = PIPE_A; return false; } bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t dp_reg, enum port port, enum pipe *pipe) { bool ret; u32 val; val = intel_de_read(dev_priv, dp_reg); ret = val & DP_PORT_EN; /* asserts want to know the pipe even if the port is disabled */ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) ret &= cpt_dp_port_selected(dev_priv, port, pipe); else if (IS_CHERRYVIEW(dev_priv)) *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; else *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; return ret; } static bool intel_dp_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_wakeref_t wakeref; bool ret; wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); if (!wakeref) return false; ret = g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, encoder->port, pipe); intel_display_power_put(dev_priv, encoder->power_domain, wakeref); return ret; } static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->has_pch_encoder) { intel_pch_transcoder_get_m1_n1(crtc, &crtc_state->dp_m_n); intel_pch_transcoder_get_m2_n2(crtc, &crtc_state->dp_m2_n2); } else { intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, &crtc_state->dp_m_n); intel_cpu_transcoder_get_m2_n2(crtc, crtc_state->cpu_transcoder, &crtc_state->dp_m2_n2); } } static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 tmp, flags = 0; enum port port = encoder->port; struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); if (encoder->type == INTEL_OUTPUT_EDP) pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); else pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); tmp = intel_de_read(dev_priv, intel_dp->output_reg); pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { u32 trans_dp = 
intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; } else { if (tmp & DP_SYNC_HS_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else flags |= DRM_MODE_FLAG_NHSYNC; if (tmp & DP_SYNC_VS_HIGH) flags |= DRM_MODE_FLAG_PVSYNC; else flags |= DRM_MODE_FLAG_NVSYNC; } pipe_config->hw.adjusted_mode.flags |= flags; if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; pipe_config->lane_count = ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; g4x_dp_get_m_n(pipe_config); if (port == PORT_A) { if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) pipe_config->port_clock = 162000; else pipe_config->port_clock = 270000; } pipe_config->hw.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); if (intel_dp_is_edp(intel_dp)) intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp); intel_audio_codec_get_config(encoder, pipe_config); } static void intel_dp_link_down(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; if (drm_WARN_ON(&dev_priv->drm, (intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN) == 0)) return; drm_dbg_kms(&dev_priv->drm, "\n"); if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; } else { intel_dp->DP &= ~DP_LINK_TRAIN_MASK; intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE; } intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); /* * HW workaround for IBX, we need to move the port * to transcoder A after disabling it to allow the * matching HDMI port to be enabled on transcoder A. */ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { /* * We get CPU/PCH FIFO underruns on the other pipe when * doing the workaround. Sweep them under the rug. 
*/ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); /* always enable with pattern 1 (as per spec) */ intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | DP_LINK_TRAIN_PAT_1; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_dp->DP &= ~DP_PORT_EN; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } msleep(intel_dp->pps.panel_power_down_delay); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t wakeref; with_intel_pps_lock(intel_dp, wakeref) intel_dp->pps.active_pipe = INVALID_PIPE; } } static void intel_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp->link_trained = false; intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); /* * Make sure the panel is off before trying to change the mode. * But also ensure that we have vdd while we switch off the panel. */ intel_pps_vdd_on(intel_dp); intel_edp_backlight_off(old_conn_state); intel_dp_set_power(intel_dp, DP_SET_POWER_D3); intel_pps_off(intel_dp); } static void g4x_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); } static void vlv_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); } static void g4x_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; /* * Bspec does not list a specific disable sequence for g4x DP. * Follow the ilk+ sequence (disable pipe before the port) for * g4x DP as it does not suffer from underruns like the normal * g4x modeset sequence (disable pipe after the port). 
*/ intel_dp_link_down(encoder, old_crtc_state); /* Only ilk+ has port A */ if (port == PORT_A) ilk_edp_pll_off(intel_dp, old_crtc_state); } static void vlv_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { intel_dp_link_down(encoder, old_crtc_state); } static void chv_post_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_dp_link_down(encoder, old_crtc_state); vlv_dpio_get(dev_priv); /* Assert data lane reset */ chv_data_lane_soft_reset(encoder, old_crtc_state, true); vlv_dpio_put(dev_priv); } static void cpt_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; break; case DP_TRAINING_PATTERN_1: intel_dp->DP |= DP_LINK_TRAIN_PAT_1_CPT; break; case DP_TRAINING_PATTERN_2: intel_dp->DP |= DP_LINK_TRAIN_PAT_2_CPT; break; default: MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat)); return; } intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } static void g4x_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_dp->DP &= ~DP_LINK_TRAIN_MASK; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: intel_dp->DP |= DP_LINK_TRAIN_OFF; break; case DP_TRAINING_PATTERN_1: intel_dp->DP |= DP_LINK_TRAIN_PAT_1; break; case DP_TRAINING_PATTERN_2: intel_dp->DP |= DP_LINK_TRAIN_PAT_2; break; default: MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat)); return; } intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } static void intel_dp_enable_port(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* enable with pattern 1 (as per spec) */ intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_1); /* * Magic for VLV/CHV. We _must_ first set up the register * without actually enabling the port, and then do another * write to enable the port. Otherwise link training will * fail when the power sequencer is freshly used for this port. 
*/ intel_dp->DP |= DP_PORT_EN; if (crtc_state->has_audio) intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } static void intel_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); intel_wakeref_t wakeref; if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) return; with_intel_pps_lock(intel_dp, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_pps_init(encoder, pipe_config); intel_dp_enable_port(intel_dp, pipe_config); intel_pps_vdd_on_unlocked(intel_dp); intel_pps_on_unlocked(intel_dp); intel_pps_vdd_off_unlocked(intel_dp, true); } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { unsigned int lane_mask = 0x0; if (IS_CHERRYVIEW(dev_priv)) lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), lane_mask); } intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, pipe_config); intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, pipe_config); intel_dp_start_link_train(intel_dp, pipe_config); intel_dp_stop_link_train(intel_dp, pipe_config); } static void g4x_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_enable_dp(state, encoder, pipe_config, conn_state); intel_audio_codec_enable(encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } static void vlv_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_audio_codec_enable(encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } static void g4x_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; intel_dp_prepare(encoder, pipe_config); /* Only ilk+ has port A */ if (port == PORT_A) ilk_edp_pll_on(intel_dp, pipe_config); } static void vlv_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { vlv_phy_pre_encoder_enable(encoder, pipe_config); intel_enable_dp(state, encoder, pipe_config, conn_state); } static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_dp_prepare(encoder, pipe_config); vlv_phy_pre_pll_enable(encoder, pipe_config); } static void chv_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { chv_phy_pre_encoder_enable(encoder, pipe_config); intel_enable_dp(state, encoder, pipe_config, conn_state); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); } static void 
chv_dp_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { intel_dp_prepare(encoder, pipe_config); chv_phy_pre_pll_enable(encoder, pipe_config); } static void chv_dp_post_pll_disable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { chv_phy_post_pll_disable(encoder, old_crtc_state); } static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; } static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp) { return DP_TRAIN_PRE_EMPH_LEVEL_2; } static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp) { return DP_TRAIN_PRE_EMPH_LEVEL_3; } static void vlv_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: preemph_reg_value = 0x0004000; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: demph_reg_value = 0x2B405555; uniqtranscale_reg_value = 0x552AB83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: demph_reg_value = 0x2B404040; uniqtranscale_reg_value = 0x5548B83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: demph_reg_value = 0x2B245555; uniqtranscale_reg_value = 0x5560B83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: demph_reg_value = 0x2B405555; uniqtranscale_reg_value = 0x5598DA3A; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_1: preemph_reg_value = 0x0002000; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: demph_reg_value = 0x2B404040; uniqtranscale_reg_value = 0x5552B83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: demph_reg_value = 0x2B404848; uniqtranscale_reg_value = 0x5580B83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: demph_reg_value = 0x2B404040; uniqtranscale_reg_value = 0x55ADDA3A; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_2: preemph_reg_value = 0x0000000; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: demph_reg_value = 0x2B305555; uniqtranscale_reg_value = 0x5570B83A; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: demph_reg_value = 0x2B2B4040; uniqtranscale_reg_value = 0x55ADDA3A; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_3: preemph_reg_value = 0x0006000; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: demph_reg_value = 0x1B405555; uniqtranscale_reg_value = 0x55ADDA3A; break; default: return; } break; default: return; } vlv_set_phy_signal_level(encoder, crtc_state, demph_reg_value, preemph_reg_value, uniqtranscale_reg_value, 0); } static void chv_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 deemph_reg_value, margin_reg_value; bool uniq_trans_scale = false; u8 train_set = intel_dp->train_set[0]; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: deemph_reg_value = 128; margin_reg_value = 52; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: deemph_reg_value = 128; margin_reg_value = 77; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: deemph_reg_value = 128; margin_reg_value = 102; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: deemph_reg_value = 128; margin_reg_value = 154; uniq_trans_scale = true; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_1: switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: deemph_reg_value = 85; margin_reg_value = 78; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: deemph_reg_value = 85; margin_reg_value = 116; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: deemph_reg_value = 85; margin_reg_value = 154; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_2: switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: deemph_reg_value = 64; margin_reg_value = 104; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: deemph_reg_value = 64; margin_reg_value = 154; break; default: return; } break; case DP_TRAIN_PRE_EMPH_LEVEL_3: switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: deemph_reg_value = 43; margin_reg_value = 154; break; default: return; } break; default: return; } chv_set_phy_signal_level(encoder, crtc_state, deemph_reg_value, margin_reg_value, uniq_trans_scale); } static u32 g4x_signal_levels(u8 train_set) { u32 signal_levels = 0; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: default: signal_levels |= DP_VOLTAGE_0_4; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: signal_levels |= DP_VOLTAGE_0_6; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: signal_levels |= DP_VOLTAGE_0_8; break; case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: signal_levels |= DP_VOLTAGE_1_2; break; } switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: default: signal_levels |= DP_PRE_EMPHASIS_0; break; case DP_TRAIN_PRE_EMPH_LEVEL_1: signal_levels |= DP_PRE_EMPHASIS_3_5; break; case DP_TRAIN_PRE_EMPH_LEVEL_2: signal_levels |= DP_PRE_EMPHASIS_6; break; case DP_TRAIN_PRE_EMPH_LEVEL_3: signal_levels |= DP_PRE_EMPHASIS_9_5; break; } return signal_levels; } static void g4x_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = g4x_signal_levels(train_set); drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); intel_dp->DP |= signal_levels; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } /* SNB CPU eDP voltage swing and pre-emphasis control */ static u32 snb_cpu_edp_signal_levels(u8 train_set) { u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; case 
DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; default: MISSING_CASE(signal_levels); return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; } } static void snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = snb_cpu_edp_signal_levels(train_set); drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; intel_dp->DP |= signal_levels; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } /* IVB CPU eDP voltage swing and pre-emphasis control */ static u32 ivb_cpu_edp_signal_levels(u8 train_set) { u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_400MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_400MV_3_5DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: return EDP_LINK_TRAIN_400MV_6DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_600MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_600MV_3_5DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: return EDP_LINK_TRAIN_800MV_0DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_800MV_3_5DB_IVB; default: MISSING_CASE(signal_levels); return EDP_LINK_TRAIN_500MV_0DB_IVB; } } static void ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; signal_levels = ivb_cpu_edp_signal_levels(train_set); drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; intel_dp->DP |= signal_levels; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } /* * If display is now connected check links status, * there has been known issues of link loss triggering * long pulse. * * Some sinks (eg. ASUS PB287Q) seem to perform some * weird HPD ping pong during modesets. So we can apparently * end up with HPD going low during a modeset, and then * going back up soon after. And once that happens we must * retrain the link to get a picture. That's in case no * userspace component reacted to intermittent HPD dip. 
*/ static enum intel_hotplug_state intel_dp_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; if (intel_dp->compliance.test_active && intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { intel_dp_phy_test(encoder); /* just do the PHY test and nothing else */ return INTEL_HOTPLUG_UNCHANGED; } state = intel_encoder_hotplug(encoder, connector); drm_modeset_acquire_init(&ctx, 0); for (;;) { ret = intel_dp_retrain_link(encoder, &ctx); if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); continue; } break; } drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); drm_WARN(encoder->base.dev, ret, "Acquiring modeset locks failed with %i\n", ret); /* * Keeping it consistent with intel_ddi_hotplug() and * intel_hdmi_hotplug(). */ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) state = INTEL_HOTPLUG_RETRY; return state; } static bool ibx_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } static bool g4x_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit; switch (encoder->hpd_pin) { case HPD_PORT_B: bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; break; case HPD_PORT_C: bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; break; case HPD_PORT_D: bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; break; default: MISSING_CASE(encoder->hpd_pin); return false; } return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; } static bool ilk_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } static void intel_dp_encoder_destroy(struct drm_encoder *encoder) { intel_dp_encoder_flush_work(encoder); drm_encoder_cleanup(encoder); kfree(enc_to_dig_port(to_intel_encoder(encoder))); } enum pipe vlv_active_pipe(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum pipe pipe; if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg, encoder->port, &pipe)) return pipe; return INVALID_PIPE; } static void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); intel_dp->reset_link_params = true; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t wakeref; with_intel_pps_lock(intel_dp, wakeref) intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); } intel_pps_encoder_reset(intel_dp); } static const struct drm_encoder_funcs intel_dp_enc_funcs = { .reset = intel_dp_encoder_reset, .destroy = intel_dp_encoder_destroy, }; bool g4x_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; if (!assert_port_valid(dev_priv, port)) return false; devdata = intel_bios_encoder_data_lookup(dev_priv, port); /* FIXME bail? 
*/ if (!devdata) drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n", port_name(port)); dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return false; dig_port->aux_ch = AUX_CH_NONE; intel_connector = intel_connector_alloc(); if (!intel_connector) goto err_connector_alloc; intel_encoder = &dig_port->base; encoder = &intel_encoder->base; intel_encoder->devdata = devdata; mutex_init(&dig_port->hdcp_mutex); if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) goto err_encoder_init; intel_encoder->hotplug = intel_dp_hotplug; intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; intel_encoder->sync_state = intel_dp_sync_state; intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check; intel_encoder->update_pipe = intel_backlight_update; intel_encoder->suspend = intel_dp_encoder_suspend; intel_encoder->shutdown = intel_dp_encoder_shutdown; if (IS_CHERRYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; intel_encoder->disable = vlv_disable_dp; intel_encoder->post_disable = chv_post_disable_dp; intel_encoder->post_pll_disable = chv_dp_post_pll_disable; } else if (IS_VALLEYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; intel_encoder->pre_enable = vlv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; intel_encoder->disable = vlv_disable_dp; intel_encoder->post_disable = vlv_post_disable_dp; } else { intel_encoder->pre_enable = g4x_pre_enable_dp; intel_encoder->enable = g4x_enable_dp; intel_encoder->disable = g4x_disable_dp; intel_encoder->post_disable = g4x_post_disable_dp; } if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) dig_port->dp.set_link_train = cpt_set_link_train; else dig_port->dp.set_link_train = g4x_set_link_train; if (IS_CHERRYVIEW(dev_priv)) intel_encoder->set_signal_levels = chv_set_signal_levels; else if (IS_VALLEYVIEW(dev_priv)) intel_encoder->set_signal_levels = vlv_set_signal_levels; else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels; else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A) intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels; else intel_encoder->set_signal_levels = g4x_set_signal_levels; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { dig_port->dp.preemph_max = intel_dp_preemph_max_3; dig_port->dp.voltage_max = intel_dp_voltage_max_3; } else { dig_port->dp.preemph_max = intel_dp_preemph_max_2; dig_port->dp.voltage_max = intel_dp_voltage_max_2; } dig_port->dp.output_reg = output_reg; dig_port->max_lanes = 4; intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) intel_encoder->pipe_mask = BIT(PIPE_C); else intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 0; intel_encoder->port = port; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); dig_port->hpd_pulse = intel_dp_hpd_pulse; if (HAS_GMCH(dev_priv)) { dig_port->connected = g4x_digital_port_connected; } else { if (port == PORT_A) dig_port->connected = ilk_digital_port_connected; else 
dig_port->connected = ibx_digital_port_connected; } if (port != PORT_A) intel_infoframe_init(dig_port); dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); if (dig_port->aux_ch == AUX_CH_NONE) goto err_init_connector; if (!intel_dp_init_connector(dig_port, intel_connector)) goto err_init_connector; return true; err_init_connector: drm_encoder_cleanup(encoder); err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(dig_port); return false; }
linux-master
drivers/gpu/drm/i915/display/g4x_dp.c
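For reference, g4x_dp_set_clock() in the file above is a straight table lookup: it selects the per-platform divisor array and copies the entry whose .dot field equals the DP port clock (162000 kHz for RBR, 270000 kHz for HBR links). The compilable sketch below reuses the g4x divisor values from the file to show that lookup; struct demo_dpll, pick_dpll() and ARRAY_LEN() are stand-ins invented for the demo.

/*
 * Illustrative sketch (not part of g4x_dp.c): find the DPLL divisor entry
 * matching a given port clock, as g4x_dp_set_clock() does.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_dpll {
	int dot;	/* dot clock in kHz */
	int p1, p2, n, m1, m2;
};

static const struct demo_dpll demo_table[] = {
	{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 },
	{ .dot = 270000, .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 },
};

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

/* Return true and fill *out when a divisor entry matches port_clock. */
static bool pick_dpll(int port_clock, struct demo_dpll *out)
{
	for (unsigned int i = 0; i < ARRAY_LEN(demo_table); i++) {
		if (demo_table[i].dot == port_clock) {
			*out = demo_table[i];
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct demo_dpll dpll;

	if (pick_dpll(270000, &dpll))
		printf("270 MHz link: n=%d m1=%d m2=%d p1=%d p2=%d\n",
		       dpll.n, dpll.m1, dpll.m2, dpll.p1, dpll.p2);
	return 0;
}

If no entry matches, the driver simply leaves clock_set false rather than failing, which is why the lookup is written as a predicate instead of an assertion.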
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <linux/backlight.h> #include <linux/kernel.h> #include <linux/pwm.h> #include <linux/string_helpers.h> #include <acpi/video.h> #include "i915_reg.h" #include "intel_backlight.h" #include "intel_backlight_regs.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" #include "intel_pci_config.h" #include "intel_pps.h" #include "intel_quirks.h" /** * scale - scale values from one range to another * @source_val: value in range [@source_min..@source_max] * @source_min: minimum legal value for @source_val * @source_max: maximum legal value for @source_val * @target_min: corresponding target value for @source_min * @target_max: corresponding target value for @source_max * * Return @source_val in range [@source_min..@source_max] scaled to range * [@target_min..@target_max]. */ static u32 scale(u32 source_val, u32 source_min, u32 source_max, u32 target_min, u32 target_max) { u64 target_val; WARN_ON(source_min > source_max); WARN_ON(target_min > target_max); /* defensive */ source_val = clamp(source_val, source_min, source_max); /* avoid overflows */ target_val = mul_u32_u32(source_val - source_min, target_max - target_min); target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); target_val += target_min; return target_val; } /* * Scale user_level in range [0..user_max] to [0..hw_max], clamping the result * to [hw_min..hw_max]. */ static u32 clamp_user_to_hw(struct intel_connector *connector, u32 user_level, u32 user_max) { struct intel_panel *panel = &connector->panel; u32 hw_level; hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max); hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max); return hw_level; } /* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. 
*/ static u32 scale_hw_to_user(struct intel_connector *connector, u32 hw_level, u32 user_max) { struct intel_panel *panel = &connector->panel; return scale(hw_level, panel->backlight.min, panel->backlight.max, 0, user_max); } u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); if (i915->params.invert_brightness < 0) return val; if (i915->params.invert_brightness > 0 || intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) { return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; } return val; } void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n", connector->base.base.id, connector->base.name, val); panel->backlight.pwm_funcs->set(conn_state, val); } u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_WARN_ON_ONCE(&i915->drm, panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); val = scale(val, panel->backlight.min, panel->backlight.max, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); return intel_backlight_invert_pwm_level(connector, val); } u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_WARN_ON_ONCE(&i915->drm, panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); if (i915->params.invert_brightness > 0 || (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, panel->backlight.min, panel->backlight.max); } static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); return intel_de_read(i915, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); return intel_de_read(i915, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 val; val = intel_de_read(i915, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (DISPLAY_VER(i915) < 4) val >>= 1; if (panel->backlight.combination_mode) { u8 lbpc; pci_read_config_byte(to_pci_dev(i915->drm.dev), LBPC, &lbpc); val *= lbpc; } return val; } static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B)) return 0; return intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused) { struct 
drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; return intel_de_read(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller)); } static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused) { struct intel_panel *panel = &connector->panel; struct pwm_state state; pwm_get_state(panel->backlight.pwm, &state); return pwm_get_relative_duty_cycle(&state, 100); } static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); u32 val; val = intel_de_read(i915, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; intel_de_write(i915, BLC_PWM_PCH_CTL2, val | level); } static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); u32 tmp; tmp = intel_de_read(i915, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; intel_de_write(i915, BLC_PWM_CPU_CTL, tmp | level); } static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 tmp, mask; drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); if (panel->backlight.combination_mode) { u8 lbpc; lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1; level /= lbpc; pci_write_config_byte(to_pci_dev(i915->drm.dev), LBPC, lbpc); } if (DISPLAY_VER(i915) == 4) { mask = BACKLIGHT_DUTY_CYCLE_MASK; } else { level <<= 1; mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV; } tmp = intel_de_read(i915, BLC_PWM_CTL) & ~mask; intel_de_write(i915, BLC_PWM_CTL, tmp | level); } static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; u32 tmp; tmp = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), tmp | level); } static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; intel_de_write(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller), level); } static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n", connector->base.base.id, connector->base.name, level); panel->backlight.funcs->set(conn_state, level); } /* set backlight brightness to level in range 
[0..max], assuming hw min is * respected. */ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, u32 user_level, u32 user_max) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 hw_level; /* * Lack of crtc may occur during driver init because * connection_mutex isn't held across the entire backlight * setup + modeset readout, and the BIOS can issue the * requests at any time. */ if (!panel->backlight.present || !conn_state->crtc) return; mutex_lock(&i915->display.backlight.lock); drm_WARN_ON(&i915->drm, panel->backlight.max == 0); hw_level = clamp_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; if (panel->backlight.device) panel->backlight.device->props.brightness = scale_hw_to_user(connector, panel->backlight.level, panel->backlight.device->props.max_brightness); if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); mutex_unlock(&i915->display.backlight.lock); } static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); u32 tmp; intel_backlight_set_pwm_level(old_conn_state, level); /* * Although we don't support or enable CPU PWM with LPT/SPT based * systems, it may have been enabled prior to loading the * driver. Disable to avoid warnings on LCPLL disable. * * This needs rework if we need to add support for CPU PWM on PCH split * platforms. */ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (tmp & BLM_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n", connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); } intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); intel_backlight_set_pwm_level(old_conn_state, val); intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0); intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { intel_backlight_set_pwm_level(old_conn_state, val); } static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev); intel_backlight_set_pwm_level(old_conn_state, val); intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0); } static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; intel_backlight_set_pwm_level(old_conn_state, val); intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0); } static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = 
&connector->panel; intel_backlight_set_pwm_level(old_conn_state, val); intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), BXT_BLC_PWM_ENABLE, 0); if (panel->backlight.controller == 1) intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0); } static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; intel_backlight_set_pwm_level(old_conn_state, val); intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), BXT_BLC_PWM_ENABLE, 0); } static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; intel_backlight_set_pwm_level(old_conn_state, level); panel->backlight.pwm_state.enabled = false; pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } void intel_backlight_disable(const struct drm_connector_state *old_conn_state) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (!panel->backlight.present) return; /* * Do not disable backlight on the vga_switcheroo path. When switching * away from i915, the other client may depend on i915 to handle the * backlight. This will leave the backlight on unnecessarily when * another client is not activated. */ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n", connector->base.base.id, connector->base.name); return; } mutex_lock(&i915->display.backlight.lock); if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_POWERDOWN; panel->backlight.enabled = false; panel->backlight.funcs->disable(old_conn_state, 0); mutex_unlock(&i915->display.backlight.lock); } static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pch_ctl1, pch_ctl2; pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } if (HAS_PCH_LPT(i915)) intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY, panel->backlight.alternate_pwm_increment ? LPT_PWM_GRANULARITY : 0); else intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY, panel->backlight.alternate_pwm_increment ? SPT_PWM_GRANULARITY : 0); pch_ctl2 = panel->backlight.pwm_level_max << 16; intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2); pch_ctl1 = 0; if (panel->backlight.active_low_pwm) pch_ctl1 |= BLM_PCH_POLARITY; /* After LPT, override is the default. */ if (HAS_PCH_LPT(i915)) pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); intel_de_posting_read(i915, BLC_PWM_PCH_CTL1); intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); /* This won't stick until the above enable. 
*/ intel_backlight_set_pwm_level(conn_state, level); } static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 cpu_ctl2, pch_ctl1, pch_ctl2; cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n", connector->base.base.id, connector->base.name); cpu_ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2); } pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } if (cpu_transcoder == TRANSCODER_EDP) cpu_ctl2 = BLM_TRANSCODER_EDP; else cpu_ctl2 = BLM_PIPE(cpu_transcoder); intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2); intel_de_posting_read(i915, BLC_PWM_CPU_CTL2); intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); /* This won't stick until the above enable. */ intel_backlight_set_pwm_level(conn_state, level); pch_ctl2 = panel->backlight.pwm_level_max << 16; intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2); pch_ctl1 = 0; if (panel->backlight.active_low_pwm) pch_ctl1 |= BLM_PCH_POLARITY; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); intel_de_posting_read(i915, BLC_PWM_PCH_CTL1); intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); } static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, freq; ctl = intel_de_read(i915, BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CTL, 0); } freq = panel->backlight.pwm_level_max; if (panel->backlight.combination_mode) freq /= 0xff; ctl = freq << 17; if (panel->backlight.combination_mode) ctl |= BLM_LEGACY_MODE; if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; intel_de_write(i915, BLC_PWM_CTL, ctl); intel_de_posting_read(i915, BLC_PWM_CTL); /* XXX: combine this into above write? */ intel_backlight_set_pwm_level(conn_state, level); /* * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 * that has backlight. 
*/ if (DISPLAY_VER(i915) == 2) intel_de_write(i915, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); } static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; u32 ctl, ctl2, freq; ctl2 = intel_de_read(i915, BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CTL2, ctl2); } freq = panel->backlight.pwm_level_max; if (panel->backlight.combination_mode) freq /= 0xff; ctl = freq << 16; intel_de_write(i915, BLC_PWM_CTL, ctl); ctl2 = BLM_PIPE(pipe); if (panel->backlight.combination_mode) ctl2 |= BLM_COMBINATION_MODE; if (panel->backlight.active_low_pwm) ctl2 |= BLM_POLARITY_I965; intel_de_write(i915, BLC_PWM_CTL2, ctl2); intel_de_posting_read(i915, BLC_PWM_CTL2); intel_de_write(i915, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); intel_backlight_set_pwm_level(conn_state, level); } static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; u32 ctl, ctl2; ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2); } ctl = panel->backlight.pwm_level_max << 16; intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), ctl); /* XXX: combine this into above write? */ intel_backlight_set_pwm_level(conn_state, level); ctl2 = 0; if (panel->backlight.active_low_pwm) ctl2 |= BLM_POLARITY_I965; intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2); intel_de_posting_read(i915, VLV_BLC_PWM_CTL2(pipe)); intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE); } static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; u32 pwm_ctl, val; /* Controller 1 uses the utility pin. 
*/ if (panel->backlight.controller == 1) { val = intel_de_read(i915, UTIL_PIN_CTL); if (val & UTIL_PIN_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n", connector->base.base.id, connector->base.name); val &= ~UTIL_PIN_ENABLE; intel_de_write(i915, UTIL_PIN_CTL, val); } val = 0; if (panel->backlight.util_pin_active_low) val |= UTIL_PIN_POLARITY; intel_de_write(i915, UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); } pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", connector->base.base.id, connector->base.name); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); } intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller), panel->backlight.pwm_level_max); intel_backlight_set_pwm_level(conn_state, level); pwm_ctl = 0; if (panel->backlight.active_low_pwm) pwm_ctl |= BXT_BLC_PWM_POLARITY; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl | BXT_BLC_PWM_ENABLE); } static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pwm_ctl; pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { drm_dbg_kms(&i915->drm, "backlight already enabled\n"); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); } intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller), panel->backlight.pwm_level_max); intel_backlight_set_pwm_level(conn_state, level); pwm_ctl = 0; if (panel->backlight.active_low_pwm) pwm_ctl |= BXT_BLC_PWM_POLARITY; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl | BXT_BLC_PWM_ENABLE); } static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); panel->backlight.pwm_state.enabled = true; pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; WARN_ON(panel->backlight.max == 0); if (panel->backlight.level <= panel->backlight.min) { panel->backlight.level = panel->backlight.max; if (panel->backlight.device) panel->backlight.device->props.brightness = scale_hw_to_user(connector, panel->backlight.level, panel->backlight.device->props.max_brightness); } panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); panel->backlight.enabled = true; if 
(panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_UNBLANK; } void intel_backlight_enable(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; if (!panel->backlight.present) return; drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe)); mutex_lock(&i915->display.backlight.lock); __intel_backlight_enable(crtc_state, conn_state); mutex_unlock(&i915->display.backlight.lock); } #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) static u32 intel_panel_get_backlight(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 val = 0; mutex_lock(&i915->display.backlight.lock); if (panel->backlight.enabled) val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector)); mutex_unlock(&i915->display.backlight.lock); drm_dbg_kms(&i915->drm, "get backlight PWM = %d\n", val); return val; } /* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */ static u32 scale_user_to_hw(struct intel_connector *connector, u32 user_level, u32 user_max) { struct intel_panel *panel = &connector->panel; return scale(user_level, 0, user_max, panel->backlight.min, panel->backlight.max); } /* set backlight brightness to level in range [0..max], scaling wrt hw min */ static void intel_panel_set_backlight(const struct drm_connector_state *conn_state, u32 user_level, u32 user_max) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 hw_level; if (!panel->backlight.present) return; mutex_lock(&i915->display.backlight.lock); drm_WARN_ON(&i915->drm, panel->backlight.max == 0); hw_level = scale_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); mutex_unlock(&i915->display.backlight.lock); } static int intel_backlight_device_update_status(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); drm_dbg_kms(&i915->drm, "updating intel_backlight, brightness=%d/%d\n", bd->props.brightness, bd->props.max_brightness); intel_panel_set_backlight(connector->base.state, bd->props.brightness, bd->props.max_brightness); /* * Allow flipping bl_power as a sub-state of enabled. Sadly the * backlight class device does not make it easy to differentiate * between callbacks for brightness and bl_power, so our backlight_power * callback needs to take this into account. 
*/ if (panel->backlight.enabled) { if (panel->backlight.power) { bool enable = bd->props.power == FB_BLANK_UNBLANK && bd->props.brightness != 0; panel->backlight.power(connector, enable); } } else { bd->props.power = FB_BLANK_POWERDOWN; } drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return 0; } static int intel_backlight_device_get_brightness(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct drm_i915_private *i915 = to_i915(connector->base.dev); intel_wakeref_t wakeref; int ret = 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { u32 hw_level; drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); hw_level = intel_panel_get_backlight(connector); ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); } return ret; } static const struct backlight_ops intel_backlight_device_ops = { .update_status = intel_backlight_device_update_status, .get_brightness = intel_backlight_device_get_brightness, }; int intel_backlight_device_register(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; struct backlight_properties props; struct backlight_device *bd; const char *name; int ret = 0; if (WARN_ON(panel->backlight.device)) return -ENODEV; if (!panel->backlight.present) return 0; WARN_ON(panel->backlight.max == 0); if (!acpi_video_backlight_use_native()) { drm_info(&i915->drm, "Skipping intel_backlight registration\n"); return 0; } memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; /* * Note: Everything should work even if the backlight device max * presented to the userspace is arbitrarily chosen. */ props.max_brightness = panel->backlight.max; props.brightness = scale_hw_to_user(connector, panel->backlight.level, props.max_brightness); if (panel->backlight.enabled) props.power = FB_BLANK_UNBLANK; else props.power = FB_BLANK_POWERDOWN; name = kstrdup("intel_backlight", GFP_KERNEL); if (!name) return -ENOMEM; bd = backlight_device_get_by_name(name); if (bd) { put_device(&bd->dev); /* * Using the same name independent of the drm device or connector * prevents registration of multiple backlight devices in the * driver. However, we need to use the default name for backward * compatibility. Use unique names for subsequent backlight devices as a * fallback when the default name already exists. */ kfree(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) return -ENOMEM; } bd = backlight_device_register(name, connector->base.kdev, connector, &intel_backlight_device_ops, &props); if (IS_ERR(bd)) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n", connector->base.base.id, connector->base.name, name, PTR_ERR(bd)); ret = PTR_ERR(bd); goto out; } panel->backlight.device = bd; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight device %s registered\n", connector->base.base.id, connector->base.name, name); out: kfree(name); return ret; } void intel_backlight_device_unregister(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; if (panel->backlight.device) { backlight_device_unregister(panel->backlight.device); panel->backlight.device = NULL; } } #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* * CNP: PWM clock frequency is 19.2 MHz or 24 MHz. 
* PWM increment = 1 */ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq), pwm_freq_hz); } /* * BXT: PWM clock frequency = 19.2 MHz. */ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz); } /* * SPT: This value represents the period of the PWM stream in clock periods * multiplied by 16 (default increment) or 128 (alternate increment selected in * SCHICKEN_1 bit 0). PWM clock is 24 MHz. */ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct intel_panel *panel = &connector->panel; u32 mul; if (panel->backlight.alternate_pwm_increment) mul = 128; else mul = 16; return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul); } /* * LPT: This value represents the period of the PWM stream in clock periods * multiplied by 128 (default increment) or 16 (alternate increment, selected in * LPT SOUTH_CHICKEN2 register bit 5). */ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 mul, clock; if (panel->backlight.alternate_pwm_increment) mul = 16; else mul = 128; if (HAS_PCH_LPT_H(i915)) clock = MHz(135); /* LPT:H */ else clock = MHz(24); /* LPT:LP */ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } /* * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH * display raw clocks multiplied by 128. */ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq), pwm_freq_hz * 128); } /* * Gen2: This field determines the number of time base events (display core * clock frequency/32) in total for a complete cycle of modulated backlight * control. * * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock) * divided by 32. */ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); int clock; if (IS_PINEVIEW(i915)) clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); else clock = KHz(i915->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); } /* * Gen4: This value represents the period of the PWM stream in display core * clocks ([DevCTG] HRAW clocks) multiplied by 128. * */ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); int clock; if (IS_G4X(i915)) clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); else clock = KHz(i915->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); } /* * VLV: This value represents the period of the PWM stream in display core * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks * multiplied by 16. CHV uses a 19.2MHz S0IX clock. 
*/ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *i915 = to_i915(connector->base.dev); int mul, clock; if ((intel_de_read(i915, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { if (IS_CHERRYVIEW(i915)) clock = KHz(19200); else clock = MHz(25); mul = 16; } else { clock = KHz(RUNTIME_INFO(i915)->rawclk_freq); mul = 128; } return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } static u16 get_vbt_pwm_freq(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz; if (pwm_freq_hz) { drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n", pwm_freq_hz); } else { pwm_freq_hz = 200; drm_dbg_kms(&i915->drm, "default backlight frequency %u Hz\n", pwm_freq_hz); } return pwm_freq_hz; } static u32 get_backlight_max_vbt(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u16 pwm_freq_hz = get_vbt_pwm_freq(connector); u32 pwm; if (!panel->backlight.pwm_funcs->hz_to_pwm) { drm_dbg_kms(&i915->drm, "backlight frequency conversion not supported\n"); return 0; } pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz); if (!pwm) { drm_dbg_kms(&i915->drm, "backlight frequency conversion failed\n"); return 0; } return pwm; } /* * Note: The setup hooks can't assume pipe is set! */ static u32 get_backlight_min_vbt(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; int min; drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); /* * XXX: If the vbt value is 255, it makes min equal to max, which leads * to problems. There are such machines out there. Either our * interpretation is wrong or the vbt has bogus data. Or both. Safeguard * against this by letting the minimum be at most (arbitrarily chosen) * 25% of the max. 
*/ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64); if (min != connector->panel.vbt.backlight.min_brightness) { drm_dbg_kms(&i915->drm, "clamping VBT min backlight %d/255 to %d/255\n", connector->panel.vbt.backlight.min_brightness, min); } /* vbt value is a coefficient in range [0..255] */ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max); } static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; bool alt, cpu_mode; if (HAS_PCH_LPT(i915)) alt = intel_de_read(i915, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; else alt = intel_de_read(i915, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; panel->backlight.alternate_pwm_increment = alt; pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2); panel->backlight.pwm_level_max = pch_ctl2 >> 16; cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE; cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) && !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) && (cpu_ctl2 & BLM_PWM_ENABLE); if (cpu_mode) { val = pch_get_backlight(connector, unused); drm_dbg_kms(&i915->drm, "CPU backlight register was enabled, switching to PCH override\n"); /* Write converted CPU PWM value to PCH override register */ lpt_set_backlight(connector->base.state, val); intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE); } drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 cpu_ctl2, pch_ctl1, pch_ctl2; pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2); panel->backlight.pwm_level_max = pch_ctl2 >> 16; if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && (pch_ctl1 & BLM_PCH_PWM_ENABLE); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, val; ctl = intel_de_read(i915, BLC_PWM_CTL); if (DISPLAY_VER(i915) == 2 || IS_I915GM(i915) || IS_I945GM(i915)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; if (IS_PINEVIEW(i915)) panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; panel->backlight.pwm_level_max = ctl >> 17; if 
(!panel->backlight.pwm_level_max) { panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); panel->backlight.pwm_level_max >>= 1; } if (!panel->backlight.pwm_level_max) return -ENODEV; if (panel->backlight.combination_mode) panel->backlight.pwm_level_max *= 0xff; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); val = i9xx_get_backlight(connector, unused); val = intel_backlight_invert_pwm_level(connector, val); val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); panel->backlight.pwm_enabled = val != 0; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, ctl2; ctl2 = intel_de_read(i915, BLC_PWM_CTL2); panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE; panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; ctl = intel_de_read(i915, BLC_PWM_CTL); panel->backlight.pwm_level_max = ctl >> 16; if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; if (panel->backlight.combination_mode) panel->backlight.pwm_level_max *= 0xff; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", connector->base.base.id, connector->base.name); return 0; } static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 ctl, ctl2; if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B)) return -ENODEV; ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; ctl = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)); panel->backlight.pwm_level_max = ctl >> 16; if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n", connector->base.base.id, connector->base.name, pipe_name(pipe)); return 0; } static int bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pwm_ctl, val; panel->backlight.controller = connector->panel.vbt.backlight.controller; pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); /* Controller 1 uses the utility pin. 
*/ if (panel->backlight.controller == 1) { val = intel_de_read(i915, UTIL_PIN_CTL); panel->backlight.util_pin_active_low = val & UTIL_PIN_POLARITY; } panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; panel->backlight.pwm_level_max = intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller)); if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n", connector->base.base.id, connector->base.name, panel->backlight.controller); return 0; } static int cnp_num_backlight_controllers(struct drm_i915_private *i915) { if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1; if (INTEL_PCH_TYPE(i915) >= PCH_ICP) return 2; return 1; } static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller) { if (controller < 0 || controller >= cnp_num_backlight_controllers(i915)) return false; if (controller == 1 && INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) < PCH_MTP) return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT; return true; } static int cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 pwm_ctl; /* * CNP has the BXT implementation of backlight, but with only one * controller. ICP+ can have two controllers, depending on pin muxing. */ panel->backlight.controller = connector->panel.vbt.backlight.controller; if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n", connector->base.base.id, connector->base.name, panel->backlight.controller); panel->backlight.controller = 0; } pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; panel->backlight.pwm_level_max = intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller)); if (!panel->backlight.pwm_level_max) panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); if (!panel->backlight.pwm_level_max) return -ENODEV; panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n", connector->base.base.id, connector->base.name, panel->backlight.controller); return 0; } static int ext_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; const char *desc; u32 level; /* Get the right PWM chip for DSI backlight according to VBT */ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_pmic_backlight"); desc = "PMIC"; } else { panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_soc_backlight"); desc = "SoC"; } if (IS_ERR(panel->backlight.pwm)) { drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n", connector->base.base.id, connector->base.name, desc); panel->backlight.pwm = NULL; return -ENODEV; } panel->backlight.pwm_level_max = 100; /* 100% */ 
panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); if (pwm_is_enabled(panel->backlight.pwm)) { /* PWM is already enabled, use existing settings */ pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state); level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state, 100); level = intel_backlight_invert_pwm_level(connector, level); panel->backlight.pwm_enabled = true; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n", connector->base.base.id, connector->base.name, NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, get_vbt_pwm_freq(connector), level); } else { /* Set period from VBT frequency, leave other settings at 0. */ panel->backlight.pwm_state.period = NSEC_PER_SEC / get_vbt_pwm_freq(connector); } drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n", connector->base.base.id, connector->base.name, desc); return 0; } static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; panel->backlight.pwm_funcs->set(conn_state, intel_backlight_invert_pwm_level(connector, level)); } static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe) { struct intel_panel *panel = &connector->panel; return intel_backlight_invert_pwm_level(connector, panel->backlight.pwm_funcs->get(connector, pipe)); } static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; panel->backlight.pwm_funcs->enable(crtc_state, conn_state, intel_backlight_invert_pwm_level(connector, level)); } static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, level)); } static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct intel_panel *panel = &connector->panel; int ret; ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) return ret; panel->backlight.min = panel->backlight.pwm_level_min; panel->backlight.max = panel->backlight.pwm_level_max; panel->backlight.level = intel_pwm_get_backlight(connector, pipe); panel->backlight.enabled = panel->backlight.pwm_enabled; return 0; } void intel_backlight_update(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (!panel->backlight.present) return; mutex_lock(&i915->display.backlight.lock); if (!panel->backlight.enabled) __intel_backlight_enable(crtc_state, conn_state); mutex_unlock(&i915->display.backlight.lock); } int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; int ret; if (!connector->panel.vbt.backlight.present) { if (intel_has_quirk(i915, 
QUIRK_BACKLIGHT_PRESENT)) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n", connector->base.base.id, connector->base.name); } else { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] no backlight present per VBT\n", connector->base.base.id, connector->base.name); return 0; } } /* ensure intel_panel has been initialized first */ if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs)) return -ENODEV; /* set level and max in panel struct */ mutex_lock(&i915->display.backlight.lock); ret = panel->backlight.funcs->setup(connector, pipe); mutex_unlock(&i915->display.backlight.lock); if (ret) { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] failed to setup backlight\n", connector->base.base.id, connector->base.name); return ret; } panel->backlight.present = true; drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n", connector->base.base.id, connector->base.name, str_enabled_disabled(panel->backlight.enabled), panel->backlight.level, panel->backlight.max); return 0; } void intel_backlight_destroy(struct intel_panel *panel) { /* dispose of the pwm */ if (panel->backlight.pwm) pwm_put(panel->backlight.pwm); panel->backlight.present = false; } static const struct intel_panel_bl_funcs bxt_pwm_funcs = { .setup = bxt_setup_backlight, .enable = bxt_enable_backlight, .disable = bxt_disable_backlight, .set = bxt_set_backlight, .get = bxt_get_backlight, .hz_to_pwm = bxt_hz_to_pwm, }; static const struct intel_panel_bl_funcs cnp_pwm_funcs = { .setup = cnp_setup_backlight, .enable = cnp_enable_backlight, .disable = cnp_disable_backlight, .set = bxt_set_backlight, .get = bxt_get_backlight, .hz_to_pwm = cnp_hz_to_pwm, }; static const struct intel_panel_bl_funcs lpt_pwm_funcs = { .setup = lpt_setup_backlight, .enable = lpt_enable_backlight, .disable = lpt_disable_backlight, .set = lpt_set_backlight, .get = lpt_get_backlight, .hz_to_pwm = lpt_hz_to_pwm, }; static const struct intel_panel_bl_funcs spt_pwm_funcs = { .setup = lpt_setup_backlight, .enable = lpt_enable_backlight, .disable = lpt_disable_backlight, .set = lpt_set_backlight, .get = lpt_get_backlight, .hz_to_pwm = spt_hz_to_pwm, }; static const struct intel_panel_bl_funcs pch_pwm_funcs = { .setup = pch_setup_backlight, .enable = pch_enable_backlight, .disable = pch_disable_backlight, .set = pch_set_backlight, .get = pch_get_backlight, .hz_to_pwm = pch_hz_to_pwm, }; static const struct intel_panel_bl_funcs ext_pwm_funcs = { .setup = ext_pwm_setup_backlight, .enable = ext_pwm_enable_backlight, .disable = ext_pwm_disable_backlight, .set = ext_pwm_set_backlight, .get = ext_pwm_get_backlight, }; static const struct intel_panel_bl_funcs vlv_pwm_funcs = { .setup = vlv_setup_backlight, .enable = vlv_enable_backlight, .disable = vlv_disable_backlight, .set = vlv_set_backlight, .get = vlv_get_backlight, .hz_to_pwm = vlv_hz_to_pwm, }; static const struct intel_panel_bl_funcs i965_pwm_funcs = { .setup = i965_setup_backlight, .enable = i965_enable_backlight, .disable = i965_disable_backlight, .set = i9xx_set_backlight, .get = i9xx_get_backlight, .hz_to_pwm = i965_hz_to_pwm, }; static const struct intel_panel_bl_funcs i9xx_pwm_funcs = { .setup = i9xx_setup_backlight, .enable = i9xx_enable_backlight, .disable = i9xx_disable_backlight, .set = i9xx_set_backlight, .get = i9xx_get_backlight, .hz_to_pwm = i9xx_hz_to_pwm, }; static const struct intel_panel_bl_funcs pwm_bl_funcs = { .setup = intel_pwm_setup_backlight, .enable = intel_pwm_enable_backlight, .disable = intel_pwm_disable_backlight, .set = 
intel_pwm_set_backlight, .get = intel_pwm_get_backlight, }; /* Set up chip specific backlight functions */ void intel_backlight_init_funcs(struct intel_panel *panel) { struct intel_connector *connector = container_of(panel, struct intel_connector, panel); struct drm_i915_private *i915 = to_i915(connector->base.dev); if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && intel_dsi_dcs_init_backlight_funcs(connector) == 0) return; if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { panel->backlight.pwm_funcs = &bxt_pwm_funcs; } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) { panel->backlight.pwm_funcs = &cnp_pwm_funcs; } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) { if (HAS_PCH_LPT(i915)) panel->backlight.pwm_funcs = &lpt_pwm_funcs; else panel->backlight.pwm_funcs = &spt_pwm_funcs; } else if (HAS_PCH_SPLIT(i915)) { panel->backlight.pwm_funcs = &pch_pwm_funcs; } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { panel->backlight.pwm_funcs = &ext_pwm_funcs; } else { panel->backlight.pwm_funcs = &vlv_pwm_funcs; } } else if (DISPLAY_VER(i915) == 4) { panel->backlight.pwm_funcs = &i965_pwm_funcs; } else { panel->backlight.pwm_funcs = &i9xx_pwm_funcs; } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { if (intel_dp_aux_init_backlight_funcs(connector) == 0) return; if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) connector->panel.backlight.power = intel_pps_backlight_power; } /* We're using a standard PWM backlight interface */ panel->backlight.funcs = &pwm_bl_funcs; }
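/*
 * Illustrative sketch, not part of intel_backlight.c above: the backlight
 * code keeps three ranges in play -- the sysfs range [0..user_max], the
 * hardware range [min..max] and the PWM range [pwm_level_min..pwm_level_max]
 * -- and maps between them with scale(), inverting PWM levels when
 * QUIRK_INVERT_BRIGHTNESS (or the invert_brightness module parameter) is
 * active. The standalone userspace program below mirrors that arithmetic.
 * It assumes scale() is a clamped linear remap (the kernel helper may round
 * where this sketch truncates); the helper names and panel limits here are
 * hypothetical.
 */
#include <stdio.h>

/* Clamped linear remap from [src_min..src_max] to [dst_min..dst_max]. */
static unsigned int remap(unsigned int val,
			  unsigned int src_min, unsigned int src_max,
			  unsigned int dst_min, unsigned int dst_max)
{
	unsigned long long tmp;

	if (src_max <= src_min)		/* degenerate source range */
		return dst_min;
	if (val < src_min)
		val = src_min;
	if (val > src_max)
		val = src_max;

	tmp = (unsigned long long)(val - src_min) * (dst_max - dst_min);
	tmp /= src_max - src_min;

	return dst_min + (unsigned int)tmp;
}

/* Same arithmetic as intel_backlight_invert_pwm_level() on a quirked panel. */
static unsigned int invert_pwm(unsigned int val,
			       unsigned int pwm_min, unsigned int pwm_max)
{
	return pwm_max - val + pwm_min;
}

int main(void)
{
	/* Hypothetical panel: hardware range 10..400, sysfs range 0..255. */
	const unsigned int hw_min = 10, hw_max = 400, user_max = 255;
	unsigned int hw_level = 300;

	unsigned int user = remap(hw_level, hw_min, hw_max, 0, user_max);
	unsigned int back = remap(user, 0, user_max, hw_min, hw_max);

	printf("hw %u -> user %u -> hw %u\n", hw_level, user, back);
	printf("inverted level for %u in [%u..%u]: %u\n",
	       hw_level, hw_min, hw_max, invert_pwm(hw_level, hw_min, hw_max));

	return 0;
}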
linux-master
drivers/gpu/drm/i915/display/intel_backlight.c
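The hz_to_pwm helpers in intel_backlight.c above turn a requested backlight PWM frequency into the period value written to the PWM frequency register, dividing a fixed PWM clock by the frequency times a per-platform increment. Below is a minimal standalone sketch of that arithmetic for the SPT case described in spt_hz_to_pwm() (24 MHz clock, increment of 16, or 128 when the alternate-increment chicken bit is set), evaluated at the 200 Hz fallback frequency used when the VBT provides none; div_round_closest() is a stand-in for the kernel's DIV_ROUND_CLOSEST().

#include <stdio.h>

/* Round-to-nearest unsigned division, mirroring DIV_ROUND_CLOSEST(). */
static unsigned int div_round_closest(unsigned long long n, unsigned long long d)
{
	return (unsigned int)((n + d / 2) / d);
}

/* SPT-style period: 24 MHz PWM clock, increment of 16 or 128 clock periods. */
static unsigned int spt_period(unsigned int pwm_freq_hz, int alternate_increment)
{
	unsigned int mul = alternate_increment ? 128 : 16;

	return div_round_closest(24000000ull,
				 (unsigned long long)pwm_freq_hz * mul);
}

int main(void)
{
	unsigned int freq = 200;	/* fallback backlight frequency in Hz */

	printf("period @ %u Hz, default increment (x16):    %u\n",
	       freq, spt_period(freq, 0));
	printf("period @ %u Hz, alternate increment (x128): %u\n",
	       freq, spt_period(freq, 1));

	return 0;
}

At 200 Hz this works out to 7500 with the default increment and 938 with the alternate one, roughly the magnitude of pwm_level_max that lpt_setup_backlight() reads back out of the upper half of BLC_PWM_PCH_CTL2.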
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_blend.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include "i915_reg.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_cursor.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" #include "intel_psr.h" #include "skl_watermark.h" /* Cursor formats */ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 base; if (DISPLAY_INFO(dev_priv)->cursor_needs_physical) base = sg_dma_address(obj->mm.pages->sgl); else base = intel_plane_ggtt_offset(plane_state); return base + plane_state->view.color_plane[0].offset; } static u32 intel_cursor_position(const struct intel_plane_state *plane_state) { int x = plane_state->uapi.dst.x1; int y = plane_state->uapi.dst.y1; u32 pos = 0; if (x < 0) { pos |= CURSOR_POS_X_SIGN; x = -x; } pos |= CURSOR_POS_X(x); if (y < 0) { pos |= CURSOR_POS_Y_SIGN; y = -y; } pos |= CURSOR_POS_Y(y); return pos; } static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) { const struct drm_mode_config *config = &plane_state->uapi.plane->dev->mode_config; int width = drm_rect_width(&plane_state->uapi.dst); int height = drm_rect_height(&plane_state->uapi.dst); return width > 0 && width <= config->cursor_width && height > 0 && height <= config->cursor_height; } static int intel_cursor_check_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); unsigned int rotation = plane_state->hw.rotation; int src_x, src_y; u32 offset; int ret; ret = intel_plane_compute_gtt(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; src_x = plane_state->uapi.src.x1 >> 16; src_y = plane_state->uapi.src.y1 >> 16; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&src_x, &src_y, plane_state, 0); if (src_x != 0 || src_y != 0) { drm_dbg_kms(&dev_priv->drm, "Arbitrary cursor panning not supported\n"); return -EINVAL; } /* * Put the final coordinates back so that the src * coordinate checks will see the right values. 
*/ drm_rect_translate_to(&plane_state->uapi.src, src_x << 16, src_y << 16); /* ILK+ do this automagically in hardware */ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { const struct drm_framebuffer *fb = plane_state->hw.fb; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; offset += (src_h * src_w - 1) * fb->format->cpp[0]; } plane_state->view.color_plane[0].offset = offset; plane_state->view.color_plane[0].x = src_x; plane_state->view.color_plane[0].y = src_y; return 0; } static int intel_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_rect src = plane_state->uapi.src; const struct drm_rect dst = plane_state->uapi.dst; int ret; if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); return -EINVAL; } ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, true); if (ret) return ret; /* Use the unclipped src/dst rectangles, which we program to hw */ plane_state->uapi.src = src; plane_state->uapi.dst = dst; /* final plane coordinates will be relative to the plane's pipe */ drm_rect_translate(&plane_state->uapi.dst, -crtc_state->pipe_src.x1, -crtc_state->pipe_src.y1); ret = intel_cursor_check_surface(plane_state); if (ret) return ret; if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); if (ret) return ret; return 0; } static unsigned int i845_cursor_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { return 2048; } static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 cntl = 0; if (crtc_state->gamma_enable) cntl |= CURSOR_PIPE_GAMMA_ENABLE; return cntl; } static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { return CURSOR_ENABLE | CURSOR_FORMAT_ARGB | CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride); } static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) { int width = drm_rect_width(&plane_state->uapi.dst); /* * 845g/865g are only limited by the width of their cursors, * the height is arbitrary up to the precision of the register. 
*/ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); } static int i845_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); int ret; ret = intel_check_cursor(crtc_state, plane_state); if (ret) return ret; /* if we want to turn off the cursor ignore width and height */ if (!fb) return 0; /* Check for which cursor types we support */ if (!i845_cursor_size_ok(plane_state)) { drm_dbg_kms(&i915->drm, "Cursor dimension %dx%d not supported\n", drm_rect_width(&plane_state->uapi.dst), drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } drm_WARN_ON(&i915->drm, plane_state->uapi.visible && plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); switch (fb->pitches[0]) { case 256: case 512: case 1024: case 2048: break; default: drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", fb->pitches[0]); return -EINVAL; } plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); return 0; } /* TODO: split into noarm+arm pair */ static void i845_cursor_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); u32 cntl = 0, base = 0, pos = 0, size = 0; if (plane_state && plane_state->uapi.visible) { unsigned int width = drm_rect_width(&plane_state->uapi.dst); unsigned int height = drm_rect_height(&plane_state->uapi.dst); cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); } /* On these chipsets we can only modify the base/size/stride * whilst the cursor is disabled. 
*/ if (plane->cursor.base != base || plane->cursor.size != size || plane->cursor.cntl != cntl) { intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); intel_de_write_fw(dev_priv, CURSIZE(PIPE_A), size); intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); plane->cursor.base = base; plane->cursor.size = size; plane->cursor.cntl = cntl; } else { intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); } } static void i845_cursor_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { i845_cursor_update_arm(plane, crtc_state, NULL); } static bool i845_cursor_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; bool ret; power_domain = POWER_DOMAIN_PIPE(PIPE_A); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; *pipe = PIPE_A; intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static unsigned int i9xx_cursor_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) { return plane->base.dev->mode_config.cursor_width * 4; } static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 cntl = 0; if (DISPLAY_VER(dev_priv) >= 11) return cntl; if (crtc_state->gamma_enable) cntl = MCURSOR_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) cntl |= MCURSOR_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) cntl |= MCURSOR_PIPE_SEL(crtc->pipe); return cntl; } static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); u32 cntl = 0; if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) cntl |= MCURSOR_TRICKLE_FEED_DISABLE; switch (drm_rect_width(&plane_state->uapi.dst)) { case 64: cntl |= MCURSOR_MODE_64_ARGB_AX; break; case 128: cntl |= MCURSOR_MODE_128_ARGB_AX; break; case 256: cntl |= MCURSOR_MODE_256_ARGB_AX; break; default: MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); return 0; } if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) cntl |= MCURSOR_ROTATE_180; /* Wa_22012358565:adl-p */ if (DISPLAY_VER(dev_priv) == 13) cntl |= MCURSOR_ARB_SLOTS(1); return cntl; } static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); int width = drm_rect_width(&plane_state->uapi.dst); int height = drm_rect_height(&plane_state->uapi.dst); if (!intel_cursor_size_ok(plane_state)) return false; /* Cursor width is limited to a few power-of-two sizes */ switch (width) { case 256: case 128: case 64: break; default: return false; } /* * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor * height from 8 lines up to the cursor width, when the * cursor is not rotated. Everything else requires square * cursors. 
*/ if (HAS_CUR_FBC(dev_priv) && plane_state->hw.rotation & DRM_MODE_ROTATE_0) { if (height < 8 || height > width) return false; } else { if (height != width) return false; } return true; } static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; int ret; ret = intel_check_cursor(crtc_state, plane_state); if (ret) return ret; /* if we want to turn off the cursor ignore width and height */ if (!fb) return 0; /* Check for which cursor types we support */ if (!i9xx_cursor_size_ok(plane_state)) { drm_dbg(&dev_priv->drm, "Cursor dimension %dx%d not supported\n", drm_rect_width(&plane_state->uapi.dst), drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); if (fb->pitches[0] != drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { drm_dbg_kms(&dev_priv->drm, "Invalid cursor stride (%u) (cursor width %d)\n", fb->pitches[0], drm_rect_width(&plane_state->uapi.dst)); return -EINVAL; } /* * There's something wrong with the cursor on CHV pipe C. * If it straddles the left edge of the screen then * moving it away from the edge or disabling it often * results in a pipe underrun, and often that can lead to * dead pipe (constant underrun reported, and it scans * out just a solid color). To recover from that, the * display power well must be turned off and on again. * Refuse the put the cursor into that compromised position. */ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { drm_dbg_kms(&dev_priv->drm, "CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); return 0; } /* TODO: split into noarm+arm pair */ static void i9xx_cursor_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; if (plane_state && plane_state->uapi.visible) { int width = drm_rect_width(&plane_state->uapi.dst); int height = drm_rect_height(&plane_state->uapi.dst); cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); if (width != height) fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); } /* * On some platforms writing CURCNTR first will also * cause CURPOS to be armed by the CURBASE write. * Without the CURCNTR write the CURPOS write would * arm itself. Thus we always update CURCNTR before * CURPOS. * * On other platforms CURPOS always requires the * CURBASE write to arm the update. Additonally * a write to any of the cursor register will cancel * an already armed cursor update. Thus leaving out * the CURBASE write after CURPOS could lead to a * cursor that doesn't appear to move, or even change * shape. Thus we always write CURBASE. * * The other registers are armed by the CURBASE write * except when the plane is getting enabled at which time * the CURCNTR write arms the update. 
*/ if (DISPLAY_VER(dev_priv) >= 9) skl_write_cursor_wm(plane, crtc_state); if (plane_state) intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); else intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { if (HAS_CUR_FBC(dev_priv)) intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), fbc_ctl); intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); intel_de_write_fw(dev_priv, CURPOS(pipe), pos); intel_de_write_fw(dev_priv, CURBASE(pipe), base); plane->cursor.base = base; plane->cursor.size = fbc_ctl; plane->cursor.cntl = cntl; } else { intel_de_write_fw(dev_priv, CURPOS(pipe), pos); intel_de_write_fw(dev_priv, CURBASE(pipe), base); } } static void i9xx_cursor_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { i9xx_cursor_update_arm(plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; bool ret; u32 val; /* * Not 100% correct for planes that can move between pipes, * but that's only the case for gen2-3 which don't have any * display power wells. */ power_domain = POWER_DOMAIN_PIPE(plane->pipe); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return false; val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); ret = val & MCURSOR_MODE_MASK; if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) *pipe = plane->pipe; else *pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); return ret; } static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; return format == DRM_FORMAT_ARGB8888; } static int intel_legacy_cursor_update(struct drm_plane *_plane, struct drm_crtc *_crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, u32 src_x, u32 src_y, u32 src_w, u32 src_h, struct drm_modeset_acquire_ctx *ctx) { struct intel_plane *plane = to_intel_plane(_plane); struct intel_crtc *crtc = to_intel_crtc(_crtc); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->base.state); struct intel_plane_state *new_plane_state; struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_crtc_state *new_crtc_state; int ret; /* * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath. * PSR2 selective fetch also requires the slow path as * PSR2 plane and transcoder registers can only be updated during * vblank. * * FIXME bigjoiner fastpath would be good */ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) || intel_crtc_needs_fastset(crtc_state) || crtc_state->bigjoiner_pipes) goto slow; /* * Don't do an async update if there is an outstanding commit modifying * the plane. This prevents our async update's changes from getting * overridden by a previous synchronous update's state. */ if (old_plane_state->uapi.commit && !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) goto slow; /* * If any parameters change that may affect watermarks, * take the slowpath. Only changing fb or position should be * in the fastpath. 
*/ if (old_plane_state->uapi.crtc != &crtc->base || old_plane_state->uapi.src_w != src_w || old_plane_state->uapi.src_h != src_h || old_plane_state->uapi.crtc_w != crtc_w || old_plane_state->uapi.crtc_h != crtc_h || !old_plane_state->uapi.fb != !fb) goto slow; new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); if (!new_plane_state) return -ENOMEM; new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); if (!new_crtc_state) { ret = -ENOMEM; goto out_free; } drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); new_plane_state->uapi.src_x = src_x; new_plane_state->uapi.src_y = src_y; new_plane_state->uapi.src_w = src_w; new_plane_state->uapi.src_h = src_h; new_plane_state->uapi.crtc_x = crtc_x; new_plane_state->uapi.crtc_y = crtc_y; new_plane_state->uapi.crtc_w = crtc_w; new_plane_state->uapi.crtc_h = crtc_h; intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, old_plane_state, new_plane_state); if (ret) goto out_free; ret = intel_plane_pin_fb(new_plane_state); if (ret) goto out_free; intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), ORIGIN_CURSOR_UPDATE); intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), to_intel_frontbuffer(new_plane_state->hw.fb), plane->frontbuffer_bit); /* Swap plane state */ plane->base.state = &new_plane_state->uapi; /* * We cannot swap crtc_state as it may be in use by an atomic commit or * page flip that's running simultaneously. If we swap crtc_state and * destroy the old state, we will cause a use-after-free there. * * Only update active_planes, which is needed for our internal * bookkeeping. Either value will do the right thing when updating * planes atomically. If the cursor was part of the atomic update then * we would have taken the slowpath. */ crtc_state->active_planes = new_crtc_state->active_planes; /* * Technically we should do a vblank evasion here to make * sure all the cursor registers update on the same frame. * For now just make sure the register writes happen as * quickly as possible to minimize the race window. 
*/ local_irq_disable(); if (new_plane_state->uapi.visible) { intel_plane_update_noarm(plane, crtc_state, new_plane_state); intel_plane_update_arm(plane, crtc_state, new_plane_state); } else { intel_plane_disable_arm(plane, crtc_state); } local_irq_enable(); intel_plane_unpin_fb(old_plane_state); out_free: if (new_crtc_state) intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); if (ret) intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); else intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); return ret; slow: return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, ctx); } static const struct drm_plane_funcs intel_cursor_plane_funcs = { .update_plane = intel_legacy_cursor_update, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, .format_mod_supported = intel_cursor_format_mod_supported, }; struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *cursor; int ret, zpos; u64 *modifiers; cursor = intel_plane_alloc(); if (IS_ERR(cursor)) return cursor; cursor->pipe = pipe; cursor->i9xx_plane = (enum i9xx_plane_id) pipe; cursor->id = PLANE_CURSOR; cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->max_stride = i845_cursor_max_stride; cursor->update_arm = i845_cursor_update_arm; cursor->disable_arm = i845_cursor_disable_arm; cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->max_stride = i9xx_cursor_max_stride; cursor->update_arm = i9xx_cursor_update_arm; cursor->disable_arm = i9xx_cursor_disable_arm; cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } cursor->cursor.base = ~0; cursor->cursor.cntl = ~0; if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE); ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), modifiers, DRM_PLANE_TYPE_CURSOR, "cursor %c", pipe_name(pipe)); kfree(modifiers); if (ret) goto fail; if (DISPLAY_VER(dev_priv) >= 4) drm_plane_create_rotation_property(&cursor->base, DRM_MODE_ROTATE_0, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); zpos = DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; drm_plane_create_zpos_immutable_property(&cursor->base, zpos); if (DISPLAY_VER(dev_priv) >= 12) drm_plane_enable_fb_damage_clips(&cursor->base); intel_plane_helper_add(cursor); return cursor; fail: intel_plane_free(cursor); return ERR_PTR(ret); }
linux-master
drivers/gpu/drm/i915/display/intel_cursor.c
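The legacy cursor path above (intel_legacy_cursor_update() and the i845/i9xx update_arm hooks) is what the long-standing DRM cursor ioctls end up in. Below is a hypothetical userspace sketch, not driver code: the DRM fd, CRTC id and 64x64 ARGB8888 buffer handle are assumed to have been obtained elsewhere (e.g. drmModeGetResources() plus a dumb buffer), and it leans on libdrm's drmModeSetCursor()/drmModeMoveCursor() wrappers.

/*
 * Hypothetical userspace sketch (not driver code): exercise the legacy
 * cursor ioctls that intel_legacy_cursor_update() services.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int set_and_move_cursor(int drm_fd, uint32_t crtc_id, uint32_t bo_handle)
{
	int ret;

	/* Set the cursor image; libdrm issues DRM_IOCTL_MODE_CURSOR with
	 * DRM_MODE_CURSOR_BO. This takes the fastpath above when the CRTC
	 * is active and no modeset/watermark change is needed. */
	ret = drmModeSetCursor(drm_fd, crtc_id, bo_handle, 64, 64);
	if (ret)
		return ret;

	/* Position-only update (DRM_MODE_CURSOR_MOVE), which in the
	 * fastpath ends up as little more than a CURPOS register write. */
	return drmModeMoveCursor(drm_fd, crtc_id, 100, 100);
}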
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <uapi/drm/i915_drm.h> #include "intel_memory_region.h" #include "i915_gem_region.h" #include "i915_drv.h" #include "i915_trace.h" void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem) { obj->mm.region = mem; mutex_lock(&mem->objects.lock); list_add(&obj->mm.region_link, &mem->objects.list); mutex_unlock(&mem->objects.lock); } void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj) { struct intel_memory_region *mem = obj->mm.region; mutex_lock(&mem->objects.lock); list_del(&obj->mm.region_link); mutex_unlock(&mem->objects.lock); } static struct drm_i915_gem_object * __i915_gem_object_create_region(struct intel_memory_region *mem, resource_size_t offset, resource_size_t size, resource_size_t page_size, unsigned int flags) { struct drm_i915_gem_object *obj; resource_size_t default_page_size; int err; /* * NB: Our use of resource_size_t for the size stems from using struct * resource for the mem->region. We might need to revisit this in the * future. */ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS); if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY && (flags & I915_BO_ALLOC_CPU_CLEAR || flags & I915_BO_ALLOC_PM_EARLY))) return ERR_PTR(-EINVAL); if (!mem) return ERR_PTR(-ENODEV); default_page_size = mem->min_page_size; if (page_size) default_page_size = page_size; /* We should be able to fit a page within an sg entry */ GEM_BUG_ON(overflows_type(default_page_size, u32)); GEM_BUG_ON(!is_power_of_2_u64(default_page_size)); GEM_BUG_ON(default_page_size < PAGE_SIZE); size = round_up(size, default_page_size); if (default_page_size == size) flags |= I915_BO_ALLOC_CONTIGUOUS; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT)); if (i915_gem_object_size_2big(size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); /* * Anything smaller than the min_page_size can't be freely inserted into * the GTT, due to alignemnt restrictions. For such special objects, * make sure we force memcpy based suspend-resume. In the future we can * revisit this, either by allowing special mis-aligned objects in the * migration path, or by mapping all of LMEM upfront using cheap 1G * GTT entries. 
*/ if (default_page_size < mem->min_page_size) flags |= I915_BO_ALLOC_PM_EARLY; err = mem->ops->init_object(mem, obj, offset, size, page_size, flags); if (err) goto err_object_free; trace_i915_gem_object_create(obj); return obj; err_object_free: i915_gem_object_free(obj); return ERR_PTR(err); } struct drm_i915_gem_object * i915_gem_object_create_region(struct intel_memory_region *mem, resource_size_t size, resource_size_t page_size, unsigned int flags) { return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET, size, page_size, flags); } struct drm_i915_gem_object * i915_gem_object_create_region_at(struct intel_memory_region *mem, resource_size_t offset, resource_size_t size, unsigned int flags) { GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET); if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) || GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size))) return ERR_PTR(-EINVAL); if (range_overflows(offset, size, resource_size(&mem->region))) return ERR_PTR(-EINVAL); if (!(flags & I915_BO_ALLOC_GPU_ONLY) && offset + size > mem->io_size && !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt)) return ERR_PTR(-ENOSPC); return __i915_gem_object_create_region(mem, offset, size, 0, flags | I915_BO_ALLOC_CONTIGUOUS); } /** * i915_gem_process_region - Iterate over all objects of a region using ops * to process and optionally skip objects * @mr: The memory region * @apply: ops and private data * * This function can be used to iterate over the regions object list, * checking whether to skip objects, and, if not, lock the objects and * process them using the supplied ops. Note that this function temporarily * removes objects from the region list while iterating, so that if run * concurrently with itself may not iterate over all objects. * * Return: 0 if successful, negative error code on failure. */ int i915_gem_process_region(struct intel_memory_region *mr, struct i915_gem_apply_to_region *apply) { const struct i915_gem_apply_to_region_ops *ops = apply->ops; struct drm_i915_gem_object *obj; struct list_head still_in_list; int ret = 0; /* * In the future, a non-NULL apply->ww could mean the caller is * already in a locking transaction and provides its own context. */ GEM_WARN_ON(apply->ww); INIT_LIST_HEAD(&still_in_list); mutex_lock(&mr->objects.lock); for (;;) { struct i915_gem_ww_ctx ww; obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj), mm.region_link); if (!obj) break; list_move_tail(&obj->mm.region_link, &still_in_list); if (!kref_get_unless_zero(&obj->base.refcount)) continue; /* * Note: Someone else might be migrating the object at this * point. The object's region is not stable until we lock * the object. */ mutex_unlock(&mr->objects.lock); apply->ww = &ww; for_i915_gem_ww(&ww, ret, apply->interruptible) { ret = i915_gem_object_lock(obj, apply->ww); if (ret) continue; if (obj->mm.region == mr) ret = ops->process_obj(apply, obj); /* Implicit object unlock */ } i915_gem_object_put(obj); mutex_lock(&mr->objects.lock); if (ret) break; } list_splice_tail(&still_in_list, &mr->objects.list); mutex_unlock(&mr->objects.lock); return ret; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_region.c
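The size handling in __i915_gem_object_create_region() above rounds the request up to the region's minimum page size and, when the rounded size is exactly one such page, implicitly makes the allocation contiguous. The standalone sketch below (plain C, not kernel code) just restates that rule; the 64K minimum page size and the 12K request are made-up example values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* round_up() for a power-of-two alignment; equivalent to the kernel macro
 * for power-of-two values. */
static uint64_t round_up_pow2(uint64_t size, uint64_t align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	const uint64_t min_page_size = 64 * 1024;	/* e.g. 64K local-memory pages */
	const uint64_t requested = 12 * 1024;		/* hypothetical object size */
	uint64_t size = round_up_pow2(requested, min_page_size);
	bool contiguous = (size == min_page_size);	/* single page => contiguous */

	printf("requested=%llu rounded=%llu contiguous=%s\n",
	       (unsigned long long)requested, (unsigned long long)size,
	       contiguous ? "yes" : "no");
	return 0;
}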
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/anon_inodes.h> #include <linux/mman.h> #include <linux/pfn_t.h> #include <linux/sizes.h> #include <drm/drm_cache.h> #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_gem_evict.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_gem_mman.h" #include "i915_mm.h" #include "i915_trace.h" #include "i915_user_extensions.h" #include "i915_gem_ttm.h" #include "i915_vma.h" static inline bool __vma_matches(struct vm_area_struct *vma, struct file *filp, unsigned long addr, unsigned long size) { if (vma->vm_file != filp) return false; return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); } /** * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address * it is mapped to. * @dev: drm device * @data: ioctl data blob * @file: drm file * * While the mapping holds a reference on the contents of the object, it doesn't * imply a ref on the object itself. * * IMPORTANT: * * DRM driver writers who look a this function as an example for how to do GEM * mmap support, please don't implement mmap support like here. The modern way * to implement DRM mmap support is with an mmap offset ioctl (like * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. * That way debug tooling like valgrind will understand what's going on, hiding * the mmap call in a driver private ioctl will break that. The i915 driver only * does cpu mmaps this way because we didn't know better. */ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; /* * mmap ioctl is disallowed for all discrete platforms, * and for all platforms with GRAPHICS_VER > 12. */ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0)) return -EOPNOTSUPP; if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; if (args->flags & I915_MMAP_WC && !pat_enabled()) return -ENODEV; obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; /* prime objects have no backing filp to GEM mmap * pages from. */ if (!obj->base.filp) { addr = -ENXIO; goto err; } if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { addr = -EINVAL; goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); if (IS_ERR_VALUE(addr)) goto err; if (args->flags & I915_MMAP_WC) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (mmap_write_lock_killable(mm)) { addr = -EINTR; goto err; } vma = find_vma(mm, addr); if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else addr = -ENOMEM; mmap_write_unlock(mm); if (IS_ERR_VALUE(addr)) goto err; } i915_gem_object_put(obj); args->addr_ptr = (u64)addr; return 0; err: i915_gem_object_put(obj); return addr; } static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) { return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; } /** * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps * * A history of the GTT mmap interface: * * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to * aligned and suitable for fencing, and still fit into the available * mappable space left by the pinned display objects. 
A classic problem * we called the page-fault-of-doom where we would ping-pong between * two objects that could not fit inside the GTT and so the memcpy * would page one object in at the expense of the other between every * single byte. * * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the * object is too large for the available space (or simply too large * for the mappable aperture!), a view is created instead and faulted * into userspace. (This view is aligned and sized appropriately for * fenced access.) * * 2 - Recognise WC as a separate cache domain so that we can flush the * delayed writes via GTT before performing direct access via WC. * * 3 - Remove implicit set-domain(GTT) and synchronisation on initial * pagefault; swapin remains transparent. * * 4 - Support multiple fault handlers per object depending on object's * backing storage (a.k.a. MMAP_OFFSET). * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine * hangs on some architectures, corruption on others. An attempt to service * a GTT page fault from a snoopable object will generate a SIGBUS. * * * the object must be able to fit into RAM (physical memory, though not * limited to the mappable aperture). * * * Caveats: * * * a new GTT page fault will synchronize rendering from the GPU and flush * all data to system memory. Subsequent access will not be synchronized. * * * all mappings are revoked on runtime device suspend. * * * there are only 8, 16 or 32 fence registers to share between all users * (older machines require a fence register for display and blitter access * as well). Contention of the fence registers will cause the previous users * to be unmapped and any new access will generate new page faults. * * * running out of memory while servicing a fault may generate a SIGBUS, * rather than the expected SIGSEGV. */ int i915_gem_mmap_gtt_version(void) { return 4; } static inline struct i915_gtt_view compute_partial_view(const struct drm_i915_gem_object *obj, pgoff_t page_offset, unsigned int chunk) { struct i915_gtt_view view; if (i915_gem_object_is_tiled(obj)) chunk = roundup(chunk, tile_row_pages(obj) ?: 1); view.type = I915_GTT_VIEW_PARTIAL; view.partial.offset = rounddown(page_offset, chunk); view.partial.size = min_t(unsigned int, chunk, (obj->base.size >> PAGE_SHIFT) - view.partial.offset); /* If the partial covers the entire object, just create a normal VMA. */ if (chunk >= obj->base.size >> PAGE_SHIFT) view.type = I915_GTT_VIEW_NORMAL; return view; } static vm_fault_t i915_error_to_vmf_fault(int err) { switch (err) { default: WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); fallthrough; case -EIO: /* shmemfs failure from swap device */ case -EFAULT: /* purged object */ case -ENODEV: /* bad object, how did you get here! */ case -ENXIO: /* unable to access backing store (on device) */ return VM_FAULT_SIGBUS; case -ENOMEM: /* our allocation failure */ return VM_FAULT_OOM; case 0: case -EAGAIN: case -ENOSPC: /* transient failure to evict? */ case -ERESTARTSYS: case -EINTR: case -EBUSY: /* * EBUSY is ok: this just means that another thread * already did the job. 
*/ return VM_FAULT_NOPAGE; } } static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) { struct vm_area_struct *area = vmf->vma; struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; resource_size_t iomap; int err; /* Sanity check that we allow writing into this object */ if (unlikely(i915_gem_object_is_readonly(obj) && area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; if (i915_gem_object_lock_interruptible(obj, NULL)) return VM_FAULT_NOPAGE; err = i915_gem_object_pin_pages(obj); if (err) goto out; iomap = -1; if (!i915_gem_object_has_struct_page(obj)) { iomap = obj->mm.region->iomap.base; iomap -= obj->mm.region->region.start; } /* PTEs are revoked in obj->ops->put_pages() */ err = remap_io_sg(area, area->vm_start, area->vm_end - area->vm_start, obj->mm.pages->sgl, iomap); if (area->vm_flags & VM_WRITE) { GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); obj->mm.dirty = true; } i915_gem_object_unpin_pages(obj); out: i915_gem_object_unlock(obj); return i915_error_to_vmf_fault(err); } static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) { #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) struct vm_area_struct *area = vmf->vma; struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_ggtt *ggtt = to_gt(i915)->ggtt; bool write = area->vm_flags & VM_WRITE; struct i915_gem_ww_ctx ww; intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; int srcu; int ret; /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; trace_i915_gem_object_fault(obj, page_offset, true, write); wakeref = intel_runtime_pm_get(rpm); i915_gem_ww_ctx_init(&ww, true); retry: ret = i915_gem_object_lock(obj, &ww); if (ret) goto err_rpm; /* Sanity check that we allow writing into this object */ if (i915_gem_object_is_readonly(obj) && write) { ret = -EFAULT; goto err_rpm; } ret = i915_gem_object_pin_pages(obj); if (ret) goto err_rpm; ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu); if (ret) goto err_pages; /* Now pin it into the GTT as needed */ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, PIN_MAPPABLE | PIN_NONBLOCK /* NOWARN */ | PIN_NOEVICT); if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { /* Use a partial view if it is bigger than available space */ struct i915_gtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); unsigned int flags; flags = PIN_MAPPABLE | PIN_NOSEARCH; if (view.type == I915_GTT_VIEW_NORMAL) flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ /* * Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. */ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { flags = PIN_MAPPABLE; view.type = I915_GTT_VIEW_PARTIAL; vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); } /* * The entire mappable GGTT is pinned? Unexpected! * Try to evict the object we locked too, as normally we skip it * due to lack of short term pinning inside execbuf. 
*/ if (vma == ERR_PTR(-ENOSPC)) { ret = mutex_lock_interruptible(&ggtt->vm.mutex); if (!ret) { ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); mutex_unlock(&ggtt->vm.mutex); } if (ret) goto err_reset; vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); } } if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_reset; } /* Access to snoopable pages through the GTT is incoherent. */ /* * For objects created by userspace through GEM_CREATE with pat_index * set by set_pat extension, coherency is managed by userspace, make * sure we don't fail handling the vm fault by calling * i915_gem_object_has_cache_level() which always return true for such * objects. Otherwise this helper function would fall back to checking * whether the object is un-cached. */ if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) || HAS_LLC(i915))) { ret = -EFAULT; goto err_unpin; } ret = i915_vma_pin_fence(vma); if (ret) goto err_unpin; /* Finally, remap it using the new GTT offset */ ret = remap_io_mapping(area, area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->iomap); if (ret) goto err_fence; assert_rpm_wakelock_held(rpm); /* Mark as being mmapped into userspace for later revocation */ mutex_lock(&to_gt(i915)->ggtt->vm.mutex); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list); mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); /* Track the mmo associated with the fenced vma */ vma->mmo = mmo; if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); if (write) { GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); i915_vma_set_ggtt_write(vma); obj->mm.dirty = true; } err_fence: i915_vma_unpin_fence(vma); err_unpin: __i915_vma_unpin(vma); err_reset: intel_gt_reset_unlock(ggtt->vm.gt, srcu); err_pages: i915_gem_object_unpin_pages(obj); err_rpm: if (ret == -EDEADLK) { ret = i915_gem_ww_ctx_backoff(&ww); if (!ret) goto retry; } i915_gem_ww_ctx_fini(&ww); intel_runtime_pm_put(rpm, wakeref); return i915_error_to_vmf_fault(ret); } static int vm_access(struct vm_area_struct *area, unsigned long addr, void *buf, int len, int write) { struct i915_mmap_offset *mmo = area->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; struct i915_gem_ww_ctx ww; void *vaddr; int err = 0; if (i915_gem_object_is_readonly(obj) && write) return -EACCES; addr -= area->vm_start; if (range_overflows_t(u64, addr, len, obj->base.size)) return -EINVAL; i915_gem_ww_ctx_init(&ww, true); retry: err = i915_gem_object_lock(obj, &ww); if (err) goto out; /* As this is primarily for debugging, let's focus on simplicity */ vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto out; } if (write) { memcpy(vaddr + addr, buf, len); __i915_gem_object_flush_map(obj, addr, len); } else { memcpy(buf, vaddr + addr, len); } i915_gem_object_unpin_map(obj); out: if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); if (err) return err; return len; } void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct i915_vma *vma; GEM_BUG_ON(!obj->userfault_count); for_each_ggtt_vma(vma, obj) i915_vma_revoke_mmap(vma); GEM_BUG_ON(obj->userfault_count); } /* * It is vital that we remove the page mapping if we have mapped a tiled * 
object through the GTT and then lose the fence register due to * resource pressure. Similarly if the object has been moved out of the * aperture, then pages mapped into userspace must be revoked. Removing the * mapping will then trigger a page fault on the next user access, allowing * fixup by vm_fault_gtt(). */ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); intel_wakeref_t wakeref; /* * Serialisation between user GTT access and our code depends upon * revoking the CPU's PTE whilst the mutex is held. The next user * pagefault then has to wait until we release the mutex. * * Note that RPM complicates somewhat by adding an additional * requirement that operations to the GGTT be made holding the RPM * wakeref. */ wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_lock(&to_gt(i915)->ggtt->vm.mutex); if (!obj->userfault_count) goto out; __i915_gem_object_release_mmap_gtt(obj); /* * Ensure that the CPU's PTE are revoked and there are no outstanding * memory transactions from userspace before we return. The TLB * flushing implied by changing the PTE above *should* be * sufficient, an extra barrier here just provides us with a bit * of paranoid documentation about our requirement to serialise * memory writes before touching registers / GSM. */ wmb(); out: mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); intel_runtime_pm_put(&i915->runtime_pm, wakeref); } void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct ttm_device *bdev = bo->bdev; drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); /* * We have exclusive access here via runtime suspend. All other callers * must first grab the rpm wakeref. 
*/ GEM_BUG_ON(!obj->userfault_count); list_del(&obj->userfault_link); obj->userfault_count = 0; } void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) { struct i915_mmap_offset *mmo, *mn; if (obj->ops->unmap_virtual) obj->ops->unmap_virtual(obj); spin_lock(&obj->mmo.lock); rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { /* * vma_node_unmap for GTT mmaps handled already in * __i915_gem_object_release_mmap_gtt */ if (mmo->mmap_type == I915_MMAP_TYPE_GTT) continue; spin_unlock(&obj->mmo.lock); drm_vma_node_unmap(&mmo->vma_node, obj->base.dev->anon_inode->i_mapping); spin_lock(&obj->mmo.lock); } spin_unlock(&obj->mmo.lock); } static struct i915_mmap_offset * lookup_mmo(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type) { struct rb_node *rb; spin_lock(&obj->mmo.lock); rb = obj->mmo.offsets.rb_node; while (rb) { struct i915_mmap_offset *mmo = rb_entry(rb, typeof(*mmo), offset); if (mmo->mmap_type == mmap_type) { spin_unlock(&obj->mmo.lock); return mmo; } if (mmo->mmap_type < mmap_type) rb = rb->rb_right; else rb = rb->rb_left; } spin_unlock(&obj->mmo.lock); return NULL; } static struct i915_mmap_offset * insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo) { struct rb_node *rb, **p; spin_lock(&obj->mmo.lock); rb = NULL; p = &obj->mmo.offsets.rb_node; while (*p) { struct i915_mmap_offset *pos; rb = *p; pos = rb_entry(rb, typeof(*pos), offset); if (pos->mmap_type == mmo->mmap_type) { spin_unlock(&obj->mmo.lock); drm_vma_offset_remove(obj->base.dev->vma_offset_manager, &mmo->vma_node); kfree(mmo); return pos; } if (pos->mmap_type < mmo->mmap_type) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&mmo->offset, rb, p); rb_insert_color(&mmo->offset, &obj->mmo.offsets); spin_unlock(&obj->mmo.lock); return mmo; } static struct i915_mmap_offset * mmap_offset_attach(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_mmap_offset *mmo; int err; GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops); mmo = lookup_mmo(obj, mmap_type); if (mmo) goto out; mmo = kmalloc(sizeof(*mmo), GFP_KERNEL); if (!mmo) return ERR_PTR(-ENOMEM); mmo->obj = obj; mmo->mmap_type = mmap_type; drm_vma_node_reset(&mmo->vma_node); err = drm_vma_offset_add(obj->base.dev->vma_offset_manager, &mmo->vma_node, obj->base.size / PAGE_SIZE); if (likely(!err)) goto insert; /* Attempt to reap some mmap space from dead objects */ err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT, NULL); if (err) goto err; i915_gem_drain_freed_objects(i915); err = drm_vma_offset_add(obj->base.dev->vma_offset_manager, &mmo->vma_node, obj->base.size / PAGE_SIZE); if (err) goto err; insert: mmo = insert_mmo(obj, mmo); GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo); out: if (file) drm_vma_node_allow_once(&mmo->vma_node, file); return mmo; err: kfree(mmo); return ERR_PTR(err); } static int __assign_mmap_offset(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type, u64 *offset, struct drm_file *file) { struct i915_mmap_offset *mmo; if (i915_gem_object_never_mmap(obj)) return -ENODEV; if (obj->ops->mmap_offset) { if (mmap_type != I915_MMAP_TYPE_FIXED) return -ENODEV; *offset = obj->ops->mmap_offset(obj); return 0; } if (mmap_type == I915_MMAP_TYPE_FIXED) return -ENODEV; if (mmap_type != I915_MMAP_TYPE_GTT && !i915_gem_object_has_struct_page(obj) && !i915_gem_object_has_iomem(obj)) return -ENODEV; mmo = mmap_offset_attach(obj, mmap_type, file); 
if (IS_ERR(mmo)) return PTR_ERR(mmo); *offset = drm_vma_node_offset_addr(&mmo->vma_node); return 0; } static int __assign_mmap_offset_handle(struct drm_file *file, u32 handle, enum i915_mmap_type mmap_type, u64 *offset) { struct drm_i915_gem_object *obj; int err; obj = i915_gem_object_lookup(file, handle); if (!obj) return -ENOENT; err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out_put; err = __assign_mmap_offset(obj, mmap_type, offset, file); i915_gem_object_unlock(obj); out_put: i915_gem_object_put(obj); return err; } int i915_gem_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, u32 handle, u64 *offset) { struct drm_i915_private *i915 = to_i915(dev); enum i915_mmap_type mmap_type; if (HAS_LMEM(to_i915(dev))) mmap_type = I915_MMAP_TYPE_FIXED; else if (pat_enabled()) mmap_type = I915_MMAP_TYPE_WC; else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return -ENODEV; else mmap_type = I915_MMAP_TYPE_GTT; return __assign_mmap_offset_handle(file, handle, mmap_type, offset); } /** * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data * @file: GEM object info * * Simply returns the fake offset to userspace so it can mmap it. * The mmap call will end up in drm_gem_mmap(), which will set things * up so we can get faults in the handler above. * * The fault handler will take care of binding the object into the GTT * (since it may have been evicted to make room for something), allocating * a fence register, and mapping the appropriate aperture address into * userspace. */ int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_mmap_offset *args = data; enum i915_mmap_type type; int err; /* * Historically we failed to check args.pad and args.offset * and so we cannot use those fields for user input and we cannot * add -EINVAL for them as the ABI is fixed, i.e. old userspace * may be feeding in garbage in those fields. * * if (args->pad) return -EINVAL; is verbotten! 
*/ err = i915_user_extensions(u64_to_user_ptr(args->extensions), NULL, 0, NULL); if (err) return err; switch (args->flags) { case I915_MMAP_OFFSET_GTT: if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return -ENODEV; type = I915_MMAP_TYPE_GTT; break; case I915_MMAP_OFFSET_WC: if (!pat_enabled()) return -ENODEV; type = I915_MMAP_TYPE_WC; break; case I915_MMAP_OFFSET_WB: type = I915_MMAP_TYPE_WB; break; case I915_MMAP_OFFSET_UC: if (!pat_enabled()) return -ENODEV; type = I915_MMAP_TYPE_UC; break; case I915_MMAP_OFFSET_FIXED: type = I915_MMAP_TYPE_FIXED; break; default: return -EINVAL; } return __assign_mmap_offset_handle(file, args->handle, type, &args->offset); } static void vm_open(struct vm_area_struct *vma) { struct i915_mmap_offset *mmo = vma->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; GEM_BUG_ON(!obj); i915_gem_object_get(obj); } static void vm_close(struct vm_area_struct *vma) { struct i915_mmap_offset *mmo = vma->vm_private_data; struct drm_i915_gem_object *obj = mmo->obj; GEM_BUG_ON(!obj); i915_gem_object_put(obj); } static const struct vm_operations_struct vm_ops_gtt = { .fault = vm_fault_gtt, .access = vm_access, .open = vm_open, .close = vm_close, }; static const struct vm_operations_struct vm_ops_cpu = { .fault = vm_fault_cpu, .access = vm_access, .open = vm_open, .close = vm_close, }; static int singleton_release(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = file->private_data; cmpxchg(&i915->gem.mmap_singleton, file, NULL); drm_dev_put(&i915->drm); return 0; } static const struct file_operations singleton_fops = { .owner = THIS_MODULE, .release = singleton_release, }; static struct file *mmap_singleton(struct drm_i915_private *i915) { struct file *file; rcu_read_lock(); file = READ_ONCE(i915->gem.mmap_singleton); if (file && !get_file_rcu(file)) file = NULL; rcu_read_unlock(); if (file) return file; file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR); if (IS_ERR(file)) return file; /* Everyone shares a single global address space */ file->f_mapping = i915->drm.anon_inode->i_mapping; smp_store_mb(i915->gem.mmap_singleton, file); drm_dev_get(&i915->drm); return file; } static int i915_gem_object_mmap(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo, struct vm_area_struct *vma) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_device *dev = &i915->drm; struct file *anon; if (i915_gem_object_is_readonly(obj)) { if (vma->vm_flags & VM_WRITE) { i915_gem_object_put(obj); return -EINVAL; } vm_flags_clear(vma, VM_MAYWRITE); } anon = mmap_singleton(to_i915(dev)); if (IS_ERR(anon)) { i915_gem_object_put(obj); return PTR_ERR(anon); } vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO); /* * We keep the ref on mmo->obj, not vm_file, but we require * vma->vm_file->f_mapping, see vma_link(), for later revocation. * Our userspace is accustomed to having per-file resource cleanup * (i.e. contexts, objects and requests) on their close(fd), which * requires avoiding extraneous references to their filp, hence why * we prefer to use an anonymous file for their mmaps. */ vma_set_file(vma, anon); /* Drop the initial creation reference, the vma is now holding one. 
*/ fput(anon); if (obj->ops->mmap_ops) { vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags)); vma->vm_ops = obj->ops->mmap_ops; vma->vm_private_data = obj->base.vma_node.driver_private; return 0; } vma->vm_private_data = mmo; switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); vma->vm_ops = &vm_ops_cpu; break; case I915_MMAP_TYPE_FIXED: GEM_WARN_ON(1); fallthrough; case I915_MMAP_TYPE_WB: vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = &vm_ops_cpu; break; case I915_MMAP_TYPE_UC: vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); vma->vm_ops = &vm_ops_cpu; break; case I915_MMAP_TYPE_GTT: vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); vma->vm_ops = &vm_ops_gtt; break; } vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); return 0; } /* * This overcomes the limitation in drm_gem_mmap's assignment of a * drm_gem_object as the vma->vm_private_data. Since we need to * be able to resolve multiple mmap offsets which could be tied * to a single gem object. */ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_vma_offset_node *node; struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; struct drm_i915_gem_object *obj = NULL; struct i915_mmap_offset *mmo = NULL; if (drm_dev_is_unplugged(dev)) return -ENODEV; rcu_read_lock(); drm_vma_offset_lock_lookup(dev->vma_offset_manager); node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, vma->vm_pgoff, vma_pages(vma)); if (node && drm_vma_node_is_allowed(node, priv)) { /* * Skip 0-refcnted objects as it is in the process of being * destroyed and will be invalid when the vma manager lock * is released. */ if (!node->driver_private) { mmo = container_of(node, struct i915_mmap_offset, vma_node); obj = i915_gem_object_get_rcu(mmo->obj); GEM_BUG_ON(obj && obj->ops->mmap_ops); } else { obj = i915_gem_object_get_rcu (container_of(node, struct drm_i915_gem_object, base.vma_node)); GEM_BUG_ON(obj && !obj->ops->mmap_ops); } } drm_vma_offset_unlock_lookup(dev->vma_offset_manager); rcu_read_unlock(); if (!obj) return node ? -EACCES : -EINVAL; return i915_gem_object_mmap(obj, mmo, vma); } int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_device *dev = &i915->drm; struct i915_mmap_offset *mmo = NULL; enum i915_mmap_type mmap_type; struct i915_ggtt *ggtt = to_gt(i915)->ggtt; if (drm_dev_is_unplugged(dev)) return -ENODEV; /* handle ttm object */ if (obj->ops->mmap_ops) { /* * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset * to calculate page offset so set that up. */ vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node); } else { /* handle stolen and smem objects */ mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC; mmo = mmap_offset_attach(obj, mmap_type, NULL); if (IS_ERR(mmo)) return PTR_ERR(mmo); } /* * When we install vm_ops for mmap we are too late for * the vm_ops->open() which increases the ref_count of * this obj and then it gets decreased by the vm_ops->close(). * To balance this increase the obj ref_count here. */ obj = i915_gem_object_get(obj); return i915_gem_object_mmap(obj, mmo, vma); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_gem_mman.c" #endif
linux-master
drivers/gpu/drm/i915/gem/i915_gem_mman.c
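The comment in i915_gem_mmap_ioctl() above points to the preferred path: ask for a fake offset with the MMAP_OFFSET ioctl and then mmap() the DRM fd at that offset, which i915_gem_mmap() and the fault handlers then service. The sketch below is a hypothetical userspace illustration, not driver code; it assumes libdrm include paths, an existing GEM handle, and a platform where write-combined mappings are allowed (otherwise the ioctl returns -ENODEV).

/*
 * Hypothetical userspace sketch (not driver code): obtain a fake mmap offset
 * for an existing GEM handle and map it through the DRM fd, i.e. the path
 * implemented by i915_gem_mmap_offset_ioctl() and i915_gem_mmap() above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <i915_drm.h>

void *map_gem_wc(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg;
	void *ptr;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = I915_MMAP_OFFSET_WC; /* rejected with -ENODEV if WC is unusable */

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return NULL;

	/* arg.offset is a fake offset into the DRM fd's mmap space. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}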
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/jiffies.h> #include <drm/drm_file.h> #include "i915_drv.h" #include "i915_file_private.h" #include "i915_gem_context.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" /* * 20ms is a fairly arbitrary limit (greater than the average frame time) * chosen to prevent the CPU getting more than a frame ahead of the GPU * (when using lax throttling for the frontbuffer). We also use it to * offer free GPU waitboosts for severely congested workloads. */ #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) /* * Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * * Note that if we were to use the current jiffies each time around the loop, * we wouldn't escape the function with any frames outstanding if the time to * render a frame was over 20ms. * * This should get us reasonable parallelism between CPU and GPU but also * relatively low latency when blocking on a particular request to finish. */ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_private *i915 = to_i915(dev); struct i915_gem_context *ctx; unsigned long idx; long ret; /* ABI: return -EIO if already wedged */ ret = intel_gt_terminally_wedged(to_gt(i915)); if (ret) return ret; rcu_read_lock(); xa_for_each(&file_priv->context_xa, idx, ctx) { struct i915_gem_engines_iter it; struct intel_context *ce; if (!kref_get_unless_zero(&ctx->ref)) continue; rcu_read_unlock(); for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq, *target = NULL; if (!ce->timeline) continue; mutex_lock(&ce->timeline->mutex); list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { if (i915_request_completed(rq)) break; if (time_after(rq->emitted_jiffies, recent_enough)) continue; target = i915_request_get(rq); break; } mutex_unlock(&ce->timeline->mutex); if (!target) continue; ret = i915_request_wait(target, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); i915_request_put(target); if (ret < 0) break; } i915_gem_context_unlock_engines(ctx); i915_gem_context_put(ctx); rcu_read_lock(); } rcu_read_unlock(); return ret < 0 ? ret : 0; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
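The throttle ioctl above carries no payload, so driving it from userspace is a one-liner: it blocks until requests emitted more than roughly 20ms ago have completed and returns -EIO if the GPU is terminally wedged. A hypothetical sketch, again assuming libdrm include paths:

#include <errno.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Hypothetical helper (not driver code): wait until our older work retires. */
int gem_throttle(int drm_fd)
{
	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
		return -errno;	/* e.g. -EIO when terminally wedged */
	return 0;
}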
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <drm/drm_cache.h> #include "gt/intel_gt.h" #include "gt/intel_tlb.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" #include "i915_gem_lmem.h" #include "i915_gem_mman.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned long supported = RUNTIME_INFO(i915)->page_sizes; bool shrinkable; int i; assert_object_held_shared(obj); if (i915_gem_object_is_volatile(obj)) obj->mm.madv = I915_MADV_DONTNEED; /* Make the pages coherent with the GPU (flushing any swapin). */ if (obj->cache_dirty) { WARN_ON_ONCE(IS_DGFX(i915)); obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(pages); obj->cache_dirty = false; } obj->mm.get_page.sg_pos = pages->sgl; obj->mm.get_page.sg_idx = 0; obj->mm.get_dma_page.sg_pos = pages->sgl; obj->mm.get_dma_page.sg_idx = 0; obj->mm.pages = pages; obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); GEM_BUG_ON(!obj->mm.page_sizes.phys); /* * Calculate the supported page-sizes which fit into the given * sg_page_sizes. This will give us the page-sizes which we may be able * to use opportunistically when later inserting into the GTT. For * example if phys=2G, then in theory we should be able to use 1G, 2M, * 64K or 4K pages, although in practice this will depend on a number of * other factors. */ obj->mm.page_sizes.sg = 0; for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { if (obj->mm.page_sizes.phys & ~0u << i) obj->mm.page_sizes.sg |= BIT(i); } GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); shrinkable = i915_gem_object_is_shrinkable(obj); if (i915_gem_object_is_tiled(obj) && i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_set_tiling_quirk(obj); GEM_BUG_ON(!list_empty(&obj->mm.link)); atomic_inc(&obj->mm.shrink_pin); shrinkable = false; } if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) { struct list_head *list; unsigned long flags; assert_object_held(obj); spin_lock_irqsave(&i915->mm.obj_lock, flags); i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; if (obj->mm.madv != I915_MADV_WILLNEED) list = &i915->mm.purge_list; else list = &i915->mm.shrink_list; list_add_tail(&obj->mm.link, list); atomic_set(&obj->mm.shrink_pin, 0); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; assert_object_held_shared(obj); if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { drm_dbg(&i915->drm, "Attempting to obtain a purgeable object\n"); return -EFAULT; } err = obj->ops->get_pages(obj); GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); return err; } /* Ensure that the associated pages are gathered from the backing storage * and pinned into our object. i915_gem_object_pin_pages() may be called * multiple times before they are released by a single call to * i915_gem_object_unpin_pages() - once the pages are no longer referenced * either as a result of memory pressure (reaping pages under the shrinker) * or as the object is itself released. 
*/ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { int err; assert_object_held(obj); assert_object_held_shared(obj); if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); err = ____i915_gem_object_get_pages(obj); if (err) return err; smp_mb__before_atomic(); } atomic_inc(&obj->mm.pages_pin_count); return 0; } int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj) { struct i915_gem_ww_ctx ww; int err; i915_gem_ww_ctx_init(&ww, true); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_gem_object_pin_pages(obj); if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); return err; } /* Immediately discard the backing storage */ int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { if (obj->ops->truncate) return obj->ops->truncate(obj); return 0; } static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; void __rcu **slot; rcu_read_lock(); radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) radix_tree_delete(&obj->mm.get_page.radix, iter.index); radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0) radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index); rcu_read_unlock(); } static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) { if (is_vmalloc_addr(ptr)) vunmap(ptr); } static void flush_tlb_invalidate(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_gt *gt; int id; for_each_gt(gt, i915, id) { if (!obj->mm.tlb[id]) return; intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]); obj->mm.tlb[id] = 0; } } struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { struct sg_table *pages; assert_object_held_shared(obj); pages = fetch_and_zero(&obj->mm.pages); if (IS_ERR_OR_NULL(pages)) return pages; if (i915_gem_object_is_volatile(obj)) obj->mm.madv = I915_MADV_WILLNEED; if (!i915_gem_object_has_self_managed_shrink_list(obj)) i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { unmap_object(obj, page_mask_bits(obj->mm.mapping)); obj->mm.mapping = NULL; } __i915_gem_object_reset_page_iter(obj); obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; flush_tlb_invalidate(obj); return pages; } int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) { struct sg_table *pages; if (i915_gem_object_has_pinned_pages(obj)) return -EBUSY; /* May be called by shrinker from within get_pages() (on another bo) */ assert_object_held_shared(obj); i915_gem_object_release_mmap_offset(obj); /* * ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt * lists early. */ pages = __i915_gem_object_unset_pages(obj); /* * XXX Temporary hijinx to avoid updating all backends to handle * NULL pages. In the future, when we have more asynchronous * get_pages backends we should be better able to handle the * cancellation of the async task in a more uniform manner. 
*/ if (!IS_ERR_OR_NULL(pages)) obj->ops->put_pages(obj, pages); return 0; } /* The 'mapping' part of i915_gem_object_pin_map() below */ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i; struct page *stack[32], **pages = stack, *page; struct sgt_iter iter; pgprot_t pgprot; void *vaddr; switch (type) { default: MISSING_CASE(type); fallthrough; /* to use PAGE_KERNEL anyway */ case I915_MAP_WB: /* * On 32b, highmem using a finite set of indirect PTE (i.e. * vmap) to provide virtual mappings of the high pages. * As these are finite, map_new_virtual() must wait for some * other kmap() to finish when it runs out. If we map a large * number of objects, there is no method for it to tell us * to release the mappings, and we deadlock. * * However, if we make an explicit vmap of the page, that * uses a larger vmalloc arena, and also has the ability * to tell us to release unwanted mappings. Most importantly, * it will fail and propagate an error instead of waiting * forever. * * So if the page is beyond the 32b boundary, make an explicit * vmap. */ if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl))) return page_address(sg_page(obj->mm.pages->sgl)); pgprot = PAGE_KERNEL; break; case I915_MAP_WC: pgprot = pgprot_writecombine(PAGE_KERNEL_IO); break; } if (n_pages > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return ERR_PTR(-ENOMEM); } i = 0; for_each_sgt_page(page, iter, obj->mm.pages) pages[i++] = page; vaddr = vmap(pages, n_pages, 0, pgprot); if (pages != stack) kvfree(pages); return vaddr ?: ERR_PTR(-ENOMEM); } static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, enum i915_map_type type) { resource_size_t iomap = obj->mm.region->iomap.base - obj->mm.region->region.start; unsigned long n_pfn = obj->base.size >> PAGE_SHIFT; unsigned long stack[32], *pfns = stack, i; struct sgt_iter iter; dma_addr_t addr; void *vaddr; GEM_BUG_ON(type != I915_MAP_WC); if (n_pfn > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL); if (!pfns) return ERR_PTR(-ENOMEM); } i = 0; for_each_sgt_daddr(addr, iter, obj->mm.pages) pfns[i++] = (iomap + addr) >> PAGE_SHIFT; vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO)); if (pfns != stack) kvfree(pfns); return vaddr ?: ERR_PTR(-ENOMEM); } /* get, pin, and map the pages of the object into kernel space */ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { enum i915_map_type has_type; bool pinned; void *ptr; int err; if (!i915_gem_object_has_struct_page(obj) && !i915_gem_object_has_iomem(obj)) return ERR_PTR(-ENXIO); if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY)) return ERR_PTR(-EINVAL); assert_object_held(obj); pinned = !(type & I915_MAP_OVERRIDE); type &= ~I915_MAP_OVERRIDE; if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { if (unlikely(!i915_gem_object_has_pages(obj))) { GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); err = ____i915_gem_object_get_pages(obj); if (err) return ERR_PTR(err); smp_mb__before_atomic(); } atomic_inc(&obj->mm.pages_pin_count); pinned = false; } GEM_BUG_ON(!i915_gem_object_has_pages(obj)); /* * For discrete our CPU mappings needs to be consistent in order to * function correctly on !x86. 
When mapping things through TTM, we use * the same rules to determine the caching type. * * The caching rules, starting from DG1: * * - If the object can be placed in device local-memory, then the * pages should be allocated and mapped as write-combined only. * * - Everything else is always allocated and mapped as write-back, * with the guarantee that everything is also coherent with the * GPU. * * Internal users of lmem are already expected to get this right, so no * fudging needed there. */ if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) { if (type != I915_MAP_WC && !obj->mm.n_placements) { ptr = ERR_PTR(-ENODEV); goto err_unpin; } type = I915_MAP_WC; } else if (IS_DGFX(to_i915(obj->base.dev))) { type = I915_MAP_WB; } ptr = page_unpack_bits(obj->mm.mapping, &has_type); if (ptr && has_type != type) { if (pinned) { ptr = ERR_PTR(-EBUSY); goto err_unpin; } unmap_object(obj, ptr); ptr = obj->mm.mapping = NULL; } if (!ptr) { err = i915_gem_object_wait_moving_fence(obj, true); if (err) { ptr = ERR_PTR(err); goto err_unpin; } if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled())) ptr = ERR_PTR(-ENODEV); else if (i915_gem_object_has_struct_page(obj)) ptr = i915_gem_object_map_page(obj, type); else ptr = i915_gem_object_map_pfn(obj, type); if (IS_ERR(ptr)) goto err_unpin; obj->mm.mapping = page_pack_bits(ptr, type); } return ptr; err_unpin: atomic_dec(&obj->mm.pages_pin_count); return ptr; } void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj, enum i915_map_type type) { void *ret; i915_gem_object_lock(obj, NULL); ret = i915_gem_object_pin_map(obj, type); i915_gem_object_unlock(obj); return ret; } void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, unsigned long offset, unsigned long size) { enum i915_map_type has_type; void *ptr; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), offset, size, obj->base.size)); wmb(); /* let all previous writes be visible to coherent partners */ obj->mm.dirty = true; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) return; ptr = page_unpack_bits(obj->mm.mapping, &has_type); if (has_type == I915_MAP_WC) return; drm_clflush_virt_range(ptr + offset, size); if (size == obj->base.size) { obj->write_domain &= ~I915_GEM_DOMAIN_CPU; obj->cache_dirty = false; } } void __i915_gem_object_release_map(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!obj->mm.mapping); /* * We allow removing the mapping from underneath pinned pages! * * Furthermore, since this is an unsafe operation reserved only * for construction time manipulation, we ignore locking prudence. */ unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping))); i915_gem_object_unpin_map(obj); } struct scatterlist * __i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj, struct i915_gem_object_page_iter *iter, pgoff_t n, unsigned int *offset) { const bool dma = iter == &obj->mm.get_dma_page || iter == &obj->ttm.get_io_page; unsigned int idx, count; struct scatterlist *sg; might_sleep(); GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); if (!i915_gem_object_has_pinned_pages(obj)) assert_object_held(obj); /* As we iterate forward through the sg, we record each entry in a * radixtree for quick repeated (backwards) lookups. If we have seen * this index previously, we will have an entry for it. * * Initial lookup is O(N), but this is amortized to O(1) for * sequential page access (where each new request is consecutive * to the previous one). Repeated lookups are O(lg(obj->base.size)), * i.e. 
O(1) with a large constant! */ if (n < READ_ONCE(iter->sg_idx)) goto lookup; mutex_lock(&iter->lock); /* We prefer to reuse the last sg so that repeated lookup of this * (or the subsequent) sg are fast - comparing against the last * sg is faster than going through the radixtree. */ sg = iter->sg_pos; idx = iter->sg_idx; count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); while (idx + count <= n) { void *entry; unsigned long i; int ret; /* If we cannot allocate and insert this entry, or the * individual pages from this range, cancel updating the * sg_idx so that on this lookup we are forced to linearly * scan onwards, but on future lookups we will try the * insertion again (in which case we need to be careful of * the error return reporting that we have already inserted * this index). */ ret = radix_tree_insert(&iter->radix, idx, sg); if (ret && ret != -EEXIST) goto scan; entry = xa_mk_value(idx); for (i = 1; i < count; i++) { ret = radix_tree_insert(&iter->radix, idx + i, entry); if (ret && ret != -EEXIST) goto scan; } idx += count; sg = ____sg_next(sg); count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); } scan: iter->sg_pos = sg; iter->sg_idx = idx; mutex_unlock(&iter->lock); if (unlikely(n < idx)) /* insertion completed by another thread */ goto lookup; /* In case we failed to insert the entry into the radixtree, we need * to look beyond the current sg. */ while (idx + count <= n) { idx += count; sg = ____sg_next(sg); count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); } *offset = n - idx; return sg; lookup: rcu_read_lock(); sg = radix_tree_lookup(&iter->radix, n); GEM_BUG_ON(!sg); /* If this index is in the middle of multi-page sg entry, * the radix tree will contain a value entry that points * to the start of that range. We will return the pointer to * the base page and the offset of this page within the * sg entry's range. */ *offset = 0; if (unlikely(xa_is_value(sg))) { unsigned long base = xa_to_value(sg); sg = radix_tree_lookup(&iter->radix, base); GEM_BUG_ON(!sg); *offset = n - base; } rcu_read_unlock(); return sg; } struct page * __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n) { struct scatterlist *sg; unsigned int offset; GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); sg = i915_gem_object_get_sg(obj, n, &offset); return nth_page(sg_page(sg), offset); } /* Like i915_gem_object_get_page(), but mark the returned page dirty */ struct page * __i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n) { struct page *page; page = i915_gem_object_get_page(obj, n); if (!obj->mm.dirty) set_page_dirty(page); return page; } dma_addr_t __i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n, unsigned int *len) { struct scatterlist *sg; unsigned int offset; sg = i915_gem_object_get_sg_dma(obj, n, &offset); if (len) *len = sg_dma_len(sg) - (offset << PAGE_SHIFT); return sg_dma_address(sg) + (offset << PAGE_SHIFT); } dma_addr_t __i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n) { return i915_gem_object_get_dma_address_len(obj, n, NULL); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_pages.c
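A minimal sketch of how a caller typically consumes the pin/map helpers defined in the file above. The helper name example_cpu_clear is an assumption for illustration only; the i915 calls are the ones defined or referenced in i915_gem_pages.c.

/*
 * Illustrative sketch only (not part of the file above): filling an object
 * through the CPU using the unlocked pin_map helpers. The helper name is
 * made up for the example; it assumes the object has struct-page or iomem
 * backing so that a kernel mapping can be created.
 */
static int example_cpu_clear(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	/* Takes the object lock, pins the pages and returns a kernel vmap. */
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);

	/* Flush CPU writes for non-coherent readers, then drop the pin. */
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}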
/* * SPDX-License-Identifier: MIT * * Copyright 2012 Red Hat Inc */ #include <linux/dma-buf.h> #include <linux/highmem.h> #include <linux/dma-resv.h> #include <linux/module.h> #include <asm/smp.h> #include "gem/i915_gem_dmabuf.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" MODULE_IMPORT_NS(DMA_BUF); I915_SELFTEST_DECLARE(static bool force_different_devices;) static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) { return to_intel_bo(buf->priv); } static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf); struct sg_table *sgt; struct scatterlist *src, *dst; int ret, i; /* * Make a copy of the object's sgt, so that we can make an independent * mapping */ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) { ret = -ENOMEM; goto err; } ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL); if (ret) goto err_free; dst = sgt->sgl; for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->orig_nents, i) { sg_set_page(dst, sg_page(src), src->length, 0); dst = sg_next(dst); } ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); if (ret) goto err_free_sg; return sgt; err_free_sg: sg_free_table(sgt); err_free: kfree(sgt); err: return ERR_PTR(ret); } static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); void *vaddr; vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); iosys_map_set_vaddr(map, vaddr); return 0; } static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); } static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_i915_private *i915 = to_i915(obj->base.dev); int ret; if (obj->base.size < vma->vm_end - vma->vm_start) return -EINVAL; if (HAS_LMEM(i915)) return drm_gem_prime_mmap(&obj->base, vma); if (!obj->base.filp) return -ENODEV; ret = call_mmap(obj->base.filp, vma); if (ret) return ret; vma_set_file(vma, obj->base.filp); return 0; } static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); struct i915_gem_ww_ctx ww; int err; i915_gem_ww_ctx_init(&ww, true); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_gem_object_pin_pages(obj); if (!err) { err = i915_gem_object_set_to_cpu_domain(obj, write); i915_gem_object_unpin_pages(obj); } if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); return err; } static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct i915_gem_ww_ctx ww; int err; i915_gem_ww_ctx_init(&ww, true); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_gem_object_pin_pages(obj); if (!err) { err = i915_gem_object_set_to_gtt_domain(obj, false); i915_gem_object_unpin_pages(obj); } if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); return err; } static int i915_gem_dmabuf_attach(struct dma_buf 
*dmabuf, struct dma_buf_attachment *attach) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf); struct i915_gem_ww_ctx ww; int err; if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) return -EOPNOTSUPP; for_i915_gem_ww(&ww, err, true) { err = i915_gem_object_lock(obj, &ww); if (err) continue; err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM); if (err) continue; err = i915_gem_object_wait_migration(obj, 0); if (err) continue; err = i915_gem_object_pin_pages(obj); } return err; } static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf); i915_gem_object_unpin_pages(obj); } static const struct dma_buf_ops i915_dmabuf_ops = { .attach = i915_gem_dmabuf_attach, .detach = i915_gem_dmabuf_detach, .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, .begin_cpu_access = i915_gem_begin_cpu_access, .end_cpu_access = i915_gem_end_cpu_access, }; struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); DEFINE_DMA_BUF_EXPORT_INFO(exp_info); exp_info.ops = &i915_dmabuf_ops; exp_info.size = gem_obj->size; exp_info.flags = flags; exp_info.priv = gem_obj; exp_info.resv = obj->base.resv; if (obj->ops->dmabuf_export) { int ret = obj->ops->dmabuf_export(obj); if (ret) return ERR_PTR(ret); } return drm_gem_dmabuf_export(gem_obj->dev, &exp_info); } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *sgt; assert_object_held(obj); sgt = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) return PTR_ERR(sgt); /* * DG1 is special here since it still snoops transactions even with * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We * might need to revisit this as we add new discrete platforms. * * XXX: Consider doing a vmap flush or something, where possible. * Currently we just do a heavy handed wbinvd_on_all_cpus() here since * the underlying sg_table might not even point to struct pages, so we * can't just call drm_clflush_sg or similar, like we do elsewhere in * the driver. */ if (i915_gem_object_can_bypass_llc(obj) || (!HAS_LLC(i915) && !IS_DG1(i915))) wbinvd_on_all_cpus(); __i915_gem_object_set_pages(obj, sgt); return 0; } static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj, struct sg_table *sgt) { dma_buf_unmap_attachment(obj->base.import_attach, sgt, DMA_BIDIRECTIONAL); } static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { .name = "i915_gem_object_dmabuf", .get_pages = i915_gem_object_get_pages_dmabuf, .put_pages = i915_gem_object_put_pages_dmabuf, }; struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { static struct lock_class_key lock_class; struct dma_buf_attachment *attach; struct drm_i915_gem_object *obj; int ret; /* is this one of own objects? */ if (dma_buf->ops == &i915_dmabuf_ops) { obj = dma_buf_to_obj(dma_buf); /* is it from our device? */ if (obj->base.dev == dev && !I915_SELFTEST_ONLY(force_different_devices)) { /* * Importing dmabuf exported from out own gem increases * refcount on gem itself instead of f_count of dmabuf. 
*/ return &i915_gem_object_get(obj)->base; } } if (i915_gem_object_size_2big(dma_buf->size)) return ERR_PTR(-E2BIG); /* need to attach */ attach = dma_buf_attach(dma_buf, dev->dev); if (IS_ERR(attach)) return ERR_CAST(attach); get_dma_buf(dma_buf); obj = i915_gem_object_alloc(); if (!obj) { ret = -ENOMEM; goto fail_detach; } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, I915_BO_ALLOC_USER); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; /* We use GTT as shorthand for a coherent domain, one that is * neither in the GPU cache nor in the CPU cache, where all * writes are immediately visible in memory. (That's not strictly * true, but it's close! There are internal buffers such as the * write-combined buffer or a delay through the chipset for GTT * writes that do require us to treat GTT as a separate cache domain.) */ obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; return &obj->base; fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); return ERR_PTR(ret); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_dmabuf.c" #include "selftests/i915_gem_dmabuf.c" #endif
linux-master
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
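The attach callback above uses the for_i915_gem_ww() retry loop to migrate an exported object into system memory before pinning it. A sketch of the same idiom in isolation follows; the wrapper name and the "do work" placeholder are assumptions, while the locking, migration and pinning calls mirror i915_gem_dmabuf_attach() above.

/*
 * Illustrative sketch only: pin an object's pages in system memory using
 * the ww-context retry idiom from the file above.
 */
static int example_pin_in_smem(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	/*
	 * for_i915_gem_ww() hides the -EDEADLK backoff/retry loop that the
	 * begin/end_cpu_access callbacks above spell out by hand.
	 */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, 0);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
	}

	if (!err) {
		/* ... access obj->mm.pages here ... */
		i915_gem_object_unpin_pages(obj);
	}

	return err;
}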
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> #include "i915_drv.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" #include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" /** * i915_ttm_backup_free - Free any backup attached to this object * @obj: The object whose backup is to be freed. */ void i915_ttm_backup_free(struct drm_i915_gem_object *obj) { if (obj->ttm.backup) { i915_gem_object_put(obj->ttm.backup); obj->ttm.backup = NULL; } } /** * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for restore * @base: The i915_gem_apply_to_region we derive from. * @allow_gpu: Whether using the gpu blitter is allowed. * @backup_pinned: On backup, backup also pinned objects. */ struct i915_gem_ttm_pm_apply { struct i915_gem_apply_to_region base; bool allow_gpu : 1; bool backup_pinned : 1; }; static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, struct drm_i915_gem_object *obj) { struct i915_gem_ttm_pm_apply *pm_apply = container_of(apply, typeof(*pm_apply), base); struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct ttm_buffer_object *backup_bo; struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), bdev); struct drm_i915_gem_object *backup; struct ttm_operation_ctx ctx = {}; unsigned int flags; int err = 0; if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup) return 0; if (pm_apply->allow_gpu && i915_gem_object_evictable(obj)) return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); if (!pm_apply->backup_pinned || (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY))) return 0; if (obj->flags & I915_BO_ALLOC_PM_VOLATILE) return 0; /* * It seems that we might have some framebuffers still pinned at this * stage, but for such objects we might also need to deal with the CCS * aux state. Make sure we force the save/restore of the CCS state, * otherwise we might observe display corruption, when returning from * suspend. */ flags = 0; if (i915_gem_object_needs_ccs_pages(obj)) { WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj)); WARN_ON_ONCE(!pm_apply->allow_gpu); flags = I915_BO_ALLOC_CCS_AUX; } backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], obj->base.size, 0, flags); if (IS_ERR(backup)) return PTR_ERR(backup); err = i915_gem_object_lock(backup, apply->ww); if (err) goto out_no_lock; backup_bo = i915_gem_to_ttm(backup); err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); if (err) goto out_no_populate; err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false); if (err) { drm_err(&i915->drm, "Unable to copy from device to system memory, err:%pe\n", ERR_PTR(err)); goto out_no_populate; } ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = backup; return 0; out_no_populate: i915_gem_ww_unlock_single(backup); out_no_lock: i915_gem_object_put(backup); return err; } static int i915_ttm_recover(struct i915_gem_apply_to_region *apply, struct drm_i915_gem_object *obj) { i915_ttm_backup_free(obj); return 0; } /** * i915_ttm_recover_region - Free the backup of all objects of a region * @mr: The memory region * * Checks all objects of a region if there is backup attached and if so * frees that backup. Typically this is called to recover after a partially * performed backup. 
*/ void i915_ttm_recover_region(struct intel_memory_region *mr) { static const struct i915_gem_apply_to_region_ops recover_ops = { .process_obj = i915_ttm_recover, }; struct i915_gem_apply_to_region apply = {.ops = &recover_ops}; int ret; ret = i915_gem_process_region(mr, &apply); GEM_WARN_ON(ret); } /** * i915_ttm_backup_region - Back up all objects of a region to smem. * @mr: The memory region * @flags: TTM backup flags * * Loops over all objects of a region and either evicts them if they are * evictable or backs them up using a backup object if they are pinned. * * Return: Zero on success. Negative error code on error. */ int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags) { static const struct i915_gem_apply_to_region_ops backup_ops = { .process_obj = i915_ttm_backup, }; struct i915_gem_ttm_pm_apply pm_apply = { .base = {.ops = &backup_ops}, .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU, .backup_pinned = flags & I915_TTM_BACKUP_PINNED, }; return i915_gem_process_region(mr, &pm_apply.base); } static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, struct drm_i915_gem_object *obj) { struct i915_gem_ttm_pm_apply *pm_apply = container_of(apply, typeof(*pm_apply), base); struct drm_i915_gem_object *backup = obj->ttm.backup; struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup); struct ttm_operation_ctx ctx = {}; int err; if (!backup) return 0; if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY)) return 0; err = i915_gem_object_lock(backup, apply->ww); if (err) return err; /* Content may have been swapped. */ if (!backup_bo->resource) err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx); if (!err) err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); if (!err) { err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); GEM_WARN_ON(err); ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = NULL; err = 0; } i915_gem_ww_unlock_single(backup); if (!err) i915_gem_object_put(backup); return err; } /** * i915_ttm_restore_region - Restore backed-up objects of a region from smem. * @mr: The memory region * @flags: TTM backup flags * * Loops over all objects of a region and if they are backed-up, restores * them from smem. * * Return: Zero on success. Negative error code on error. */ int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags) { static const struct i915_gem_apply_to_region_ops restore_ops = { .process_obj = i915_ttm_restore, }; struct i915_gem_ttm_pm_apply pm_apply = { .base = {.ops = &restore_ops}, .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU, }; return i915_gem_process_region(mr, &pm_apply.base); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
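A hedged sketch of the suspend-side call order implied by the kerneldoc above, for a single caller-supplied local-memory region. The wrapper name and the two-pass flag choice are assumptions made for illustration, not the driver's actual suspend path; the region helpers and flags are the ones defined in the file.

/*
 * Illustrative sketch only: back up one region before suspend, recovering
 * partially created backups on failure.
 */
static int example_backup_lmem(struct intel_memory_region *mr, bool use_blitter)
{
	u32 flags = use_blitter ? I915_TTM_BACKUP_ALLOW_GPU : 0;
	int err;

	/* First pass: evict what is evictable; pinned objects are skipped. */
	err = i915_ttm_backup_region(mr, flags);
	if (!err)
		/* Second pass also backs up pinned, non-volatile objects. */
		err = i915_ttm_backup_region(mr, flags | I915_TTM_BACKUP_PINNED);

	if (err)
		/* Throw away any partially created backups. */
		i915_ttm_recover_region(mr);

	return err;
}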
/* * SPDX-License-Identifier: MIT * * Copyright © 2008-2015 Intel Corporation */ #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/pci.h> #include <linux/dma-buf.h> #include <linux/vmalloc.h> #include "gt/intel_gt_requests.h" #include "i915_trace.h" static bool swap_available(void) { return get_nr_swap_pages() > 0; } static bool can_release_pages(struct drm_i915_gem_object *obj) { /* Consider only shrinkable ojects. */ if (!i915_gem_object_is_shrinkable(obj)) return false; /* * We can only return physical pages to the system if we can either * discard the contents (because the user has marked them as being * purgeable) or if we can move their contents out to swap. */ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; } static bool drop_pages(struct drm_i915_gem_object *obj, unsigned long shrink, bool trylock_vm) { unsigned long flags; flags = 0; if (shrink & I915_SHRINK_ACTIVE) flags |= I915_GEM_OBJECT_UNBIND_ACTIVE; if (!(shrink & I915_SHRINK_BOUND)) flags |= I915_GEM_OBJECT_UNBIND_TEST; if (trylock_vm) flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK; if (i915_gem_object_unbind(obj, flags) == 0) return true; return false; } static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags) { if (obj->ops->shrink) { unsigned int shrink_flags = 0; if (!(flags & I915_SHRINK_ACTIVE)) shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT; if (flags & I915_SHRINK_WRITEBACK) shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK; return obj->ops->shrink(obj, shrink_flags); } return 0; } /** * i915_gem_shrink - Shrink buffer object caches * @ww: i915 gem ww acquire ctx, or NULL * @i915: i915 device * @target: amount of memory to make available, in pages * @nr_scanned: optional output for number of pages scanned (incremental) * @shrink: control flags for selecting cache types * * This function is the main interface to the shrinker. It will try to release * up to @target pages of main memory backing storage from buffer objects. * Selection of the specific caches can be done with @flags. This is e.g. useful * when purgeable objects should be removed from caches preferentially. * * Note that it's not guaranteed that released amount is actually available as * free system memory - the pages might still be in-used to due to other reasons * (like cpu mmaps) or the mm core has reused them before we could grab them. * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). * * Also note that any kind of pinning (both per-vma address space pins and * backing storage pins at the buffer object level) result in the shrinker code * having to skip the object. * * Returns: * The number of pages of backing storage actually released. 
*/ unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww, struct drm_i915_private *i915, unsigned long target, unsigned long *nr_scanned, unsigned int shrink) { const struct { struct list_head *list; unsigned int bit; } phases[] = { { &i915->mm.purge_list, ~0u }, { &i915->mm.shrink_list, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND }, { NULL, 0 }, }, *phase; intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; int err = 0; /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915); trace_i915_gem_shrink(i915, target, shrink); /* * Unbinding of objects will require HW access; Let us not wake the * device just to recover a little memory. If absolutely necessary, * we will force the wake during oom-notifier. */ if (shrink & I915_SHRINK_BOUND) { wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) shrink &= ~I915_SHRINK_BOUND; } /* * When shrinking the active list, we should also consider active * contexts. Active contexts are pinned until they are retired, and * so can not be simply unbound to retire and unpin their pages. To * shrink the contexts, we must wait until the gpu is idle and * completed its switch to the kernel context. In short, we do * not have a good mechanism for idling a specific context, but * what we can do is give them a kick so that we do not keep idle * contexts around longer than is necessary. */ if (shrink & I915_SHRINK_ACTIVE) /* Retire requests to unpin all idle contexts */ intel_gt_retire_requests(to_gt(i915)); /* * As we may completely rewrite the (un)bound list whilst unbinding * (due to retiring requests) we have to strictly process only * one element of the list at the time, and recheck the list * on every iteration. * * In particular, we must hold a reference whilst removing the * object as we may end up waiting for and/or retiring the objects. * This might release the final reference (held by the active list) * and result in the object being freed from under us. This is * similar to the precautions the eviction code must take whilst * removing objects. * * Also note that although these lists do not hold a reference to * the object we can safely grab one here: The final object * unreferencing and the bound_list are both protected by the * dev->struct_mutex and so we won't ever be able to observe an * object on the bound_list with a reference count equals 0. */ for (phase = phases; phase->list; phase++) { struct list_head still_in_list; struct drm_i915_gem_object *obj; unsigned long flags; if ((shrink & phase->bit) == 0) continue; INIT_LIST_HEAD(&still_in_list); /* * We serialize our access to unreferenced objects through * the use of the struct_mutex. While the objects are not * yet freed (due to RCU then a workqueue) we still want * to be able to shrink their pages, so they remain on * the unbound/bound list until actually freed. 
*/ spin_lock_irqsave(&i915->mm.obj_lock, flags); while (count < target && (obj = list_first_entry_or_null(phase->list, typeof(*obj), mm.link))) { list_move_tail(&obj->mm.link, &still_in_list); if (shrink & I915_SHRINK_VMAPS && !is_vmalloc_addr(obj->mm.mapping)) continue; if (!(shrink & I915_SHRINK_ACTIVE) && i915_gem_object_is_framebuffer(obj)) continue; if (!can_release_pages(obj)) continue; if (!kref_get_unless_zero(&obj->base.refcount)) continue; spin_unlock_irqrestore(&i915->mm.obj_lock, flags); /* May arrive from get_pages on another bo */ if (!ww) { if (!i915_gem_object_trylock(obj, NULL)) goto skip; } else { err = i915_gem_object_lock(obj, ww); if (err) goto skip; } if (drop_pages(obj, shrink, trylock_vm) && !__i915_gem_object_put_pages(obj) && !try_to_writeback(obj, shrink)) count += obj->base.size >> PAGE_SHIFT; if (!ww) i915_gem_object_unlock(obj); scanned += obj->base.size >> PAGE_SHIFT; skip: i915_gem_object_put(obj); spin_lock_irqsave(&i915->mm.obj_lock, flags); if (err) break; } list_splice_tail(&still_in_list, phase->list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (err) break; } if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(&i915->runtime_pm, wakeref); if (err) return err; if (nr_scanned) *nr_scanned += scanned; return count; } /** * i915_gem_shrink_all - Shrink buffer object caches completely * @i915: i915 device * * This is a simple wraper around i915_gem_shrink() to aggressively shrink all * caches completely. It also first waits for and retires all outstanding * requests to also be able to release backing storage for active objects. * * This should only be used in code to intentionally quiescent the gpu or as a * last-ditch effort when memory seems to have run out. * * Returns: * The number of pages of backing storage actually released. */ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) { intel_wakeref_t wakeref; unsigned long freed = 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { freed = i915_gem_shrink(NULL, i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); } return freed; } static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *i915 = container_of(shrinker, struct drm_i915_private, mm.shrinker); unsigned long num_objects; unsigned long count; count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT; num_objects = READ_ONCE(i915->mm.shrink_count); /* * Update our preferred vmscan batch size for the next pass. * Our rough guess for an effective batch size is roughly 2 * available GEM objects worth of pages. That is we don't want * the shrinker to fire, until it is worth the cost of freeing an * entire GEM object. 
*/ if (num_objects) { unsigned long avg = 2 * count / num_objects; i915->mm.shrinker.batch = max((i915->mm.shrinker.batch + avg) >> 1, 128ul /* default SHRINK_BATCH */); } return count; } static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *i915 = container_of(shrinker, struct drm_i915_private, mm.shrinker); unsigned long freed; sc->nr_scanned = 0; freed = i915_gem_shrink(NULL, i915, sc->nr_to_scan, &sc->nr_scanned, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { freed += i915_gem_shrink(NULL, i915, sc->nr_to_scan - sc->nr_scanned, &sc->nr_scanned, I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_WRITEBACK); } } return sc->nr_scanned ? freed : SHRINK_STOP; } static int i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) { struct drm_i915_private *i915 = container_of(nb, struct drm_i915_private, mm.oom_notifier); struct drm_i915_gem_object *obj; unsigned long unevictable, available, freed_pages; intel_wakeref_t wakeref; unsigned long flags; freed_pages = 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_WRITEBACK); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not * being pointed to by hardware. */ available = unevictable = 0; spin_lock_irqsave(&i915->mm.obj_lock, flags); list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { if (!can_release_pages(obj)) unevictable += obj->base.size >> PAGE_SHIFT; else available += obj->base.size >> PAGE_SHIFT; } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (freed_pages || available) pr_info("Purging GPU memory, %lu pages freed, " "%lu pages still pinned, %lu pages left available.\n", freed_pages, unevictable, available); *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; } static int i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) { struct drm_i915_private *i915 = container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct i915_vma *vma, *next; unsigned long freed_pages = 0; intel_wakeref_t wakeref; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_VMAPS); /* We also want to clear any cached iomaps as they wrap vmap */ mutex_lock(&to_gt(i915)->ggtt->vm.mutex); list_for_each_entry_safe(vma, next, &to_gt(i915)->ggtt->vm.bound_list, vm_link) { unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT; struct drm_i915_gem_object *obj = vma->obj; if (!vma->iomap || i915_vma_is_active(vma)) continue; if (!i915_gem_object_trylock(obj, NULL)) continue; if (__i915_vma_unbind(vma) == 0) freed_pages += count; i915_gem_object_unlock(obj); } mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; } void i915_gem_driver_register__shrinker(struct drm_i915_private *i915) { i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan; i915->mm.shrinker.count_objects = i915_gem_shrinker_count; i915->mm.shrinker.seeks = DEFAULT_SEEKS; i915->mm.shrinker.batch = 4096; drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker, "drm-i915_gem")); i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; 
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier)); i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; drm_WARN_ON(&i915->drm, register_vmap_purge_notifier(&i915->mm.vmap_notifier)); } void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915) { drm_WARN_ON(&i915->drm, unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); drm_WARN_ON(&i915->drm, unregister_oom_notifier(&i915->mm.oom_notifier)); unregister_shrinker(&i915->mm.shrinker); } void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, struct mutex *mutex) { if (!IS_ENABLED(CONFIG_LOCKDEP)) return; fs_reclaim_acquire(GFP_KERNEL); mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); mutex_release(&mutex->dep_map, _RET_IP_); fs_reclaim_release(GFP_KERNEL); } /** * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By * default all object types that support shrinking(see IS_SHRINKABLE), will also * make the object visible to the shrinker after allocating the system memory * pages. * @obj: The GEM object. * * This is typically used for special kernel internal objects that can't be * easily processed by the shrinker, like if they are perma-pinned. */ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); unsigned long flags; /* * We can only be called while the pages are pinned or when * the pages are released. If pinned, we should only be called * from a single caller under controlled conditions; and on release * only one caller may release us. Neither the two may cross. */ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0)) return; spin_lock_irqsave(&i915->mm.obj_lock, flags); if (!atomic_fetch_inc(&obj->mm.shrink_pin) && !list_empty(&obj->mm.link)) { list_del_init(&obj->mm.link); i915->mm.shrink_count--; i915->mm.shrink_memory -= obj->base.size; } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, struct list_head *head) { struct drm_i915_private *i915 = obj_to_i915(obj); unsigned long flags; if (!i915_gem_object_is_shrinkable(obj)) return; if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1)) return; spin_lock_irqsave(&i915->mm.obj_lock, flags); GEM_BUG_ON(!kref_read(&obj->base.refcount)); if (atomic_dec_and_test(&obj->mm.shrink_pin)) { GEM_BUG_ON(!list_empty(&obj->mm.link)); list_add_tail(&obj->mm.link, head); i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } /** * __i915_gem_object_make_shrinkable - Move the object to the tail of the * shrinkable list. Objects on this list might be swapped out. Used with * WILLNEED objects. * @obj: The GEM object. * * DO NOT USE. This is intended to be called on very special objects that don't * yet have mm.pages, but are guaranteed to have potentially reclaimable pages * underneath. */ void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) { ___i915_gem_object_make_shrinkable(obj, &obj_to_i915(obj)->mm.shrink_list); } /** * __i915_gem_object_make_purgeable - Move the object to the tail of the * purgeable list. Objects on this list might be swapped out. Used with * DONTNEED objects. * @obj: The GEM object. * * DO NOT USE. This is intended to be called on very special objects that don't * yet have mm.pages, but are guaranteed to have potentially reclaimable pages * underneath. 
*/ void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) { ___i915_gem_object_make_shrinkable(obj, &obj_to_i915(obj)->mm.purge_list); } /** * i915_gem_object_make_shrinkable - Move the object to the tail of the * shrinkable list. Objects on this list might be swapped out. Used with * WILLNEED objects. * @obj: The GEM object. * * MUST only be called on objects which have backing pages. * * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). */ void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!i915_gem_object_has_pages(obj)); __i915_gem_object_make_shrinkable(obj); } /** * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable * list. Used with DONTNEED objects. Unlike with shrinkable objects, the * shrinker will attempt to discard the backing pages, instead of trying to swap * them out. * @obj: The GEM object. * * MUST only be called on objects which have backing pages. * * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). */ void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!i915_gem_object_has_pages(obj)); __i915_gem_object_make_purgeable(obj); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
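A short sketch of an aggressive manual reclaim pass using the same flag combination as the OOM notifier in the file above. The helper name is an assumption; in-tree code would normally just call i915_gem_shrink_all().

/*
 * Illustrative sketch only: reclaim as much GEM backing storage as possible,
 * including bound objects, writing dirty pages back to swap.
 */
static unsigned long example_reclaim_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	/* Hold the device awake so bound (GGTT) objects can be unbound too. */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_WRITEBACK);

	return freed;
}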
// SPDX-License-Identifier: MIT /* * Copyright © 2020 Intel Corporation */ #include <drm/drm_fourcc.h> #include "display/intel_display.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_gem_create.h" #include "i915_trace.h" #include "i915_user_extensions.h" static u32 object_max_page_size(struct intel_memory_region **placements, unsigned int n_placements) { u32 max_page_size = 0; int i; for (i = 0; i < n_placements; i++) { struct intel_memory_region *mr = placements[i]; GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); max_page_size = max_t(u32, max_page_size, mr->min_page_size); } GEM_BUG_ON(!max_page_size); return max_page_size; } static int object_set_placements(struct drm_i915_gem_object *obj, struct intel_memory_region **placements, unsigned int n_placements) { struct intel_memory_region **arr; unsigned int i; GEM_BUG_ON(!n_placements); /* * For the common case of one memory region, skip storing an * allocated array and just point at the region directly. */ if (n_placements == 1) { struct intel_memory_region *mr = placements[0]; struct drm_i915_private *i915 = mr->i915; obj->mm.placements = &i915->mm.regions[mr->id]; obj->mm.n_placements = 1; } else { arr = kmalloc_array(n_placements, sizeof(struct intel_memory_region *), GFP_KERNEL); if (!arr) return -ENOMEM; for (i = 0; i < n_placements; i++) arr[i] = placements[i]; obj->mm.placements = arr; obj->mm.n_placements = n_placements; } return 0; } static int i915_gem_publish(struct drm_i915_gem_object *obj, struct drm_file *file, u64 *size_p, u32 *handle_p) { u64 size = obj->base.size; int ret; ret = drm_gem_handle_create(file, &obj->base, handle_p); /* drop reference from allocate - handle holds it now */ i915_gem_object_put(obj); if (ret) return ret; *size_p = size; return 0; } static struct drm_i915_gem_object * __i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size, struct intel_memory_region **placements, unsigned int n_placements, unsigned int ext_flags) { struct intel_memory_region *mr = placements[0]; struct drm_i915_gem_object *obj; unsigned int flags; int ret; i915_gem_flush_free_objects(i915); size = round_up(size, object_max_page_size(placements, n_placements)); if (size == 0) return ERR_PTR(-EINVAL); /* For most of the ABI (e.g. mmap) we think in system pages */ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); if (i915_gem_object_size_2big(size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); ret = object_set_placements(obj, placements, n_placements); if (ret) goto object_free; /* * I915_BO_ALLOC_USER will make sure the object is cleared before * any user access. */ flags = I915_BO_ALLOC_USER; ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags); if (ret) goto object_free; GEM_BUG_ON(size != obj->base.size); /* Add any flag set by create_ext options */ obj->flags |= ext_flags; trace_i915_gem_object_create(obj); return obj; object_free: if (obj->mm.n_placements > 1) kfree(obj->mm.placements); i915_gem_object_free(obj); return ERR_PTR(ret); } /** * __i915_gem_object_create_user - Creates a new object using the same path as * DRM_I915_GEM_CREATE_EXT * @i915: i915 private * @size: size of the buffer, in bytes * @placements: possible placement regions, in priority order * @n_placements: number of possible placement regions * * This function is exposed primarily for selftests and does very little * error checking. 
It is assumed that the set of placement regions has * already been verified to be valid. */ struct drm_i915_gem_object * __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, struct intel_memory_region **placements, unsigned int n_placements) { return __i915_gem_object_create_user_ext(i915, size, placements, n_placements, 0); } int i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct drm_i915_gem_object *obj; struct intel_memory_region *mr; enum intel_memory_type mem_type; int cpp = DIV_ROUND_UP(args->bpp, 8); u32 format; switch (cpp) { case 1: format = DRM_FORMAT_C8; break; case 2: format = DRM_FORMAT_RGB565; break; case 4: format = DRM_FORMAT_XRGB8888; break; default: return -EINVAL; } /* have to work out size/pitch and return them */ args->pitch = ALIGN(args->width * cpp, 64); /* align stride to page size so that we can remap */ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format, DRM_FORMAT_MOD_LINEAR)) args->pitch = ALIGN(args->pitch, 4096); if (args->pitch < args->width) return -EINVAL; args->size = mul_u32_u32(args->pitch, args->height); mem_type = INTEL_MEMORY_SYSTEM; if (HAS_LMEM(to_i915(dev))) mem_type = INTEL_MEMORY_LOCAL; mr = intel_memory_region_by_type(to_i915(dev), mem_type); obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1); if (IS_ERR(obj)) return PTR_ERR(obj); return i915_gem_publish(obj, file, &args->size, &args->handle); } /** * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it. * @dev: drm device pointer * @data: ioctl data blob * @file: drm file pointer */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create *args = data; struct drm_i915_gem_object *obj; struct intel_memory_region *mr; mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM); obj = __i915_gem_object_create_user(i915, args->size, &mr, 1); if (IS_ERR(obj)) return PTR_ERR(obj); return i915_gem_publish(obj, file, &args->size, &args->handle); } struct create_ext { struct drm_i915_private *i915; struct intel_memory_region *placements[INTEL_REGION_UNKNOWN]; unsigned int n_placements; unsigned int placement_mask; unsigned long flags; unsigned int pat_index; }; static void repr_placements(char *buf, size_t size, struct intel_memory_region **placements, int n_placements) { int i; buf[0] = '\0'; for (i = 0; i < n_placements; i++) { struct intel_memory_region *mr = placements[i]; int r; r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }", mr->name, mr->type, mr->instance); if (r >= size) return; buf += r; size -= r; } } static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args, struct create_ext *ext_data) { struct drm_i915_private *i915 = ext_data->i915; struct drm_i915_gem_memory_class_instance __user *uregions = u64_to_user_ptr(args->regions); struct intel_memory_region *placements[INTEL_REGION_UNKNOWN]; u32 mask; int i, ret = 0; if (args->pad) { drm_dbg(&i915->drm, "pad should be zero\n"); ret = -EINVAL; } if (!args->num_regions) { drm_dbg(&i915->drm, "num_regions is zero\n"); ret = -EINVAL; } BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements)); BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements)); if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) { drm_dbg(&i915->drm, "num_regions is too large\n"); ret = -EINVAL; } if (ret) return ret; mask = 0; for (i = 0; i < args->num_regions; i++) { struct 
drm_i915_gem_memory_class_instance region; struct intel_memory_region *mr; if (copy_from_user(&region, uregions, sizeof(region))) return -EFAULT; mr = intel_memory_region_lookup(i915, region.memory_class, region.memory_instance); if (!mr || mr->private) { drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n", region.memory_class, region.memory_instance, i); ret = -EINVAL; goto out_dump; } if (mask & BIT(mr->id)) { drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n", mr->name, region.memory_class, region.memory_instance, i); ret = -EINVAL; goto out_dump; } placements[i] = mr; mask |= BIT(mr->id); ++uregions; } if (ext_data->n_placements) { ret = -EINVAL; goto out_dump; } ext_data->n_placements = args->num_regions; for (i = 0; i < args->num_regions; i++) ext_data->placements[i] = placements[i]; ext_data->placement_mask = mask; return 0; out_dump: if (1) { char buf[256]; if (ext_data->n_placements) { repr_placements(buf, sizeof(buf), ext_data->placements, ext_data->n_placements); drm_dbg(&i915->drm, "Placements were already set in previous EXT. Existing placements: %s\n", buf); } repr_placements(buf, sizeof(buf), placements, i); drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf); } return ret; } static int ext_set_placements(struct i915_user_extension __user *base, void *data) { struct drm_i915_gem_create_ext_memory_regions ext; if (copy_from_user(&ext, base, sizeof(ext))) return -EFAULT; return set_placements(&ext, data); } static int ext_set_protected(struct i915_user_extension __user *base, void *data) { struct drm_i915_gem_create_ext_protected_content ext; struct create_ext *ext_data = data; if (copy_from_user(&ext, base, sizeof(ext))) return -EFAULT; if (ext.flags) return -EINVAL; if (!intel_pxp_is_enabled(ext_data->i915->pxp)) return -ENODEV; ext_data->flags |= I915_BO_PROTECTED; return 0; } static int ext_set_pat(struct i915_user_extension __user *base, void *data) { struct create_ext *ext_data = data; struct drm_i915_private *i915 = ext_data->i915; struct drm_i915_gem_create_ext_set_pat ext; unsigned int max_pat_index; BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) != offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd)); /* Limiting the extension only to Meteor Lake */ if (!IS_METEORLAKE(i915)) return -ENODEV; if (copy_from_user(&ext, base, sizeof(ext))) return -EFAULT; max_pat_index = INTEL_INFO(i915)->max_pat_index; if (ext.pat_index > max_pat_index) { drm_dbg(&i915->drm, "PAT index is invalid: %u\n", ext.pat_index); return -EINVAL; } ext_data->pat_index = ext.pat_index; return 0; } static const i915_user_extension_fn create_extensions[] = { [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected, [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat, }; #define PAT_INDEX_NOT_SET 0xffff /** * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it. 
* @dev: drm device pointer * @data: ioctl data blob * @file: drm file pointer */ int i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_create_ext *args = data; struct create_ext ext_data = { .i915 = i915 }; struct drm_i915_gem_object *obj; int ret; if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) return -EINVAL; ext_data.pat_index = PAT_INDEX_NOT_SET; ret = i915_user_extensions(u64_to_user_ptr(args->extensions), create_extensions, ARRAY_SIZE(create_extensions), &ext_data); if (ret) return ret; if (!ext_data.n_placements) { ext_data.placements[0] = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM); ext_data.n_placements = 1; } if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) { if (ext_data.n_placements == 1) return -EINVAL; /* * We always need to be able to spill to system memory, if we * can't place in the mappable part of LMEM. */ if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM))) return -EINVAL; } else { if (ext_data.n_placements > 1 || ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM) ext_data.flags |= I915_BO_ALLOC_GPU_ONLY; } obj = __i915_gem_object_create_user_ext(i915, args->size, ext_data.placements, ext_data.n_placements, ext_data.flags); if (IS_ERR(obj)) return PTR_ERR(obj); if (ext_data.pat_index != PAT_INDEX_NOT_SET) { i915_gem_object_set_pat_index(obj, ext_data.pat_index); /* Mark pat_index is set by UMD */ obj->pat_set_by_user = true; } return i915_gem_publish(obj, file, &args->size, &args->handle); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_create.c
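A minimal sketch of creating an object with an explicit placement list through the helper that the file above exports primarily for selftests. The wrapper name is an assumption; a single system-memory placement mirrors what i915_gem_create_ioctl() does internally.

/*
 * Illustrative sketch only: create a user-visible object backed by system
 * memory using the placement-aware creation helper.
 */
static struct drm_i915_gem_object *
example_create_smem(struct drm_i915_private *i915, u64 size)
{
	struct intel_memory_region *mr =
		intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	/* Size is rounded up to the region's minimum page size internally. */
	return __i915_gem_object_create_user(i915, size, &mr, 1);
}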
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include <linux/dma-fence-array.h> #include <linux/dma-fence-chain.h> #include <linux/jiffies.h> #include "gt/intel_engine.h" #include "gt/intel_rps.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" static long i915_gem_object_wait_fence(struct dma_fence *fence, unsigned int flags, long timeout) { BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return timeout; if (dma_fence_is_i915(fence)) return i915_request_wait_timeout(to_request(fence), flags, timeout); return dma_fence_wait_timeout(fence, flags & I915_WAIT_INTERRUPTIBLE, timeout); } static void i915_gem_object_boost(struct dma_resv *resv, unsigned int flags) { struct dma_resv_iter cursor; struct dma_fence *fence; /* * Prescan all fences for potential boosting before we begin waiting. * * When we wait, we wait on outstanding fences serially. If the * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced * form 1:2, then as we look at each wait in turn we see that each * request is currently executing and not worthy of boosting. But if * we only happen to look at the final fence in the sequence (because * of request coalescing or splitting between read/write arrays by * the iterator), then we would boost. As such our decision to boost * or not is delicately balanced on the order we wait on fences. * * So instead of looking for boosts sequentially, look for all boosts * upfront and then wait on the outstanding fences. */ dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) if (dma_fence_is_i915(fence) && !i915_request_started(to_request(fence))) intel_rps_boost(to_request(fence)); dma_resv_iter_end(&cursor); } static long i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int flags, long timeout) { struct dma_resv_iter cursor; struct dma_fence *fence; long ret = timeout ?: 1; i915_gem_object_boost(resv, flags); dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) { ret = i915_gem_object_wait_fence(fence, flags, timeout); if (ret <= 0) break; if (timeout) timeout = ret; } dma_resv_iter_end(&cursor); return ret; } static void fence_set_priority(struct dma_fence *fence, const struct i915_sched_attr *attr) { struct i915_request *rq; struct intel_engine_cs *engine; if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) return; rq = to_request(fence); engine = rq->engine; rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->sched_engine->schedule) engine->sched_engine->schedule(rq, attr); rcu_read_unlock(); } static inline bool __dma_fence_is_chain(const struct dma_fence *fence) { return fence->ops == &dma_fence_chain_ops; } void i915_gem_fence_wait_priority(struct dma_fence *fence, const struct i915_sched_attr *attr) { if (dma_fence_is_signaled(fence)) return; local_bh_disable(); /* Recurse once into a fence-array */ if (dma_fence_is_array(fence)) { struct dma_fence_array *array = to_dma_fence_array(fence); int i; for (i = 0; i < array->num_fences; i++) fence_set_priority(array->fences[i], attr); } else if (__dma_fence_is_chain(fence)) { struct dma_fence *iter; /* The chain is ordered; if we boost the last, we boost all */ dma_fence_chain_for_each(iter, fence) { fence_set_priority(to_dma_fence_chain(iter)->fence, attr); break; } dma_fence_put(iter); } else { fence_set_priority(fence, attr); } 
local_bh_enable(); /* kick the tasklets if queues were reprioritised */ } int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr) { struct dma_resv_iter cursor; struct dma_fence *fence; dma_resv_iter_begin(&cursor, obj->base.resv, dma_resv_usage_rw(flags & I915_WAIT_ALL)); dma_resv_for_each_fence_unlocked(&cursor, fence) i915_gem_fence_wait_priority(fence, attr); dma_resv_iter_end(&cursor); return 0; } /** * i915_gem_object_wait - Waits for rendering to the object to be completed * @obj: i915 gem object * @flags: how to wait (under a lock, for all rendering or just for writes etc) * @timeout: how long to wait */ int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, long timeout) { might_sleep(); GEM_BUG_ON(timeout < 0); timeout = i915_gem_object_wait_reservation(obj->base.resv, flags, timeout); if (timeout < 0) return timeout; return !timeout ? -ETIME : 0; } static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) { /* nsecs_to_jiffies64() does not guard against overflow */ if ((NSEC_PER_SEC % HZ) != 0 && div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) return MAX_JIFFY_OFFSET; return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); } static unsigned long to_wait_timeout(s64 timeout_ns) { if (timeout_ns < 0) return MAX_SCHEDULE_TIMEOUT; if (timeout_ns == 0) return 0; return nsecs_to_jiffies_timeout(timeout_ns); } /** * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT * @dev: drm device pointer * @data: ioctl data blob * @file: drm file pointer * * Returns 0 if successful, else an error is returned with the remaining time in * the timeout parameter. * -ETIME: object is still busy after timeout * -ERESTARTSYS: signal interrupted the wait * -ENONENT: object doesn't exist * Also possible, but rare: * -EAGAIN: incomplete, restart syscall * -ENOMEM: damn * -ENODEV: Internal IRQ fail * -E?: The add request failed * * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any * non-zero timeout parameter the wait ioctl will wait for the given number of * nanoseconds on an object becoming unbusy. Since the wait itself does so * without holding struct_mutex the object may become re-busied before this * function completes. A similar but shorter * race condition exists in the busy * ioctl */ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; ktime_t start; long ret; if (args->flags != 0) return -EINVAL; obj = i915_gem_object_lookup(file, args->bo_handle); if (!obj) return -ENOENT; start = ktime_get(); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | I915_WAIT_ALL, to_wait_timeout(args->timeout_ns)); if (args->timeout_ns > 0) { args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); if (args->timeout_ns < 0) args->timeout_ns = 0; /* * Apparently ktime isn't accurate enough and occasionally has a * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch * things up to make the test happy. We allow up to 1 jiffy. * * This is a regression from the timespec->ktime conversion. */ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) args->timeout_ns = 0; /* Asked to wait beyond the jiffie/scheduler precision? */ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; } i915_gem_object_put(obj); return ret; } /** * i915_gem_object_wait_migration - Sync an accelerated migration operation * @obj: The migrating object. 
* @flags: waiting flags. Currently supports only I915_WAIT_INTERRUPTIBLE. * * Wait for any pending async migration operation on the object, * whether it's explicitly (i915_gem_object_migrate()) or implicitly * (swapin, initial clearing) initiated. * * Return: 0 if successful, -ERESTARTSYS if a signal was hit during waiting. */ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, unsigned int flags) { might_sleep(); return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE)); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_wait.c
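The kerneldoc above describes how DRM_IOCTL_I915_GEM_WAIT interprets and writes back timeout_ns. As a minimal userspace sketch of that contract (gem_bo_wait_idle is a hypothetical helper, and the i915_drm.h include path may differ depending on how libdrm headers are installed):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/*
 * Wait for a GEM buffer to go idle. A negative timeout_ns waits forever,
 * zero polls (busy-ioctl behaviour), positive waits up to that many ns.
 * On return the kernel has written the remaining time back into
 * wait.timeout_ns, so EAGAIN (a timeout beyond scheduler precision) can
 * simply be retried with the updated value.
 */
static bool gem_bo_wait_idle(int drm_fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.flags = 0,		/* non-zero flags are rejected with EINVAL */
		.timeout_ns = timeout_ns,
	};
	int ret;

	do {
		ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret == 0;	/* false with errno == ETIME means still busy */
}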
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/highmem.h> #include <linux/shmem_fs.h> #include <linux/swap.h> #include <drm/drm_cache.h> #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_gem_region.h" #include "i915_gem_tiling.h" #include "i915_scatterlist.h" static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; struct drm_i915_private *i915 = to_i915(obj->base.dev); struct scatterlist *sg; struct sg_table *st; dma_addr_t dma; void *vaddr; void *dst; int i; /* Contiguous chunk, with a single scatterlist element */ if (overflows_type(obj->base.size, sg->length)) return -E2BIG; if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) return -EINVAL; /* * Always aligning to the object size, allows a single allocation * to handle all possible callers, and given typical object sizes, * the alignment of the buddy allocation will naturally match. */ vaddr = dma_alloc_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), &dma, GFP_KERNEL); if (!vaddr) return -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) goto err_pci; if (sg_alloc_table(st, 1, GFP_KERNEL)) goto err_st; sg = st->sgl; sg->offset = 0; sg->length = obj->base.size; sg_assign_page(sg, (struct page *)vaddr); sg_dma_address(sg) = dma; sg_dma_len(sg) = obj->base.size; dst = vaddr; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; void *src; page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) goto err_st; src = kmap_atomic(page); memcpy(dst, src, PAGE_SIZE); drm_clflush_virt_range(dst, PAGE_SIZE); kunmap_atomic(src); put_page(page); dst += PAGE_SIZE; } intel_gt_chipset_flush(to_gt(i915)); /* We're no longer struct page backed */ obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE; __i915_gem_object_set_pages(obj, st); return 0; err_st: kfree(st); err_pci: dma_free_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), vaddr, dma); return -ENOMEM; } void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) { dma_addr_t dma = sg_dma_address(pages->sgl); void *vaddr = sg_page(pages->sgl); __i915_gem_object_release_shmem(obj, pages, false); if (obj->mm.dirty) { struct address_space *mapping = obj->base.filp->f_mapping; void *src = vaddr; int i; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; char *dst; page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) continue; dst = kmap_atomic(page); drm_clflush_virt_range(src, PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(dst); set_page_dirty(page); if (obj->mm.madv == I915_MADV_WILLNEED) mark_page_accessed(page); put_page(page); src += PAGE_SIZE; } obj->mm.dirty = false; } sg_free_table(pages); kfree(pages); dma_free_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), vaddr, dma); } int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *args) { void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (err) return err; /* * We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
*/ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); if (copy_from_user(vaddr, user_data, args->size)) return -EFAULT; drm_clflush_virt_range(vaddr, args->size); intel_gt_chipset_flush(to_gt(i915)); i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); return 0; } int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *args) { void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); int err; err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); if (err) return err; drm_clflush_virt_range(vaddr, args->size); if (copy_to_user(user_data, vaddr, args->size)) return -EFAULT; return 0; } static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj) { struct sg_table *pages; int err; pages = __i915_gem_object_unset_pages(obj); err = i915_gem_object_get_pages_phys(obj); if (err) goto err_xfer; /* Perma-pin (until release) the physical set of pages */ __i915_gem_object_pin_pages(obj); if (!IS_ERR_OR_NULL(pages)) i915_gem_object_put_pages_shmem(obj, pages); i915_gem_object_release_memory_region(obj); return 0; err_xfer: if (!IS_ERR_OR_NULL(pages)) __i915_gem_object_set_pages(obj, pages); return err; } int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) { int err; assert_object_held(obj); if (align > obj->base.size) return -EINVAL; if (!i915_gem_object_is_shmem(obj)) return -EINVAL; if (!i915_gem_object_has_struct_page(obj)) return 0; err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); if (err) return err; if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; if (i915_gem_object_has_tiling_quirk(obj)) return -EFAULT; if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj)) return -EBUSY; if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { drm_dbg(obj->base.dev, "Attempting to obtain a purgeable object\n"); return -EFAULT; } return i915_gem_object_shmem_to_phys(obj); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_gem_phys.c" #endif
linux-master
drivers/gpu/drm/i915/gem/i915_gem_phys.c
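The phys backend above describes one contiguous DMA-coherent allocation with a single-entry sg_table so the rest of the GEM code can keep consuming scatterlists. A driver-agnostic sketch of that wrapping pattern follows; struct contig_buf and contig_buf_init are hypothetical names, and error handling is trimmed for brevity:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct contig_buf {
	void *vaddr;
	dma_addr_t dma;
	size_t size;
	struct sg_table *st;
};

static int contig_buf_init(struct device *dev, struct contig_buf *buf, size_t size)
{
	buf->size = size;
	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
	if (!buf->vaddr)
		return -ENOMEM;

	buf->st = kmalloc(sizeof(*buf->st), GFP_KERNEL);
	if (!buf->st || sg_alloc_table(buf->st, 1, GFP_KERNEL)) {
		kfree(buf->st);
		dma_free_coherent(dev, size, buf->vaddr, buf->dma);
		return -ENOMEM;
	}

	/* One scatterlist element describing the whole contiguous chunk. */
	buf->st->sgl->offset = 0;
	buf->st->sgl->length = size;
	sg_dma_address(buf->st->sgl) = buf->dma;
	sg_dma_len(buf->st->sgl) = size;
	return 0;
}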
/* * SPDX-License-Identifier: MIT * * Copyright © 2017 Intel Corporation */ #include <linux/fs.h> #include <linux/mount.h> #include "i915_drv.h" #include "i915_gemfs.h" #include "i915_utils.h" void i915_gemfs_init(struct drm_i915_private *i915) { char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; struct vfsmount *gemfs; /* * By creating our own shmemfs mountpoint, we can pass in * mount flags that better match our usecase. * * One example, although it is probably better with a per-file * control, is selecting huge page allocations ("huge=within_size"). * However, we only do so on platforms which benefit from it, or to * offset the overhead of iommu lookups, where with latter it is a net * win even on platforms which would otherwise see some performance * regressions such a slow reads issue on Broadwell and Skylake. */ if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915)) return; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) goto err; type = get_fs_type("tmpfs"); if (!type) goto err; gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); if (IS_ERR(gemfs)) goto err; i915->mm.gemfs = gemfs; drm_info(&i915->drm, "Using Transparent Hugepages\n"); return; err: drm_notice(&i915->drm, "Transparent Hugepage support is recommended for optimal performance%s\n", GRAPHICS_VER(i915) >= 11 ? " on this platform!" : " when IOMMU is enabled!"); } void i915_gemfs_fini(struct drm_i915_private *i915) { kern_unmount(i915->mm.gemfs); }
linux-master
drivers/gpu/drm/i915/gem/i915_gemfs.c
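i915_gemfs_init() above mounts a private tmpfs instance with the "huge=within_size" option via vfs_kern_mount(). The same option can be exercised from userspace with mount(2); the sketch below is only an analogue for experimentation (the mount point is hypothetical, and it needs CAP_SYS_ADMIN plus CONFIG_TRANSPARENT_HUGEPAGE):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /mnt/gemfs-demo is a hypothetical, pre-created mount point. */
	if (mount("tmpfs", "/mnt/gemfs-demo", "tmpfs", 0, "huge=within_size")) {
		perror("mount tmpfs huge=within_size");
		return 1;
	}
	printf("tmpfs mounted with huge=within_size\n");
	return 0;
}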
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/ttm/ttm_tt.h> #include "i915_deps.h" #include "i915_drv.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" #include "gem/i915_gem_object.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" #include "gem/i915_gem_ttm_move.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_migrate.h" /** * DOC: Selftest failure modes for failsafe migration: * * For fail_gpu_migration, the gpu blit scheduled is always a clear blit * rather than a copy blit, and then we force the failure paths as if * the blit fence returned an error. * * For fail_work_allocation we fail the kmalloc of the async worker, we * sync the gpu blit. If it then fails, or fail_gpu_migration is set to * true, then a memcpy operation is performed sync. */ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) static bool fail_gpu_migration; static bool fail_work_allocation; static bool ban_memcpy; void i915_ttm_migrate_set_failure_modes(bool gpu_migration, bool work_allocation) { fail_gpu_migration = gpu_migration; fail_work_allocation = work_allocation; } void i915_ttm_migrate_set_ban_memcpy(bool ban) { ban_memcpy = ban; } #endif static enum i915_cache_level i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, struct ttm_tt *ttm) { return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !i915_ttm_gtt_binds_lmem(res) && ttm->caching == ttm_cached) ? I915_CACHE_LLC : I915_CACHE_NONE; } static struct intel_memory_region * i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) { struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); /* There's some room for optimization here... */ GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && ttm_mem_type < I915_PL_LMEM0); if (ttm_mem_type == I915_PL_SYSTEM) return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, 0); return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, ttm_mem_type - I915_PL_LMEM0); } /** * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a * TTM move * @obj: The gem object */ void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { obj->write_domain = I915_GEM_DOMAIN_WC; obj->read_domains = I915_GEM_DOMAIN_WC; } else { obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; } } /** * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move * @obj: The gem object * * Adjusts the GEM object's region, mem_flags and cache coherency after a * TTM move. */ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); unsigned int cache_level; unsigned int mem_flags; unsigned int i; int mem_type; /* * We might have been purged (or swapped out) if the resource is NULL, * in which case the SYSTEM placement is the closest match to describe * the current domain. If the object is ever used in this state then we * will require moving it again. */ if (!bo->resource) { mem_flags = I915_BO_FLAG_STRUCT_PAGE; mem_type = I915_PL_SYSTEM; cache_level = I915_CACHE_NONE; } else { mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? 
I915_BO_FLAG_IOMEM : I915_BO_FLAG_STRUCT_PAGE; mem_type = bo->resource->mem_type; cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, bo->ttm); } /* * If object was moved to an allowable region, update the object * region to consider it migrated. Note that if it's currently not * in an allowable region, it's evicted and we don't update the * object region. */ if (intel_region_to_ttm_type(obj->mm.region) != mem_type) { for (i = 0; i < obj->mm.n_placements; ++i) { struct intel_memory_region *mr = obj->mm.placements[i]; if (intel_region_to_ttm_type(mr) == mem_type && mr != obj->mm.region) { i915_gem_object_release_memory_region(obj); i915_gem_object_init_memory_region(obj, mr); break; } } } obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); obj->mem_flags |= mem_flags; i915_gem_object_set_cache_coherency(obj, cache_level); } /** * i915_ttm_move_notify - Prepare an object for move * @bo: The ttm buffer object. * * This function prepares an object for move by removing all GPU bindings, * removing all CPU mapings and finally releasing the pages sg-table. * * Return: 0 if successful, negative error code on error. */ int i915_ttm_move_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); int ret; /* * Note: The async unbinding here will actually transform the * blocking wait for unbind into a wait before finally submitting * evict / migration blit and thus stall the migration timeline * which may not be good for overall throughput. We should make * sure we await the unbind fences *after* the migration blit * instead of *before* as we currently do. */ ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE | I915_GEM_OBJECT_UNBIND_ASYNC); if (ret) return ret; ret = __i915_gem_object_put_pages(obj); if (ret) return ret; return 0; } static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct sg_table *dst_st, const struct i915_deps *deps) { struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), bdev); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct i915_request *rq; struct ttm_tt *src_ttm = bo->ttm; enum i915_cache_level src_level, dst_level; int ret; if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915))) return ERR_PTR(-EINVAL); /* With fail_gpu_migration, we always perform a GPU clear. 
*/ if (I915_SELFTEST_ONLY(fail_gpu_migration)) clear = true; dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); if (clear) { if (bo->type == ttm_bo_type_kernel && !I915_SELFTEST_ONLY(fail_gpu_migration)) return ERR_PTR(-EINVAL); intel_engine_pm_get(to_gt(i915)->migrate.context->engine); ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps, dst_st->sgl, i915_gem_get_pat_index(i915, dst_level), i915_ttm_gtt_binds_lmem(dst_mem), 0, &rq); } else { struct i915_refct_sgt *src_rsgt = i915_ttm_resource_get_st(obj, bo->resource); if (IS_ERR(src_rsgt)) return ERR_CAST(src_rsgt); src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); intel_engine_pm_get(to_gt(i915)->migrate.context->engine); ret = intel_context_migrate_copy(to_gt(i915)->migrate.context, deps, src_rsgt->table.sgl, i915_gem_get_pat_index(i915, src_level), i915_ttm_gtt_binds_lmem(bo->resource), dst_st->sgl, i915_gem_get_pat_index(i915, dst_level), i915_ttm_gtt_binds_lmem(dst_mem), &rq); i915_refct_sgt_put(src_rsgt); } intel_engine_pm_put(to_gt(i915)->migrate.context->engine); if (ret && rq) { i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); i915_request_put(rq); } return ret ? ERR_PTR(ret) : &rq->fence; } /** * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality. * @_dst_iter: Storage space for the destination kmap iterator. * @_src_iter: Storage space for the source kmap iterator. * @dst_iter: Pointer to the destination kmap iterator. * @src_iter: Pointer to the source kmap iterator. * @num_pages: Number of pages * @clear: Whether to clear instead of copy. * @src_rsgt: Refcounted scatter-gather list of source memory. * @dst_rsgt: Refcounted scatter-gather list of destination memory. */ struct i915_ttm_memcpy_arg { union { struct ttm_kmap_iter_tt tt; struct ttm_kmap_iter_iomap io; } _dst_iter, _src_iter; struct ttm_kmap_iter *dst_iter; struct ttm_kmap_iter *src_iter; unsigned long num_pages; bool clear; struct i915_refct_sgt *src_rsgt; struct i915_refct_sgt *dst_rsgt; }; /** * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence. * @fence: The dma-fence. * @work: The work struct use for the memcpy work. * @lock: The fence lock. Not used to protect anything else ATM. * @irq_work: Low latency worker to signal the fence since it can't be done * from the callback for lockdep reasons. * @cb: Callback for the accelerated migration fence. * @arg: The argument for the memcpy functionality. * @i915: The i915 pointer. * @obj: The GEM object. * @memcpy_allowed: Instead of processing the @arg, and falling back to memcpy * or memset, we wedge the device and set the @obj unknown_state, to prevent * further access to the object with the CPU or GPU. On some devices we might * only be permitted to use the blitter engine for such operations. 
*/ struct i915_ttm_memcpy_work { struct dma_fence fence; struct work_struct work; spinlock_t lock; struct irq_work irq_work; struct dma_fence_cb cb; struct i915_ttm_memcpy_arg arg; struct drm_i915_private *i915; struct drm_i915_gem_object *obj; bool memcpy_allowed; }; static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg) { ttm_move_memcpy(arg->clear, arg->num_pages, arg->dst_iter, arg->src_iter); } static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg, struct ttm_buffer_object *bo, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct i915_refct_sgt *dst_rsgt) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct intel_memory_region *dst_reg, *src_reg; dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); GEM_BUG_ON(!dst_reg || !src_reg); arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ? ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) : ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap, &dst_rsgt->table, dst_reg->region.start); arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ? ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) : ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap, &obj->ttm.cached_io_rsgt->table, src_reg->region.start); arg->clear = clear; arg->num_pages = bo->base.size >> PAGE_SHIFT; arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt); arg->src_rsgt = clear ? NULL : i915_ttm_resource_get_st(obj, bo->resource); } static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg) { i915_refct_sgt_put(arg->src_rsgt); i915_refct_sgt_put(arg->dst_rsgt); } static void __memcpy_work(struct work_struct *work) { struct i915_ttm_memcpy_work *copy_work = container_of(work, typeof(*copy_work), work); struct i915_ttm_memcpy_arg *arg = &copy_work->arg; bool cookie; /* * FIXME: We need to take a closer look here. We should be able to plonk * this into the fence critical section. */ if (!copy_work->memcpy_allowed) { struct intel_gt *gt; unsigned int id; for_each_gt(gt, copy_work->i915, id) intel_gt_set_wedged(gt); } cookie = dma_fence_begin_signalling(); if (copy_work->memcpy_allowed) { i915_ttm_move_memcpy(arg); } else { /* * Prevent further use of the object. Any future GTT binding or * CPU access is not allowed once we signal the fence. Outside * of the fence critical section, we then also then wedge the gpu * to indicate the device is not functional. * * The below dma_fence_signal() is our write-memory-barrier. 
*/ copy_work->obj->mm.unknown_state = true; } dma_fence_end_signalling(cookie); dma_fence_signal(&copy_work->fence); i915_ttm_memcpy_release(arg); i915_gem_object_put(copy_work->obj); dma_fence_put(&copy_work->fence); } static void __memcpy_irq_work(struct irq_work *irq_work) { struct i915_ttm_memcpy_work *copy_work = container_of(irq_work, typeof(*copy_work), irq_work); struct i915_ttm_memcpy_arg *arg = &copy_work->arg; dma_fence_signal(&copy_work->fence); i915_ttm_memcpy_release(arg); i915_gem_object_put(copy_work->obj); dma_fence_put(&copy_work->fence); } static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct i915_ttm_memcpy_work *copy_work = container_of(cb, typeof(*copy_work), cb); if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) { INIT_WORK(&copy_work->work, __memcpy_work); queue_work(system_unbound_wq, &copy_work->work); } else { init_irq_work(&copy_work->irq_work, __memcpy_irq_work); irq_work_queue(&copy_work->irq_work); } } static const char *get_driver_name(struct dma_fence *fence) { return "i915_ttm_memcpy_work"; } static const char *get_timeline_name(struct dma_fence *fence) { return "unbound"; } static const struct dma_fence_ops dma_fence_memcpy_ops = { .get_driver_name = get_driver_name, .get_timeline_name = get_timeline_name, }; static struct dma_fence * i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work, struct dma_fence *dep) { int ret; spin_lock_init(&work->lock); dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0); dma_fence_get(&work->fence); ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb); if (ret) { if (ret != -ENOENT) dma_fence_wait(dep, false); return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL : dep->error); } return &work->fence; } static bool i915_ttm_memcpy_allowed(struct ttm_buffer_object *bo, struct ttm_resource *dst_mem) { if (i915_gem_object_needs_ccs_pages(i915_ttm_to_gem(bo))) return false; if (!(i915_ttm_resource_mappable(bo->resource) && i915_ttm_resource_mappable(dst_mem))) return false; return I915_SELFTEST_ONLY(ban_memcpy) ? false : true; } static struct dma_fence * __i915_ttm_move(struct ttm_buffer_object *bo, const struct ttm_operation_ctx *ctx, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct i915_refct_sgt *dst_rsgt, bool allow_accel, const struct i915_deps *move_deps) { const bool memcpy_allowed = i915_ttm_memcpy_allowed(bo, dst_mem); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct drm_i915_private *i915 = to_i915(bo->base.dev); struct i915_ttm_memcpy_work *copy_work = NULL; struct i915_ttm_memcpy_arg _arg, *arg = &_arg; struct dma_fence *fence = ERR_PTR(-EINVAL); if (allow_accel) { fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, &dst_rsgt->table, move_deps); /* * We only need to intercept the error when moving to lmem. * When moving to system, TTM or shmem will provide us with * cleared pages. */ if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) && !I915_SELFTEST_ONLY(fail_gpu_migration || fail_work_allocation)) goto out; } /* If we've scheduled gpu migration. Try to arm error intercept. 
*/ if (!IS_ERR(fence)) { struct dma_fence *dep = fence; if (!I915_SELFTEST_ONLY(fail_work_allocation)) copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL); if (copy_work) { copy_work->i915 = i915; copy_work->memcpy_allowed = memcpy_allowed; copy_work->obj = i915_gem_object_get(obj); arg = &copy_work->arg; if (memcpy_allowed) i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, dst_rsgt); fence = i915_ttm_memcpy_work_arm(copy_work, dep); } else { dma_fence_wait(dep, false); fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL : fence->error); } dma_fence_put(dep); if (!IS_ERR(fence)) goto out; } else { int err = PTR_ERR(fence); if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN) return fence; if (move_deps) { err = i915_deps_sync(move_deps, ctx); if (err) return ERR_PTR(err); } } /* Error intercept failed or no accelerated migration to start with */ if (memcpy_allowed) { if (!copy_work) i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, dst_rsgt); i915_ttm_move_memcpy(arg); i915_ttm_memcpy_release(arg); } if (copy_work) i915_gem_object_put(copy_work->obj); kfree(copy_work); return memcpy_allowed ? NULL : ERR_PTR(-EIO); out: if (!fence && copy_work) { i915_ttm_memcpy_release(arg); i915_gem_object_put(copy_work->obj); kfree(copy_work); } return fence; } /** * i915_ttm_move - The TTM move callback used by i915. * @bo: The buffer object. * @evict: Whether this is an eviction. * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits should be * performed if waiting * @dst_mem: The destination ttm resource. * @hop: If we need multihop, what temporary memory type to move to. * * Return: 0 if successful, negative error code otherwise. */ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *dst_mem, struct ttm_place *hop) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct ttm_resource_manager *dst_man = ttm_manager_type(bo->bdev, dst_mem->mem_type); struct dma_fence *migration_fence = NULL; struct ttm_tt *ttm = bo->ttm; struct i915_refct_sgt *dst_rsgt; bool clear, prealloc_bo; int ret; if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) { ttm_bo_move_null(bo, dst_mem); return 0; } if (!bo->resource) { if (dst_mem->mem_type != TTM_PL_SYSTEM) { hop->mem_type = TTM_PL_SYSTEM; hop->flags = TTM_PL_FLAG_TEMPORARY; return -EMULTIHOP; } /* * This is only reached when first creating the object, or if * the object was purged or swapped out (pipeline-gutting). For * the former we can safely skip all of the below since we are * only using a dummy SYSTEM placement here. And with the latter * we will always re-enter here with bo->resource set correctly * (as per the above), since this is part of a multi-hop * sequence, where at the end we can do the move for real. * * The special case here is when the dst_mem is TTM_PL_SYSTEM, * which doens't require any kind of move, so it should be safe * to skip all the below and call ttm_bo_move_null() here, where * the caller in __i915_ttm_get_pages() will take care of the * rest, since we should have a valid ttm_tt. */ ttm_bo_move_null(bo, dst_mem); return 0; } ret = i915_ttm_move_notify(bo); if (ret) return ret; if (obj->mm.madv != I915_MADV_WILLNEED) { i915_ttm_purge(obj); ttm_resource_free(bo, &dst_mem); return 0; } /* Populate ttm with pages if needed. Typically system memory. 
*/ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { ret = ttm_tt_populate(bo->bdev, ttm, ctx); if (ret) return ret; } dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem); if (IS_ERR(dst_rsgt)) return PTR_ERR(dst_rsgt); clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); prealloc_bo = obj->flags & I915_BO_PREALLOC; if (!(clear && ttm && !((ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) && !prealloc_bo))) { struct i915_deps deps; i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); ret = i915_deps_add_resv(&deps, bo->base.resv, ctx); if (ret) { i915_refct_sgt_put(dst_rsgt); return ret; } migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm, dst_rsgt, true, &deps); i915_deps_fini(&deps); } /* We can possibly get an -ERESTARTSYS here */ if (IS_ERR(migration_fence)) { i915_refct_sgt_put(dst_rsgt); return PTR_ERR(migration_fence); } if (migration_fence) { if (I915_SELFTEST_ONLY(evict && fail_gpu_migration)) ret = -EIO; /* never feed non-migrate fences into ttm */ else ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict, true, dst_mem); if (ret) { dma_fence_wait(migration_fence, false); ttm_bo_move_sync_cleanup(bo, dst_mem); } dma_fence_put(migration_fence); } else { ttm_bo_move_sync_cleanup(bo, dst_mem); } i915_ttm_adjust_domains_after_move(obj); i915_ttm_free_cached_io_rsgt(obj); if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) { obj->ttm.cached_io_rsgt = dst_rsgt; obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl; obj->ttm.get_io_page.sg_idx = 0; } else { i915_refct_sgt_put(dst_rsgt); } i915_ttm_adjust_lru(obj); i915_ttm_adjust_gem_after_move(obj); return 0; } /** * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to * another * @dst: The destination object * @src: The source object * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. * @intr: Whether to perform waits interruptible: * * Note: The caller is responsible for assuring that the underlying * TTM objects are populated if needed and locked. * * Return: Zero on success. Negative error code on error. If @intr == true, * then it may return -ERESTARTSYS or -EINTR. 
*/ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, struct drm_i915_gem_object *src, bool allow_accel, bool intr) { struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); struct ttm_operation_ctx ctx = { .interruptible = intr, }; struct i915_refct_sgt *dst_rsgt; struct dma_fence *copy_fence; struct i915_deps deps; int ret; assert_object_held(dst); assert_object_held(src); if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource)) return -EINVAL; i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); ret = dma_resv_reserve_fences(src_bo->base.resv, 1); if (ret) return ret; ret = dma_resv_reserve_fences(dst_bo->base.resv, 1); if (ret) return ret; ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx); if (ret) return ret; ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx); if (ret) return ret; dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource); copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource, dst_bo->ttm, dst_rsgt, allow_accel, &deps); i915_deps_fini(&deps); i915_refct_sgt_put(dst_rsgt); if (IS_ERR_OR_NULL(copy_fence)) return PTR_ERR_OR_ZERO(copy_fence); dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE); dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ); dma_fence_put(copy_fence); return 0; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
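The failsafe migration above wraps its CPU copy in a custom dma-fence that a worker signals once the memcpy is done. Below is a heavily reduced sketch of that embed-init-signal pattern only; all demo_* names are hypothetical and none of the error-interception or wedging logic of the real driver is reproduced:

#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_copy_work {
	struct dma_fence fence;
	spinlock_t lock;		/* protects the embedded fence */
	struct work_struct work;
};

static const char *demo_driver_name(struct dma_fence *f) { return "demo"; }
static const char *demo_timeline_name(struct dma_fence *f) { return "unbound"; }

static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_driver_name,
	.get_timeline_name = demo_timeline_name,
};

static void demo_copy_fn(struct work_struct *work)
{
	struct demo_copy_work *cw = container_of(work, typeof(*cw), work);

	/* ... perform the CPU copy here ... */
	dma_fence_signal(&cw->fence);	/* waiters on the fence may now proceed */
	dma_fence_put(&cw->fence);	/* drop the worker's reference */
}

/* Returns a fence the caller owns (and must dma_fence_put) that signals
 * when the asynchronous copy completes. */
static struct dma_fence *demo_copy_async(struct demo_copy_work *cw)
{
	spin_lock_init(&cw->lock);
	dma_fence_init(&cw->fence, &demo_fence_ops, &cw->lock, 0, 0);
	dma_fence_get(&cw->fence);	/* extra reference owned by the worker */
	INIT_WORK(&cw->work, demo_copy_fn);
	queue_work(system_unbound_wq, &cw->work);
	return &cw->fence;
}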
/* * SPDX-License-Identifier: MIT * * Copyright © 2019 Intel Corporation */ #include "gem/i915_gem_pm.h" #include "gem/i915_gem_ttm_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" #include "i915_driver.h" #include "i915_drv.h" #if defined(CONFIG_X86) #include <asm/smp.h> #else #define wbinvd_on_all_cpus() \ pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__) #endif void i915_gem_suspend(struct drm_i915_private *i915) { struct intel_gt *gt; unsigned int i; GEM_TRACE("%s\n", dev_name(i915->drm.dev)); intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0); flush_workqueue(i915->wq); /* * We have to flush all the executing contexts to main memory so * that they can saved in the hibernation image. To ensure the last * context image is coherent, we have to switch away from it. That * leaves the i915->kernel_context still active when * we actually suspend, and its image in memory may not match the GPU * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. */ for_each_gt(gt, i915, i) intel_gt_suspend_prepare(gt); i915_gem_drain_freed_objects(i915); } static int lmem_restore(struct drm_i915_private *i915, u32 flags) { struct intel_memory_region *mr; int ret = 0, id; for_each_memory_region(mr, i915, id) { if (mr->type == INTEL_MEMORY_LOCAL) { ret = i915_ttm_restore_region(mr, flags); if (ret) break; } } return ret; } static int lmem_suspend(struct drm_i915_private *i915, u32 flags) { struct intel_memory_region *mr; int ret = 0, id; for_each_memory_region(mr, i915, id) { if (mr->type == INTEL_MEMORY_LOCAL) { ret = i915_ttm_backup_region(mr, flags); if (ret) break; } } return ret; } static void lmem_recover(struct drm_i915_private *i915) { struct intel_memory_region *mr; int id; for_each_memory_region(mr, i915, id) if (mr->type == INTEL_MEMORY_LOCAL) i915_ttm_recover_region(mr); } int i915_gem_backup_suspend(struct drm_i915_private *i915) { int ret; /* Opportunistically try to evict unpinned objects */ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU); if (ret) goto out_recover; i915_gem_suspend(i915); /* * More objects may have become unpinned as requests were * retired. Now try to evict again. The gt may be wedged here * in which case we automatically fall back to memcpy. * We allow also backing up pinned objects that have not been * marked for early recover, and that may contain, for example, * page-tables for the migrate context. */ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU | I915_TTM_BACKUP_PINNED); if (ret) goto out_recover; /* * Remaining objects are backed up using memcpy once we've stopped * using the migrate context. */ ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED); if (ret) goto out_recover; return 0; out_recover: lmem_recover(i915); return ret; } void i915_gem_suspend_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; struct list_head *phases[] = { &i915->mm.shrink_list, &i915->mm.purge_list, NULL }, **phase; struct intel_gt *gt; unsigned long flags; unsigned int i; bool flush = false; /* * Neither the BIOS, ourselves or any other kernel * expects the system to be in execlists mode on startup, * so we need to reset the GPU back to legacy mode. And the only * known way to disable logical contexts is through a GPU reset. * * So in order to leave the system in a known default configuration, * always reset the GPU upon unload and suspend. Afterwards we then * clean up the GEM state tracking, flushing off the requests and * leaving the system in a known idle state. 
* * Note that is of the upmost importance that the GPU is idle and * all stray writes are flushed *before* we dismantle the backing * storage for the pinned objects. * * However, since we are uncertain that resetting the GPU on older * machines is a good idea, we don't - just in case it leaves the * machine in an unusable condition. */ for_each_gt(gt, i915, i) intel_gt_suspend_late(gt); spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { list_for_each_entry(obj, *phase, mm.link) { if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0; __start_cpu_write(obj); /* presume auto-hibernate */ } } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (flush) wbinvd_on_all_cpus(); } int i915_gem_freeze(struct drm_i915_private *i915) { /* Discard all purgeable objects, let userspace recover those as * required after resuming. */ i915_gem_shrink_all(i915); return 0; } int i915_gem_freeze_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; intel_wakeref_t wakeref; /* * Called just before we write the hibernation image. * * We need to update the domain tracking to reflect that the CPU * will be accessing all the pages to create and restore from the * hibernation, and so upon restoration those pages will be in the * CPU domain. * * To make sure the hibernation image contains the latest state, * we update that state just before writing out the image. * * To try and reduce the hibernation image, we manually shrink * the objects as well, see i915_gem_freeze() */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) i915_gem_shrink(NULL, i915, -1UL, NULL, ~0); i915_gem_drain_freed_objects(i915); wbinvd_on_all_cpus(); list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) __start_cpu_write(obj); return 0; } void i915_gem_resume(struct drm_i915_private *i915) { struct intel_gt *gt; int ret, i, j; GEM_TRACE("%s\n", dev_name(i915->drm.dev)); ret = lmem_restore(i915, 0); GEM_WARN_ON(ret); /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again. */ for_each_gt(gt, i915, i) if (intel_gt_resume(gt)) goto err_wedged; ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU); GEM_WARN_ON(ret); return; err_wedged: for_each_gt(gt, i915, j) { if (!intel_gt_is_wedged(gt)) { dev_err(i915->drm.dev, "Failed to re-initialize GPU[%u], declaring it wedged!\n", j); intel_gt_set_wedged(gt); } if (j == i) break; } }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_pm.c
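i915_gem_backup_suspend() above escalates through three eviction passes and rolls everything back on failure. The generic sketch below captures only that escalate-or-recover policy; the flag values and the demo_backup()/demo_recover() stubs are hypothetical stand-ins for the TTM backup helpers:

#include <linux/kernel.h>

#define DEMO_BACKUP_ALLOW_GPU	(1u << 0)
#define DEMO_BACKUP_PINNED	(1u << 1)

/* Stubs standing in for the real region backup / recover helpers. */
static int demo_backup(unsigned int flags) { return 0; }
static void demo_recover(void) { }

static int demo_backup_suspend(void)
{
	/*
	 * Cheapest pass first: unpinned objects while the blitter is still
	 * usable, then pinned objects, and finally a memcpy-only pass once
	 * the GPU is quiesced. Any failure rolls everything back.
	 */
	static const unsigned int passes[] = {
		DEMO_BACKUP_ALLOW_GPU,
		DEMO_BACKUP_ALLOW_GPU | DEMO_BACKUP_PINNED,
		DEMO_BACKUP_PINNED,
	};
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(passes); i++) {
		ret = demo_backup(passes[i]);
		if (ret) {
			demo_recover();
			return ret;
		}
	}
	return 0;
}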
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <uapi/drm/i915_drm.h> #include "intel_memory_region.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_lmem.h" #include "i915_drv.h" void __iomem * i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, unsigned long n, unsigned long size) { resource_size_t offset; GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); offset = i915_gem_object_get_dma_address(obj, n); offset -= obj->mm.region->region.start; return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); } /** * i915_gem_object_is_lmem - Whether the object is resident in * lmem * @obj: The object to check. * * Even if an object is allowed to migrate and change memory region, * this function checks whether it will always be present in lmem when * valid *or* if that's not the case, whether it's currently resident in lmem. * For migratable and evictable objects, the latter only makes sense when * the object is locked. * * Return: Whether the object migratable but resident in lmem, or not * migratable and will be present in lmem when valid. */ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP if (i915_gem_object_migratable(obj) && i915_gem_object_evictable(obj)) assert_object_held(obj); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); } /** * __i915_gem_object_is_lmem - Whether the object is resident in * lmem while in the fence signaling critical path. * @obj: The object to check. * * This function is intended to be called from within the fence signaling * path where the fence, or a pin, keeps the object from being migrated. For * example during gpu reset or similar. * * Return: Whether the object is resident in lmem. */ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); } /** * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the * minimum page size for the backing pages. * @i915: The i915 instance. * @size: The size in bytes for the object. Note that we need to round the size * up depending on the @page_size. The final object size can be fished out from * the drm GEM object. * @page_size: The requested minimum page size in bytes for this object. This is * useful if we need something bigger than the regions min_page_size due to some * hw restriction, or in some very specialised cases where it needs to be * smaller, where the internal fragmentation cost is too great when rounding up * the object size. * @flags: The optional BO allocation flags. * * Note that this interface assumes you know what you are doing when forcing the * @page_size. If this is smaller than the regions min_page_size then it can * never be inserted into any GTT, otherwise it might lead to undefined * behaviour. * * Return: The object pointer, which might be an ERR_PTR in the case of failure. 
*/ struct drm_i915_gem_object * __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915, resource_size_t size, resource_size_t page_size, unsigned int flags) { return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0], size, page_size, flags); } struct drm_i915_gem_object * i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915, const void *data, size_t size) { struct drm_i915_gem_object *obj; void *map; obj = i915_gem_object_create_lmem(i915, round_up(size, PAGE_SIZE), I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(obj)) return obj; map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(map)) { i915_gem_object_put(obj); return map; } memcpy(map, data, size); i915_gem_object_flush_map(obj); __i915_gem_object_release_map(obj); return obj; } struct drm_i915_gem_object * i915_gem_object_create_lmem(struct drm_i915_private *i915, resource_size_t size, unsigned int flags) { return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0], size, 0, flags); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
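The helpers above create local-memory objects from inside the kernel. From userspace, the equivalent placement request goes through DRM_IOCTL_I915_GEM_CREATE_EXT with a memory-regions extension; the sketch below is an illustrative helper (gem_create_lmem and the fixed instance 0 are assumptions) and only applies to discrete GPUs that expose I915_MEMORY_CLASS_DEVICE:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Create a GEM buffer placed in device-local memory (region instance 0). */
static int gem_create_lmem(int drm_fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance region = {
		.memory_class = I915_MEMORY_CLASS_DEVICE,
		.memory_instance = 0,
	};
	struct drm_i915_gem_create_ext_memory_regions regions_ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 1,
		.regions = (uintptr_t)&region,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (uintptr_t)&regions_ext,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -1;
	*handle = create.handle;
	return 0;
}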
/* * Copyright © 2017 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ #include <linux/highmem.h> #include <linux/sched/mm.h> #include <drm/drm_cache.h> #include "display/intel_frontbuffer.h" #include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_file_private.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_dmabuf.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_gem_ttm.h" #include "i915_memcpy.h" #include "i915_trace.h" static struct kmem_cache *slab_objects; static const struct drm_gem_object_funcs i915_gem_object_funcs; unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915, enum i915_cache_level level) { if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL)) return 0; return INTEL_INFO(i915)->cachelevel_to_pat[level]; } bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj, enum i915_cache_level lvl) { /* * In case the pat_index is set by user space, this kernel mode * driver should leave the coherency to be managed by user space, * simply return true here. */ if (obj->pat_set_by_user) return true; /* * Otherwise the pat_index should have been converted from cache_level * so that the following comparison is valid. */ return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl); } struct drm_i915_gem_object *i915_gem_object_alloc(void) { struct drm_i915_gem_object *obj; obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL); if (!obj) return NULL; obj->base.funcs = &i915_gem_object_funcs; return obj; } void i915_gem_object_free(struct drm_i915_gem_object *obj) { return kmem_cache_free(slab_objects, obj); } void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops, struct lock_class_key *key, unsigned flags) { /* * A gem object is embedded both in a struct ttm_buffer_object :/ and * in a drm_i915_gem_object. Make sure they are aliased. 
*/ BUILD_BUG_ON(offsetof(typeof(*obj), base) != offsetof(typeof(*obj), __do_not_access.base)); spin_lock_init(&obj->vma.lock); INIT_LIST_HEAD(&obj->vma.list); INIT_LIST_HEAD(&obj->mm.link); INIT_LIST_HEAD(&obj->lut_list); spin_lock_init(&obj->lut_lock); spin_lock_init(&obj->mmo.lock); obj->mmo.offsets = RB_ROOT; init_rcu_head(&obj->rcu); obj->ops = ops; GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS); obj->flags = flags; obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_dma_page.lock); } /** * __i915_gem_object_fini - Clean up a GEM object initialization * @obj: The gem object to cleanup * * This function cleans up gem object fields that are set up by * drm_gem_private_object_init() and i915_gem_object_init(). * It's primarily intended as a helper for backends that need to * clean up the gem object in separate steps. */ void __i915_gem_object_fini(struct drm_i915_gem_object *obj) { mutex_destroy(&obj->mm.get_page.lock); mutex_destroy(&obj->mm.get_dma_page.lock); dma_resv_fini(&obj->base._resv); } /** * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels * for a given cache_level * @obj: #drm_i915_gem_object * @cache_level: cache level */ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level) { struct drm_i915_private *i915 = to_i915(obj->base.dev); obj->pat_index = i915_gem_get_pat_index(i915, cache_level); if (cache_level != I915_CACHE_NONE) obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | I915_BO_CACHE_COHERENT_FOR_WRITE); else if (HAS_LLC(i915)) obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; else obj->cache_coherent = 0; obj->cache_dirty = !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && !IS_DGFX(i915); } /** * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode * @obj: #drm_i915_gem_object * @pat_index: PAT index * * This is a clone of i915_gem_object_set_cache_coherency taking pat index * instead of cache_level as its second argument. */ void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj, unsigned int pat_index) { struct drm_i915_private *i915 = to_i915(obj->base.dev); if (obj->pat_index == pat_index) return; obj->pat_index = pat_index; if (pat_index != i915_gem_get_pat_index(i915, I915_CACHE_NONE)) obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | I915_BO_CACHE_COHERENT_FOR_WRITE); else if (HAS_LLC(i915)) obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; else obj->cache_coherent = 0; obj->cache_dirty = !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && !IS_DGFX(i915); } bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); /* * This is purely from a security perspective, so we simply don't care * about non-userspace objects being able to bypass the LLC. */ if (!(obj->flags & I915_BO_ALLOC_USER)) return false; /* * Always flush cache for UMD objects at creation time. */ if (obj->pat_set_by_user) return true; /* * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it * possible for userspace to bypass the GTT caching bits set by the * kernel, as per the given object cache_level. This is troublesome * since the heavy flush we apply when first gathering the pages is * skipped if the kernel thinks the object is coherent with the GPU. 
As * a result it might be possible to bypass the cache and read the * contents of the page directly, which could be stale data. If it's * just a case of userspace shooting themselves in the foot then so be * it, but since i915 takes the stance of always zeroing memory before * handing it to userspace, we need to prevent this. */ return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)); } static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) { struct drm_i915_gem_object *obj = to_intel_bo(gem); struct drm_i915_file_private *fpriv = file->driver_priv; struct i915_lut_handle bookmark = {}; struct i915_mmap_offset *mmo, *mn; struct i915_lut_handle *lut, *ln; LIST_HEAD(close); spin_lock(&obj->lut_lock); list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { struct i915_gem_context *ctx = lut->ctx; if (ctx && ctx->file_priv == fpriv) { i915_gem_context_get(ctx); list_move(&lut->obj_link, &close); } /* Break long locks, and carefully continue on from this spot */ if (&ln->obj_link != &obj->lut_list) { list_add_tail(&bookmark.obj_link, &ln->obj_link); if (cond_resched_lock(&obj->lut_lock)) list_safe_reset_next(&bookmark, ln, obj_link); __list_del_entry(&bookmark.obj_link); } } spin_unlock(&obj->lut_lock); spin_lock(&obj->mmo.lock); rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) drm_vma_node_revoke(&mmo->vma_node, file); spin_unlock(&obj->mmo.lock); list_for_each_entry_safe(lut, ln, &close, obj_link) { struct i915_gem_context *ctx = lut->ctx; struct i915_vma *vma; /* * We allow the process to have multiple handles to the same * vma, in the same fd namespace, by virtue of flink/open. */ mutex_lock(&ctx->lut_mutex); vma = radix_tree_delete(&ctx->handles_vma, lut->handle); if (vma) { GEM_BUG_ON(vma->obj != obj); GEM_BUG_ON(!atomic_read(&vma->open_count)); i915_vma_close(vma); } mutex_unlock(&ctx->lut_mutex); i915_gem_context_put(lut->ctx); i915_lut_handle_free(lut); i915_gem_object_put(obj); } } void __i915_gem_free_object_rcu(struct rcu_head *head) { struct drm_i915_gem_object *obj = container_of(head, typeof(*obj), rcu); struct drm_i915_private *i915 = to_i915(obj->base.dev); i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); atomic_dec(&i915->mm.free_count); } static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) { /* Skip serialisation and waking the device if known to be not used. */ if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev))) i915_gem_object_release_mmap_gtt(obj); if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) { struct i915_mmap_offset *mmo, *mn; i915_gem_object_release_mmap_offset(obj); rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { drm_vma_offset_remove(obj->base.dev->vma_offset_manager, &mmo->vma_node); kfree(mmo); } obj->mmo.offsets = RB_ROOT; } } /** * __i915_gem_object_pages_fini - Clean up pages use of a gem object * @obj: The gem object to clean up * * This function cleans up usage of the object mm.pages member. It * is intended for backends that need to clean up a gem object in * separate steps and needs to be called when the object is idle before * the object's backing memory is freed. 
*/ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj) { assert_object_held_shared(obj); if (!list_empty(&obj->vma.list)) { struct i915_vma *vma; spin_lock(&obj->vma.lock); while ((vma = list_first_entry_or_null(&obj->vma.list, struct i915_vma, obj_link))) { GEM_BUG_ON(vma->obj != obj); spin_unlock(&obj->vma.lock); i915_vma_destroy(vma); spin_lock(&obj->vma.lock); } spin_unlock(&obj->vma.lock); } __i915_gem_object_free_mmaps(obj); atomic_set(&obj->mm.pages_pin_count, 0); /* * dma_buf_unmap_attachment() requires reservation to be * locked. The imported GEM shouldn't share reservation lock * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for * dma-buf, so it's safe to take the lock. */ if (obj->base.import_attach) i915_gem_object_lock(obj, NULL); __i915_gem_object_put_pages(obj); if (obj->base.import_attach) i915_gem_object_unlock(obj); GEM_BUG_ON(i915_gem_object_has_pages(obj)); } void __i915_gem_free_object(struct drm_i915_gem_object *obj) { trace_i915_gem_object_destroy(obj); GEM_BUG_ON(!list_empty(&obj->lut_list)); bitmap_free(obj->bit_17); if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); drm_gem_free_mmap_offset(&obj->base); if (obj->ops->release) obj->ops->release(obj); if (obj->mm.n_placements > 1) kfree(obj->mm.placements); if (obj->shares_resv_from) i915_vm_resv_put(obj->shares_resv_from); __i915_gem_object_fini(obj); } static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { struct drm_i915_gem_object *obj, *on; llist_for_each_entry_safe(obj, on, freed, freed) { might_sleep(); if (obj->ops->delayed_free) { obj->ops->delayed_free(obj); continue; } __i915_gem_object_pages_fini(obj); __i915_gem_free_object(obj); /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); cond_resched(); } } void i915_gem_flush_free_objects(struct drm_i915_private *i915) { struct llist_node *freed = llist_del_all(&i915->mm.free_list); if (unlikely(freed)) __i915_gem_free_objects(i915, freed); } static void __i915_gem_free_work(struct work_struct *work) { struct drm_i915_private *i915 = container_of(work, struct drm_i915_private, mm.free_work); i915_gem_flush_free_objects(i915); } static void i915_gem_free_object(struct drm_gem_object *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); GEM_BUG_ON(i915_gem_object_is_framebuffer(obj)); /* * Before we free the object, make sure any pure RCU-only * read-side critical sections are complete, e.g. * i915_gem_busy_ioctl(). For the corresponding synchronized * lookup see i915_gem_object_lookup_rcu(). */ atomic_inc(&i915->mm.free_count); /* * Since we require blocking on struct_mutex to unbind the freed * object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may * be a softirq context), but must instead then defer that work onto a * kthread. We use the RCU callback rather than move the freed object * directly onto the work queue so that we can mix between using the * worker and performing frees directly from subsequent allocations for * crude but effective memory throttling. 
*/ if (llist_add(&obj->freed, &i915->mm.free_list)) queue_work(i915->wq, &i915->mm.free_work); } void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { struct intel_frontbuffer *front; front = i915_gem_object_get_frontbuffer(obj); if (front) { intel_frontbuffer_flush(front, origin); intel_frontbuffer_put(front); } } void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { struct intel_frontbuffer *front; front = i915_gem_object_get_frontbuffer(obj); if (front) { intel_frontbuffer_invalidate(front, origin); intel_frontbuffer_put(front); } } static void i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) { pgoff_t idx = offset >> PAGE_SHIFT; void *src_map; void *src_ptr; src_map = kmap_atomic(i915_gem_object_get_page(obj, idx)); src_ptr = src_map + offset_in_page(offset); if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) drm_clflush_virt_range(src_ptr, size); memcpy(dst, src_ptr, size); kunmap_atomic(src_map); } static void i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) { pgoff_t idx = offset >> PAGE_SHIFT; dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx); void __iomem *src_map; void __iomem *src_ptr; src_map = io_mapping_map_wc(&obj->mm.region->iomap, dma - obj->mm.region->region.start, PAGE_SIZE); src_ptr = src_map + offset_in_page(offset); if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size)) memcpy_fromio(dst, src_ptr, size); io_mapping_unmap(src_map); } static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!i915_gem_object_has_iomem(obj)); if (IS_DGFX(to_i915(obj->base.dev))) return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource); return true; } /** * i915_gem_object_read_from_page - read data from the page of a GEM object * @obj: GEM object to read from * @offset: offset within the object * @dst: buffer to store the read data * @size: size to read * * Reads data from @obj at the specified offset. The requested region to read * from can't cross a page boundary. The caller must ensure that @obj pages * are pinned and that @obj is synced wrt. any related writes. * * Return: %0 on success or -ENODEV if the type of @obj's backing store is * unsupported. */ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) { GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t)); GEM_BUG_ON(offset >= obj->base.size); GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size); GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); if (i915_gem_object_has_struct_page(obj)) i915_gem_object_read_from_page_kmap(obj, offset, dst, size); else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj)) i915_gem_object_read_from_page_iomap(obj, offset, dst, size); else return -ENODEV; return 0; } /** * i915_gem_object_evictable - Whether object is likely evictable after unbind. * @obj: The object to check * * This function checks whether the object is likely unvictable after unbind. * If the object is not locked when checking, the result is only advisory. * If the object is locked when checking, and the function returns true, * then an eviction should indeed be possible. But since unlocked vma * unpinning and unbinding is currently possible, the object can actually * become evictable even if this function returns false. * * Return: true if the object may be evictable. False otherwise. 
*/ bool i915_gem_object_evictable(struct drm_i915_gem_object *obj) { struct i915_vma *vma; int pin_count = atomic_read(&obj->mm.pages_pin_count); if (!pin_count) return true; spin_lock(&obj->vma.lock); list_for_each_entry(vma, &obj->vma.list, obj_link) { if (i915_vma_is_pinned(vma)) { spin_unlock(&obj->vma.lock); return false; } if (atomic_read(&vma->pages_count)) pin_count--; } spin_unlock(&obj->vma.lock); GEM_WARN_ON(pin_count < 0); return pin_count == 0; } /** * i915_gem_object_migratable - Whether the object is migratable out of the * current region. * @obj: Pointer to the object. * * Return: Whether the object is allowed to be resident in other * regions than the current while pages are present. */ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = READ_ONCE(obj->mm.region); if (!mr) return false; return obj->mm.n_placements > 1; } /** * i915_gem_object_has_struct_page - Whether the object is page-backed * @obj: The object to query. * * This function should only be called while the object is locked or pinned, * otherwise the page backing may change under the caller. * * Return: True if page-backed, false otherwise. */ bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { #ifdef CONFIG_LOCKDEP if (IS_DGFX(to_i915(obj->base.dev)) && i915_gem_object_evictable((void __force *)obj)) assert_object_held_shared(obj); #endif return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE; } /** * i915_gem_object_has_iomem - Whether the object is iomem-backed * @obj: The object to query. * * This function should only be called while the object is locked or pinned, * otherwise the iomem backing may change under the caller. * * Return: True if iomem-backed, false otherwise. */ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj) { #ifdef CONFIG_LOCKDEP if (IS_DGFX(to_i915(obj->base.dev)) && i915_gem_object_evictable((void __force *)obj)) assert_object_held_shared(obj); #endif return obj->mem_flags & I915_BO_FLAG_IOMEM; } /** * i915_gem_object_can_migrate - Whether an object likely can be migrated * * @obj: The object to migrate * @id: The region intended to migrate to * * Check whether the object backend supports migration to the * given region. Note that pinning may affect the ability to migrate as * returned by this function. * * This function is primarily intended as a helper for checking the * possibility to migrate objects and might be slightly less permissive * than i915_gem_object_migrate() when it comes to objects with the * I915_BO_ALLOC_USER flag set. * * Return: true if migration is possible, false otherwise. */ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj, enum intel_region_id id) { struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned int num_allowed = obj->mm.n_placements; struct intel_memory_region *mr; unsigned int i; GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN); GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED); mr = i915->mm.regions[id]; if (!mr) return false; if (!IS_ALIGNED(obj->base.size, mr->min_page_size)) return false; if (obj->mm.region == mr) return true; if (!i915_gem_object_evictable(obj)) return false; if (!obj->ops->migrate) return false; if (!(obj->flags & I915_BO_ALLOC_USER)) return true; if (num_allowed == 0) return false; for (i = 0; i < num_allowed; ++i) { if (mr == obj->mm.placements[i]) return true; } return false; } /** * i915_gem_object_migrate - Migrate an object to the desired region id * @obj: The object to migrate. * @ww: An optional struct i915_gem_ww_ctx. 
If NULL, the backend may * not be successful in evicting other objects to make room for this object. * @id: The region id to migrate to. * * Attempt to migrate the object to the desired memory region. The * object backend must support migration and the object may not be * pinned, (explicitly pinned pages or pinned vmas). The object must * be locked. * On successful completion, the object will have pages pointing to * memory in the new region, but an async migration task may not have * completed yet, and to accomplish that, i915_gem_object_wait_migration() * must be called. * * Note: the @ww parameter is not used yet, but included to make sure * callers put some effort into obtaining a valid ww ctx if one is * available. * * Return: 0 on success. Negative error code on failure. In particular may * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and * -EBUSY if the object is pinned. */ int i915_gem_object_migrate(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, enum intel_region_id id) { return __i915_gem_object_migrate(obj, ww, id, obj->flags); } /** * __i915_gem_object_migrate - Migrate an object to the desired region id, with * control of the extra flags * @obj: The object to migrate. * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may * not be successful in evicting other objects to make room for this object. * @id: The region id to migrate to. * @flags: The object flags. Normally just obj->flags. * * Attempt to migrate the object to the desired memory region. The * object backend must support migration and the object may not be * pinned, (explicitly pinned pages or pinned vmas). The object must * be locked. * On successful completion, the object will have pages pointing to * memory in the new region, but an async migration task may not have * completed yet, and to accomplish that, i915_gem_object_wait_migration() * must be called. * * Note: the @ww parameter is not used yet, but included to make sure * callers put some effort into obtaining a valid ww ctx if one is * available. * * Return: 0 on success. Negative error code on failure. In particular may * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and * -EBUSY if the object is pinned. */ int __i915_gem_object_migrate(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, enum intel_region_id id, unsigned int flags) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_memory_region *mr; GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN); GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED); assert_object_held(obj); mr = i915->mm.regions[id]; GEM_BUG_ON(!mr); if (!i915_gem_object_can_migrate(obj, id)) return -EINVAL; if (!obj->ops->migrate) { if (GEM_WARN_ON(obj->mm.region != mr)) return -EINVAL; return 0; } return obj->ops->migrate(obj, mr, flags); } /** * i915_gem_object_placement_possible - Check whether the object can be * placed at certain memory type * @obj: Pointer to the object * @type: The memory type to check * * Return: True if the object can be placed in @type. False otherwise. 
*/ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, enum intel_memory_type type) { unsigned int i; if (!obj->mm.n_placements) { switch (type) { case INTEL_MEMORY_LOCAL: return i915_gem_object_has_iomem(obj); case INTEL_MEMORY_SYSTEM: return i915_gem_object_has_pages(obj); default: /* Ignore stolen for now */ GEM_BUG_ON(1); return false; } } for (i = 0; i < obj->mm.n_placements; i++) { if (obj->mm.placements[i]->type == type) return true; } return false; } /** * i915_gem_object_needs_ccs_pages - Check whether the object requires extra * pages when placed in system-memory, in order to save and later restore the * flat-CCS aux state when the object is moved between local-memory and * system-memory * @obj: Pointer to the object * * Return: True if the object needs extra ccs pages. False otherwise. */ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj) { bool lmem_placement = false; int i; if (!HAS_FLAT_CCS(to_i915(obj->base.dev))) return false; if (obj->flags & I915_BO_ALLOC_CCS_AUX) return true; for (i = 0; i < obj->mm.n_placements; i++) { /* Compression is not allowed for the objects with smem placement */ if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM) return false; if (!lmem_placement && obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL) lmem_placement = true; } return lmem_placement; } void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); } void i915_objects_module_exit(void) { kmem_cache_destroy(slab_objects); } int __init i915_objects_module_init(void) { slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); if (!slab_objects) return -ENOMEM; return 0; } static const struct drm_gem_object_funcs i915_gem_object_funcs = { .free = i915_gem_free_object, .close = i915_gem_close_object, .export = i915_gem_prime_export, }; /** * i915_gem_object_get_moving_fence - Get the object's moving fence if any * @obj: The object whose moving fence to get. * @fence: The resulting fence * * A non-signaled moving fence means that there is an async operation * pending on the object that needs to be waited on before setting up * any GPU- or CPU PTEs to the object's pages. * * Return: Negative error code or 0 for success. */ int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj, struct dma_fence **fence) { return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL, fence); } /** * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any * @obj: The object whose moving fence to wait for. * @intr: Whether to wait interruptible. * * If the moving fence signaled without an error, it is detached from the * object and put. * * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted, * negative error code if the async operation represented by the * moving fence failed. */ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, bool intr) { long ret; assert_object_held(obj); ret = dma_resv_wait_timeout(obj->base. resv, DMA_RESV_USAGE_KERNEL, intr, MAX_SCHEDULE_TIMEOUT); if (!ret) ret = -ETIME; else if (ret > 0 && i915_gem_object_has_unknown_state(obj)) ret = -EIO; return ret < 0 ? ret : 0; } /* * i915_gem_object_has_unknown_state - Return true if the object backing pages are * in an unknown_state. This means that userspace must NEVER be allowed to touch * the pages, with either the GPU or CPU. 
* * ONLY valid to be called after ensuring that all kernel fences have signalled * (in particular the fence for moving/clearing the object). */ bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj) { /* * The below barrier pairs with the dma_fence_signal() in * __memcpy_work(). We should only sample the unknown_state after all * the kernel fences have signalled. */ smp_rmb(); return obj->mm.unknown_state; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/huge_gem_object.c" #include "selftests/huge_pages.c" #include "selftests/i915_gem_migrate.c" #include "selftests/i915_gem_object.c" #include "selftests/i915_gem_coherency.c" #endif
linux-master
drivers/gpu/drm/i915/gem/i915_gem_object.c
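The kerneldoc above for i915_gem_object_read_from_page() requires the caller to keep the backing pages pinned, to stay within a single page, and to have synced the object against related writes. The following is a minimal caller sketch, illustrative only and not part of the file; it assumes the pin helpers declared in i915_gem_object.h (i915_gem_object_pin_pages_unlocked() / i915_gem_object_unpin_pages()), and the function name is hypothetical.

/*
 * Illustrative sketch only -- not part of i915_gem_object.c. Assumes the
 * object is already synced wrt. any pending GPU writes.
 */
static int example_read_first_dword(struct drm_i915_gem_object *obj, u32 *value)
{
	int err;

	/* i915_gem_object_read_from_page() requires pinned backing pages. */
	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* The read must not cross a page boundary; 4 bytes at offset 0 is safe. */
	err = i915_gem_object_read_from_page(obj, 0, value, sizeof(*value));

	i915_gem_object_unpin_pages(obj);
	return err;
}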
/* * SPDX-License-Identifier: MIT * * Copyright © 2012-2014 Intel Corporation * * Based on amdgpu_mn, which bears the following notice: * * Copyright 2014 Advanced Micro Devices, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * */ /* * Authors: * Christian König <[email protected]> */ #include <linux/mmu_context.h> #include <linux/mempolicy.h> #include <linux/swap.h> #include <linux/sched/mm.h> #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_gem_userptr.h" #include "i915_scatterlist.h" #ifdef CONFIG_MMU_NOTIFIER /** * i915_gem_userptr_invalidate - callback to notify about mm change * * @mni: the range (mm) is about to update * @range: details on the invalidation * @cur_seq: Value to pass to mmu_interval_set_seq() * * Block for operations on BOs to finish and mark pages as accessed and * potentially dirty. */ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier); struct drm_i915_private *i915 = to_i915(obj->base.dev); long r; if (!mmu_notifier_range_blockable(range)) return false; write_lock(&i915->mm.notifier_lock); mmu_interval_set_seq(mni, cur_seq); write_unlock(&i915->mm.notifier_lock); /* * We don't wait when the process is exiting. This is valid * because the object will be cleaned up anyway. * * This is also temporarily required as a hack, because we * cannot currently force non-consistent batch buffers to preempt * and reschedule by waiting on it, hanging processes on exit. 
*/ if (current->flags & PF_EXITING) return true; /* we will unbind on next submission, still have userptr pins */ r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); return true; } static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = { .invalidate = i915_gem_userptr_invalidate, }; static int i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj) { return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm, obj->userptr.ptr, obj->base.size, &i915_gem_userptr_notifier_ops); } static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj) { struct page **pvec = NULL; assert_object_held_shared(obj); if (!--obj->userptr.page_ref) { pvec = obj->userptr.pvec; obj->userptr.pvec = NULL; } GEM_BUG_ON(obj->userptr.page_ref < 0); if (pvec) { const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; unpin_user_pages(pvec, num_pages); kvfree(pvec); } } static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); struct sg_table *st; struct page **pvec; unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */ int ret; if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages)) return -E2BIG; num_pages = obj->base.size >> PAGE_SHIFT; st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) return -ENOMEM; if (!obj->userptr.page_ref) { ret = -EAGAIN; goto err_free; } obj->userptr.page_ref++; pvec = obj->userptr.pvec; alloc_table: ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0, num_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (ret) goto err; ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { sg_free_table(st); if (max_segment > PAGE_SIZE) { max_segment = PAGE_SIZE; goto alloc_table; } goto err; } WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)); if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; __i915_gem_object_set_pages(obj, st); return 0; err: i915_gem_object_userptr_drop_ref(obj); err_free: kfree(st); return ret; } static void i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { struct sgt_iter sgt_iter; struct page *page; if (!pages) return; __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); /* * We always mark objects as dirty when they are used by the GPU, * just in case. However, if we set the vma as being read-only we know * that the object will never have been written to. */ if (i915_gem_object_is_readonly(obj)) obj->mm.dirty = false; for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty && trylock_page(page)) { /* * As this may not be anonymous memory (e.g. shmem) * but exist on a real mapping, we have to lock * the page in order to dirty it -- holding * the page reference is not sufficient to * prevent the inode from being truncated. * Play safe and take the lock. * * However...! * * The mmu-notifier can be invalidated for a * migrate_folio, that is alreadying holding the lock * on the folio. Such a try_to_unmap() will result * in us calling put_pages() and so recursively try * to lock the page. We avoid that deadlock with * a trylock_page() and in exchange we risk missing * some page dirtying. 
*/ set_page_dirty(page); unlock_page(page); } mark_page_accessed(page); } obj->mm.dirty = false; sg_free_table(pages); kfree(pages); i915_gem_object_userptr_drop_ref(obj); } static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj) { struct sg_table *pages; int err; err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); if (err) return err; if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj))) return -EBUSY; assert_object_held(obj); pages = __i915_gem_object_unset_pages(obj); if (!IS_ERR_OR_NULL(pages)) i915_gem_userptr_put_pages(obj, pages); return err; } int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; struct page **pvec; unsigned int gup_flags = 0; unsigned long notifier_seq; int pinned, ret; if (obj->userptr.notifier.mm != current->mm) return -EFAULT; notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier); ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) return ret; if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) { i915_gem_object_unlock(obj); return 0; } ret = i915_gem_object_userptr_unbind(obj); i915_gem_object_unlock(obj); if (ret) return ret; pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); if (!pvec) return -ENOMEM; if (!i915_gem_object_is_readonly(obj)) gup_flags |= FOLL_WRITE; pinned = 0; while (pinned < num_pages) { ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE, num_pages - pinned, gup_flags, &pvec[pinned]); if (ret < 0) goto out; pinned += ret; } ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) goto out; if (mmu_interval_read_retry(&obj->userptr.notifier, !obj->userptr.page_ref ? notifier_seq : obj->userptr.notifier_seq)) { ret = -EAGAIN; goto out_unlock; } if (!obj->userptr.page_ref++) { obj->userptr.pvec = pvec; obj->userptr.notifier_seq = notifier_seq; pvec = NULL; ret = ____i915_gem_object_get_pages(obj); } obj->userptr.page_ref--; out_unlock: i915_gem_object_unlock(obj); out: if (pvec) { unpin_user_pages(pvec, pinned); kvfree(pvec); } return ret; } int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { if (mmu_interval_read_retry(&obj->userptr.notifier, obj->userptr.notifier_seq)) { /* We collided with the mmu notifier, need to retry */ return -EAGAIN; } return 0; } int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { int err; err = i915_gem_object_userptr_submit_init(obj); if (err) return err; err = i915_gem_object_lock_interruptible(obj, NULL); if (!err) { /* * Since we only check validity, not use the pages, * it doesn't matter if we collide with the mmu notifier, * and -EAGAIN handling is not required. 
*/ err = i915_gem_object_pin_pages(obj); if (!err) i915_gem_object_unpin_pages(obj); i915_gem_object_unlock(obj); } return err; } static void i915_gem_userptr_release(struct drm_i915_gem_object *obj) { GEM_WARN_ON(obj->userptr.page_ref); mmu_interval_notifier_remove(&obj->userptr.notifier); obj->userptr.notifier.mm = NULL; } static int i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) { drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n"); return -EINVAL; } static int i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *args) { drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n"); return -EINVAL; } static int i915_gem_userptr_pread(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *args) { drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n"); return -EINVAL; } static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .name = "i915_gem_object_userptr", .flags = I915_GEM_OBJECT_IS_SHRINKABLE | I915_GEM_OBJECT_NO_MMAP | I915_GEM_OBJECT_IS_PROXY, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, .dmabuf_export = i915_gem_userptr_dmabuf_export, .pwrite = i915_gem_userptr_pwrite, .pread = i915_gem_userptr_pread, .release = i915_gem_userptr_release, }; #endif static int probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; unsigned long end = addr + len; mmap_read_lock(mm); for_each_vma_range(vmi, vma, end) { /* Check for holes, note that we also update the addr below */ if (vma->vm_start > addr) break; if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) break; addr = vma->vm_end; } mmap_read_unlock(mm); if (vma || addr < end) return -EFAULT; return 0; } /* * Creates a new mm object that wraps some normal memory from the process * context - user memory. * * We impose several restrictions upon the memory being mapped * into the GPU. * 1. It must be page aligned (both start/end addresses, i.e ptr and size). * 2. It must be normal system memory, not a pointer into another map of IO * space (e.g. it must not be a GTT mmapping of another object). * 3. We only allow a bo as large as we could in theory map into the GTT, * that is we limit the size to the total size of the GTT. * 4. The bo is marked as being snoopable. The backing pages are left * accessible directly by the CPU, but reads and writes by the GPU may * incur the cost of a snoop (unless you have an LLC architecture). * * Synchronisation between multiple users and the GPU is left to userspace * through the normal set-domain-ioctl. The kernel will enforce that the * GPU relinquishes the VMA before it is returned back to the system * i.e. upon free(), munmap() or process termination. However, the userspace * malloc() library may not immediately relinquish the VMA after free() and * instead reuse it whilst the GPU is still reading and writing to the VMA. * Caveat emptor. * * Also note, that the object created here is not currently a "first class" * object, in that several ioctls are banned. These are the CPU access * ioctls: mmap(), pwrite and pread. In practice, you are expected to use * direct access via your pointer rather than use those ioctls. Another * restriction is that we do not allow userptr surfaces to be pinned to the * hardware and so we reject any attempt to create a framebuffer out of a * userptr. * * If you think this is a good interface to use to pass GPU memory between * drivers, please use dma-buf instead. 
In fact, wherever possible use * dma-buf instead. */ int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { static struct lock_class_key __maybe_unused lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_userptr *args = data; struct drm_i915_gem_object __maybe_unused *obj; int __maybe_unused ret; u32 __maybe_unused handle; if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { /* We cannot support coherent userptr objects on hw without * LLC and broken snooping. */ return -ENODEV; } if (args->flags & ~(I915_USERPTR_READ_ONLY | I915_USERPTR_UNSYNCHRONIZED | I915_USERPTR_PROBE)) return -EINVAL; if (i915_gem_object_size_2big(args->user_size)) return -E2BIG; if (!args->user_size) return -EINVAL; if (offset_in_page(args->user_ptr | args->user_size)) return -EINVAL; if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) return -EFAULT; if (args->flags & I915_USERPTR_UNSYNCHRONIZED) return -ENODEV; if (args->flags & I915_USERPTR_READ_ONLY) { /* * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ if (!to_gt(dev_priv)->vm->has_read_only) return -ENODEV; } if (args->flags & I915_USERPTR_PROBE) { /* * Check that the range pointed to represents real struct * pages and not iomappings (at this moment in time!) */ ret = probe_range(current->mm, args->user_ptr, args->user_size); if (ret) return ret; } #ifdef CONFIG_MMU_NOTIFIER obj = i915_gem_object_alloc(); if (obj == NULL) return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, I915_BO_ALLOC_USER); obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE; obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); obj->userptr.ptr = args->user_ptr; obj->userptr.notifier_seq = ULONG_MAX; if (args->flags & I915_USERPTR_READ_ONLY) i915_gem_object_set_readonly(obj); /* And keep a pointer to the current->mm for resolving the user pages * at binding. This means that we need to hook into the mmu_notifier * in order to detect if the mmu is destroyed. */ ret = i915_gem_userptr_init__mmu_notifier(obj); if (ret == 0) ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ i915_gem_object_put(obj); if (ret) return ret; args->handle = handle; return 0; #else return -ENODEV; #endif } int i915_gem_init_userptr(struct drm_i915_private *dev_priv) { #ifdef CONFIG_MMU_NOTIFIER rwlock_init(&dev_priv->mm.notifier_lock); #endif return 0; } void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) { }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
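The block comment above i915_gem_userptr_ioctl() lists the restrictions on userptr objects (page-aligned pointer and size, ordinary system memory, optional PROBE validation). The sketch below shows, from the userspace side, how those restrictions are typically satisfied. It is illustrative only: DRM_IOCTL_I915_GEM_USERPTR and I915_USERPTR_PROBE are assumed to be provided by a sufficiently recent <drm/i915_drm.h>, and the helper name is hypothetical.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: wrap page-aligned anonymous memory in a userptr BO. */
static int example_create_userptr(int drm_fd, size_t size, uint32_t *handle)
{
	struct drm_i915_gem_userptr arg;
	void *ptr;

	/* Both the start address and size must be page aligned (restriction 1). */
	if (posix_memalign(&ptr, 4096, size))
		return -1;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;
	arg.user_size = size;
	/* Ask the kernel to verify the range maps real pages, not I/O mappings. */
	arg.flags = I915_USERPTR_PROBE;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) {
		free(ptr);
		return -1;
	}

	/* On success the memory backs the BO, so it must stay mapped and allocated. */
	*handle = arg.handle;
	return 0;
}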
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <linux/shmem_fs.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_tt.h> #include <drm/drm_buddy.h> #include "i915_drv.h" #include "i915_ttm_buddy_manager.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" #include "gem/i915_gem_mman.h" #include "gem/i915_gem_object.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" #include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" #include "gt/intel_gpu_commands.h" #define I915_TTM_PRIO_PURGE 0 #define I915_TTM_PRIO_NO_PAGES 1 #define I915_TTM_PRIO_HAS_PAGES 2 #define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3 /* * Size of struct ttm_place vector in on-stack struct ttm_placement allocs */ #define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN /** * struct i915_ttm_tt - TTM page vector with additional private information * @ttm: The base TTM page vector. * @dev: The struct device used for dma mapping and unmapping. * @cached_rsgt: The cached scatter-gather table. * @is_shmem: Set if using shmem. * @filp: The shmem file, if using shmem backend. * * Note that DMA may be going on right up to the point where the page- * vector is unpopulated in delayed destroy. Hence keep the * scatter-gather table mapped and cached up to that point. This is * different from the cached gem object io scatter-gather table which * doesn't have an associated dma mapping. */ struct i915_ttm_tt { struct ttm_tt ttm; struct device *dev; struct i915_refct_sgt cached_rsgt; bool is_shmem; struct file *filp; }; static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, .mem_type = I915_PL_SYSTEM, .flags = 0, }; static struct ttm_placement i915_sys_placement = { .num_placement = 1, .placement = &sys_placement_flags, .num_busy_placement = 1, .busy_placement = &sys_placement_flags, }; /** * i915_ttm_sys_placement - Return the struct ttm_placement to be * used for an object in system memory. * * Rather than making the struct extern, use this * function. * * Return: A pointer to a static variable for sys placement. */ struct ttm_placement *i915_ttm_sys_placement(void) { return &i915_sys_placement; } static int i915_ttm_err_to_gem(int err) { /* Fastpath */ if (likely(!err)) return 0; switch (err) { case -EBUSY: /* * TTM likes to convert -EDEADLK to -EBUSY, and wants us to * restart the operation, since we don't record the contending * lock. We use -EAGAIN to restart. */ return -EAGAIN; case -ENOSPC: /* * Memory type / region is full, and we can't evict. * Except possibly system, that returns -ENOMEM; */ return -ENXIO; default: break; } return err; } static enum ttm_caching i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj) { /* * Objects only allowed in system get cached cpu-mappings, or when * evicting lmem-only buffers to system for swapping. Other objects get * WC mapping for now. Even if in system. 
*/ if (obj->mm.n_placements <= 1) return ttm_cached; return ttm_write_combined; } static void i915_ttm_place_from_region(const struct intel_memory_region *mr, struct ttm_place *place, resource_size_t offset, resource_size_t size, unsigned int flags) { memset(place, 0, sizeof(*place)); place->mem_type = intel_region_to_ttm_type(mr); if (mr->type == INTEL_MEMORY_SYSTEM) return; if (flags & I915_BO_ALLOC_CONTIGUOUS) place->flags |= TTM_PL_FLAG_CONTIGUOUS; if (offset != I915_BO_INVALID_OFFSET) { WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn)); place->fpfn = offset >> PAGE_SHIFT; WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn)); place->lpfn = place->fpfn + (size >> PAGE_SHIFT); } else if (mr->io_size && mr->io_size < mr->total) { if (flags & I915_BO_ALLOC_GPU_ONLY) { place->flags |= TTM_PL_FLAG_TOPDOWN; } else { place->fpfn = 0; WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn)); place->lpfn = mr->io_size >> PAGE_SHIFT; } } } static void i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, struct ttm_place *requested, struct ttm_place *busy, struct ttm_placement *placement) { unsigned int num_allowed = obj->mm.n_placements; unsigned int flags = obj->flags; unsigned int i; placement->num_placement = 1; i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : obj->mm.region, requested, obj->bo_offset, obj->base.size, flags); /* Cache this on object? */ placement->num_busy_placement = num_allowed; for (i = 0; i < placement->num_busy_placement; ++i) i915_ttm_place_from_region(obj->mm.placements[i], busy + i, obj->bo_offset, obj->base.size, flags); if (num_allowed == 0) { *busy = *requested; placement->num_busy_placement = 1; } placement->placement = requested; placement->busy_placement = busy; } static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; struct file *filp = i915_tt->filp; struct sgt_iter sgt_iter; struct sg_table *st; struct page *page; unsigned long i; int err; if (!filp) { struct address_space *mapping; gfp_t mask; filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE); if (IS_ERR(filp)) return PTR_ERR(filp); mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; mapping = filp->f_mapping; mapping_set_gfp_mask(mapping, mask); GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); i915_tt->filp = filp; } st = &i915_tt->cached_rsgt.table; err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping, max_segment); if (err) return err; err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (err) goto err_free_st; i = 0; for_each_sgt_page(page, sgt_iter, st) ttm->pages[i++] = page; if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; return 0; err_free_st: shmem_sg_free_table(st, filp->f_mapping, false, false); return err; } static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; struct sg_table *st = &i915_tt->cached_rsgt.table; shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping, backup, backup); } static void 
i915_ttm_tt_release(struct kref *ref) { struct i915_ttm_tt *i915_tt = container_of(ref, typeof(*i915_tt), cached_rsgt.kref); struct sg_table *st = &i915_tt->cached_rsgt.table; GEM_WARN_ON(st->sgl); kfree(i915_tt); } static const struct i915_refct_sgt_ops tt_rsgt_ops = { .release = i915_ttm_tt_release }; static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), bdev); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); unsigned long ccs_pages = 0; enum ttm_caching caching; struct i915_ttm_tt *i915_tt; int ret; if (i915_ttm_is_ghost_object(bo)) return NULL; i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); if (!i915_tt) return NULL; if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource || ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt)) page_flags |= TTM_TT_FLAG_ZERO_ALLOC; caching = i915_ttm_select_tt_caching(obj); if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) { page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE; i915_tt->is_shmem = true; } if (i915_gem_object_needs_ccs_pages(obj)) ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size, NUM_BYTES_PER_CCS_BYTE), PAGE_SIZE); ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages); if (ret) goto err_free; __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size, &tt_rsgt_ops); i915_tt->dev = obj->base.dev->dev; return &i915_tt->ttm; err_free: kfree(i915_tt); return NULL; } static int i915_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); if (i915_tt->is_shmem) return i915_ttm_tt_shmem_populate(bdev, ttm, ctx); return ttm_pool_alloc(&bdev->pool, ttm, ctx); } static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); struct sg_table *st = &i915_tt->cached_rsgt.table; if (st->sgl) dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); if (i915_tt->is_shmem) { i915_ttm_tt_shmem_unpopulate(ttm); } else { sg_free_table(st); ttm_pool_free(&bdev->pool, ttm); } } static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); if (i915_tt->filp) fput(i915_tt->filp); ttm_tt_fini(ttm); i915_refct_sgt_put(&i915_tt->cached_rsgt); } static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); if (i915_ttm_is_ghost_object(bo)) return false; /* * EXTERNAL objects should never be swapped out by TTM, instead we need * to handle that ourselves. TTM will already skip such objects for us, * but we would like to avoid grabbing locks for no good reason. */ if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return false; /* Will do for now. Our pinned objects are still on TTM's LRU lists */ if (!i915_gem_object_evictable(obj)) return false; return ttm_bo_eviction_valuable(bo, place); } static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { *placement = i915_sys_placement; } /** * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information * @obj: The GEM object * This function frees any LMEM-related information that is cached on * the object. 
For example the radix tree for fast page lookup and the * cached refcounted sg-table */ void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; void __rcu **slot; if (!obj->ttm.cached_io_rsgt) return; rcu_read_lock(); radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0) radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); rcu_read_unlock(); i915_refct_sgt_put(obj->ttm.cached_io_rsgt); obj->ttm.cached_io_rsgt = NULL; } /** * i915_ttm_purge - Clear an object of its memory * @obj: The object * * This function is called to clear an object of it's memory when it is * marked as not needed anymore. * * Return: 0 on success, negative error code on failure. */ int i915_ttm_purge(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct i915_ttm_tt *i915_tt = container_of(bo->ttm, typeof(*i915_tt), ttm); struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, }; struct ttm_placement place = {}; int ret; if (obj->mm.madv == __I915_MADV_PURGED) return 0; ret = ttm_bo_validate(bo, &place, &ctx); if (ret) return ret; if (bo->ttm && i915_tt->filp) { /* * The below fput(which eventually calls shmem_truncate) might * be delayed by worker, so when directly called to purge the * pages(like by the shrinker) we should try to be more * aggressive and release the pages immediately. */ shmem_truncate_range(file_inode(i915_tt->filp), 0, (loff_t)-1); fput(fetch_and_zero(&i915_tt->filp)); } obj->write_domain = 0; obj->read_domains = 0; i915_ttm_adjust_gem_after_move(obj); i915_ttm_free_cached_io_rsgt(obj); obj->mm.madv = __I915_MADV_PURGED; return 0; } static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct i915_ttm_tt *i915_tt = container_of(bo->ttm, typeof(*i915_tt), ttm); struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT, }; struct ttm_placement place = {}; int ret; if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource)) return 0; GEM_BUG_ON(!i915_tt->is_shmem); if (!i915_tt->filp) return 0; ret = ttm_bo_wait_ctx(bo, &ctx); if (ret) return ret; switch (obj->mm.madv) { case I915_MADV_DONTNEED: return i915_ttm_purge(obj); case __I915_MADV_PURGED: return 0; } if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) return 0; bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED; ret = ttm_bo_validate(bo, &place, &ctx); if (ret) { bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; return ret; } if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK) __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping); return 0; } static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); /* * This gets called twice by ttm, so long as we have a ttm resource or * ttm_tt then we can still safely call this. Due to pipeline-gutting, * we maybe have NULL bo->resource, but in that case we should always * have a ttm alive (like if the pages are swapped out). 
*/ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) { __i915_gem_object_pages_fini(obj); i915_ttm_free_cached_io_rsgt(obj); } } static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); struct sg_table *st; int ret; if (i915_tt->cached_rsgt.table.sgl) return i915_refct_sgt_get(&i915_tt->cached_rsgt); st = &i915_tt->cached_rsgt.table; ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, i915_sg_segment_size(i915_tt->dev), GFP_KERNEL); if (ret) { st->sgl = NULL; return ERR_PTR(ret); } ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); if (ret) { sg_free_table(st); return ERR_PTR(ret); } return i915_refct_sgt_get(&i915_tt->cached_rsgt); } /** * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the * resource memory * @obj: The GEM object used for sg-table caching * @res: The struct ttm_resource for which an sg-table is requested. * * This function returns a refcounted sg-table representing the memory * pointed to by @res. If @res is the object's current resource it may also * cache the sg_table on the object or attempt to access an already cached * sg-table. The refcounted sg-table needs to be put when no-longer in use. * * Return: A valid pointer to a struct i915_refct_sgt or error pointer on * failure. */ struct i915_refct_sgt * i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct ttm_resource *res) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); u32 page_alignment; if (!i915_ttm_gtt_binds_lmem(res)) return i915_ttm_tt_get_st(bo->ttm); page_alignment = bo->page_alignment << PAGE_SHIFT; if (!page_alignment) page_alignment = obj->mm.region->min_page_size; /* * If CPU mapping differs, we need to add the ttm_tt pages to * the resulting st. Might make sense for GGTT. */ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res)); if (bo->resource == res) { if (!obj->ttm.cached_io_rsgt) { struct i915_refct_sgt *rsgt; rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, res, page_alignment); if (IS_ERR(rsgt)) return rsgt; obj->ttm.cached_io_rsgt = rsgt; } return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); } return intel_region_ttm_resource_to_rsgt(obj->mm.region, res, page_alignment); } static int i915_ttm_truncate(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); long err; WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED); err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, true, 15 * HZ); if (err < 0) return err; if (err == 0) return -EBUSY; err = i915_ttm_move_notify(bo); if (err) return err; return i915_ttm_purge(obj); } static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); int ret; if (i915_ttm_is_ghost_object(bo)) return; ret = i915_ttm_move_notify(bo); GEM_WARN_ON(ret); GEM_WARN_ON(obj->ttm.cached_io_rsgt); if (!ret && obj->mm.madv != I915_MADV_WILLNEED) i915_ttm_purge(obj); } /** * i915_ttm_resource_mappable - Return true if the ttm resource is CPU * accessible. * @res: The TTM resource to check. * * This is interesting on small-BAR systems where we may encounter lmem objects * that can't be accessed via the CPU. 
*/ bool i915_ttm_resource_mappable(struct ttm_resource *res) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); if (!i915_ttm_cpu_maps_iomem(res)) return true; return bman_res->used_visible_size == PFN_UP(bman_res->base.size); } static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo); bool unknown_state; if (i915_ttm_is_ghost_object(mem->bo)) return -EINVAL; if (!kref_get_unless_zero(&obj->base.refcount)) return -EINVAL; assert_object_held(obj); unknown_state = i915_gem_object_has_unknown_state(obj); i915_gem_object_put(obj); if (unknown_state) return -EINVAL; if (!i915_ttm_cpu_maps_iomem(mem)) return 0; if (!i915_ttm_resource_mappable(mem)) return -EINVAL; mem->bus.caching = ttm_write_combined; mem->bus.is_iomem = true; return 0; } static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); struct scatterlist *sg; unsigned long base; unsigned int ofs; GEM_BUG_ON(i915_ttm_is_ghost_object(bo)); GEM_WARN_ON(bo->ttm); base = obj->mm.region->iomap.base - obj->mm.region->region.start; sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs); return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; } static int i915_ttm_access_memory(struct ttm_buffer_object *bo, unsigned long offset, void *buf, int len, int write) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); resource_size_t iomap = obj->mm.region->iomap.base - obj->mm.region->region.start; unsigned long page = offset >> PAGE_SHIFT; unsigned long bytes_left = len; /* * TODO: For now just let it fail if the resource is non-mappable, * otherwise we need to perform the memcpy from the gpu here, without * interfering with the object (like moving the entire thing). */ if (!i915_ttm_resource_mappable(bo->resource)) return -EIO; offset -= page << PAGE_SHIFT; do { unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); void __iomem *ptr; dma_addr_t daddr; daddr = i915_gem_object_get_dma_address(obj, page); ptr = ioremap_wc(iomap + daddr + offset, bytes); if (!ptr) return -EIO; if (write) memcpy_toio(ptr, buf, bytes); else memcpy_fromio(buf, ptr, bytes); iounmap(ptr); page++; buf += bytes; bytes_left -= bytes; offset = 0; } while (bytes_left); return len; } /* * All callbacks need to take care not to downcast a struct ttm_buffer_object * without checking its subclass, since it might be a TTM ghost object. */ static struct ttm_device_funcs i915_ttm_bo_driver = { .ttm_tt_create = i915_ttm_tt_create, .ttm_tt_populate = i915_ttm_tt_populate, .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, .ttm_tt_destroy = i915_ttm_tt_destroy, .eviction_valuable = i915_ttm_eviction_valuable, .evict_flags = i915_ttm_evict_flags, .move = i915_ttm_move, .swap_notify = i915_ttm_swap_notify, .delete_mem_notify = i915_ttm_delete_mem_notify, .io_mem_reserve = i915_ttm_io_mem_reserve, .io_mem_pfn = i915_ttm_io_mem_pfn, .access_memory = i915_ttm_access_memory, }; /** * i915_ttm_driver - Return a pointer to the TTM device funcs * * Return: Pointer to statically allocated TTM device funcs. 
*/ struct ttm_device_funcs *i915_ttm_driver(void) { return &i915_ttm_bo_driver; } static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, struct ttm_placement *placement) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, }; int real_num_busy; int ret; /* First try only the requested placement. No eviction. */ real_num_busy = fetch_and_zero(&placement->num_busy_placement); ret = ttm_bo_validate(bo, placement, &ctx); if (ret) { ret = i915_ttm_err_to_gem(ret); /* * Anything that wants to restart the operation gets to * do that. */ if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS || ret == -EAGAIN) return ret; /* * If the initial attempt fails, allow all accepted placements, * evicting if necessary. */ placement->num_busy_placement = real_num_busy; ret = ttm_bo_validate(bo, placement, &ctx); if (ret) return i915_ttm_err_to_gem(ret); } if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); if (ret) return ret; i915_ttm_adjust_domains_after_move(obj); i915_ttm_adjust_gem_after_move(obj); } if (!i915_gem_object_has_pages(obj)) { struct i915_refct_sgt *rsgt = i915_ttm_resource_get_st(obj, bo->resource); if (IS_ERR(rsgt)) return PTR_ERR(rsgt); GEM_BUG_ON(obj->mm.rsgt); obj->mm.rsgt = rsgt; __i915_gem_object_set_pages(obj, &rsgt->table); } GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages)); i915_ttm_adjust_lru(obj); return ret; } static int i915_ttm_get_pages(struct drm_i915_gem_object *obj) { struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS]; struct ttm_placement placement; /* restricted by sg_alloc_table */ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int)) return -E2BIG; GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS); /* Move to the requested placement. */ i915_ttm_placement_from_obj(obj, &requested, busy, &placement); return __i915_ttm_get_pages(obj, &placement); } /** * DOC: Migration vs eviction * * GEM migration may not be the same as TTM migration / eviction. If * the TTM core decides to evict an object it may be evicted to a * TTM memory type that is not in the object's allowable GEM regions, or * in fact theoretically to a TTM memory type that doesn't correspond to * a GEM memory region. In that case the object's GEM region is not * updated, and the data is migrated back to the GEM region at * get_pages time. TTM may however set up CPU ptes to the object even * when it is evicted. * Gem forced migration using the i915_ttm_migrate() op, is allowed even * to regions that are not in the object's list of allowable placements. */ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj, struct intel_memory_region *mr, unsigned int flags) { struct ttm_place requested; struct ttm_placement placement; int ret; i915_ttm_place_from_region(mr, &requested, obj->bo_offset, obj->base.size, flags); placement.num_placement = 1; placement.num_busy_placement = 1; placement.placement = &requested; placement.busy_placement = &requested; ret = __i915_ttm_get_pages(obj, &placement); if (ret) return ret; /* * Reinitialize the region bindings. This is primarily * required for objects where the new region is not in * its allowable placements. 
*/ if (obj->mm.region != mr) { i915_gem_object_release_memory_region(obj); i915_gem_object_init_memory_region(obj, mr); } return 0; } static int i915_ttm_migrate(struct drm_i915_gem_object *obj, struct intel_memory_region *mr, unsigned int flags) { return __i915_ttm_migrate(obj, mr, flags); } static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, struct sg_table *st) { /* * We're currently not called from a shrinker, so put_pages() * typically means the object is about to destroyed, or called * from move_notify(). So just avoid doing much for now. * If the object is not destroyed next, The TTM eviction logic * and shrinkers will move it out if needed. */ if (obj->mm.rsgt) i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt)); } /** * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists. * @obj: The object */ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); struct i915_ttm_tt *i915_tt = container_of(bo->ttm, typeof(*i915_tt), ttm); bool shrinkable = bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm); /* * Don't manipulate the TTM LRUs while in TTM bo destruction. * We're called through i915_ttm_delete_mem_notify(). */ if (!kref_read(&bo->kref)) return; /* * We skip managing the shrinker LRU in set_pages() and just manage * everything here. This does at least solve the issue with having * temporary shmem mappings(like with evicted lmem) not being visible to * the shrinker. Only our shmem objects are shrinkable, everything else * we keep as unshrinkable. * * To make sure everything plays nice we keep an extra shrink pin in TTM * if the underlying pages are not currently shrinkable. Once we release * our pin, like when the pages are moved to shmem, the pages will then * be added to the shrinker LRU, assuming the caller isn't also holding * a pin. * * TODO: consider maybe also bumping the shrinker list here when we have * already unpinned it, which should give us something more like an LRU. * * TODO: There is a small window of opportunity for this function to * get called from eviction after we've dropped the last GEM refcount, * but before the TTM deleted flag is set on the object. Avoid * adjusting the shrinker list in such cases, since the object is * not available to the shrinker anyway due to its zero refcount. * To fix this properly we should move to a TTM shrinker LRU list for * these objects. */ if (kref_get_unless_zero(&obj->base.refcount)) { if (shrinkable != obj->mm.ttm_shrinkable) { if (shrinkable) { if (obj->mm.madv == I915_MADV_WILLNEED) __i915_gem_object_make_shrinkable(obj); else __i915_gem_object_make_purgeable(obj); } else { i915_gem_object_make_unshrinkable(obj); } obj->mm.ttm_shrinkable = shrinkable; } i915_gem_object_put(obj); } /* * Put on the correct LRU list depending on the MADV status */ spin_lock(&bo->bdev->lru_lock); if (shrinkable) { /* Try to keep shmem_tt from being considered for shrinking. */ bo->priority = TTM_MAX_BO_PRIORITY - 1; } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { bo->priority = I915_TTM_PRIO_NO_PAGES; } else { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->resource->mem_type); /* * If we need to place an LMEM resource which doesn't need CPU * access then we should try not to victimize mappable objects * first, since we likely end up stealing more of the mappable * portion. 
And likewise when we try to find space for a mappble * object, we know not to ever victimize objects that don't * occupy any mappable pages. */ if (i915_ttm_cpu_maps_iomem(bo->resource) && i915_ttm_buddy_man_visible_size(man) < man->size && !(obj->flags & I915_BO_ALLOC_GPU_ONLY)) bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS; else bo->priority = I915_TTM_PRIO_HAS_PAGES; } ttm_bo_move_to_lru_tail(bo); spin_unlock(&bo->bdev->lru_lock); } /* * TTM-backed gem object destruction requires some clarification. * Basically we have two possibilities here. We can either rely on the * i915 delayed destruction and put the TTM object when the object * is idle. This would be detected by TTM which would bypass the * TTM delayed destroy handling. The other approach is to put the TTM * object early and rely on the TTM destroyed handling, and then free * the leftover parts of the GEM object once TTM's destroyed list handling is * complete. For now, we rely on the latter for two reasons: * a) TTM can evict an object even when it's on the delayed destroy list, * which in theory allows for complete eviction. * b) There is work going on in TTM to allow freeing an object even when * it's not idle, and using the TTM destroyed list handling could help us * benefit from that. */ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!obj->ttm.created); ttm_bo_put(i915_gem_to_ttm(obj)); } static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) { struct vm_area_struct *area = vmf->vma; struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); intel_wakeref_t wakeref = 0; vm_fault_t ret; int idx; /* Sanity check that we allow writing into this object */ if (unlikely(i915_gem_object_is_readonly(obj) && area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; ret = ttm_bo_vm_reserve(bo, vmf); if (ret) return ret; if (obj->mm.madv != I915_MADV_WILLNEED) { dma_resv_unlock(bo->base.resv); return VM_FAULT_SIGBUS; } /* * This must be swapped out with shmem ttm_tt (pipeline-gutting). * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as * far as far doing a ttm_bo_move_null(), which should skip all the * other junk. 
*/ if (!bo->resource) { struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = true, /* should be idle already */ }; int err; GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)); err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); if (err) { dma_resv_unlock(bo->base.resv); return VM_FAULT_SIGBUS; } } else if (!i915_ttm_resource_mappable(bo->resource)) { int err = -ENODEV; int i; for (i = 0; i < obj->mm.n_placements; i++) { struct intel_memory_region *mr = obj->mm.placements[i]; unsigned int flags; if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM) continue; flags = obj->flags; flags &= ~I915_BO_ALLOC_GPU_ONLY; err = __i915_ttm_migrate(obj, mr, flags); if (!err) break; } if (err) { drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n", ERR_PTR(err)); dma_resv_unlock(bo->base.resv); ret = VM_FAULT_SIGBUS; goto out_rpm; } } if (i915_ttm_cpu_maps_iomem(bo->resource)) wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); if (drm_dev_enter(dev, &idx)) { ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, TTM_BO_VM_NUM_PREFAULT); drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) goto out_rpm; /* * ttm_bo_vm_reserve() already has dma_resv_lock. * userfault_count is protected by dma_resv lock and rpm wakeref. */ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { obj->userfault_count = 1; spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list); spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource)); } if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); i915_ttm_adjust_lru(obj); dma_resv_unlock(bo->base.resv); out_rpm: if (wakeref) intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); return ret; } static int vm_access_ttm(struct vm_area_struct *area, unsigned long addr, void *buf, int len, int write) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(area->vm_private_data); if (i915_gem_object_is_readonly(obj) && write) return -EACCES; return ttm_bo_vm_access(area, addr, buf, len, write); } static void ttm_vm_open(struct vm_area_struct *vma) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(vma->vm_private_data); GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); i915_gem_object_get(obj); } static void ttm_vm_close(struct vm_area_struct *vma) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(vma->vm_private_data); GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); i915_gem_object_put(obj); } static const struct vm_operations_struct vm_ops_ttm = { .fault = vm_fault_ttm, .access = vm_access_ttm, .open = ttm_vm_open, .close = ttm_vm_close, }; static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) { /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */ GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node)); return drm_vma_node_offset_addr(&obj->base.vma_node); } static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); intel_wakeref_t wakeref = 0; assert_object_held_shared(obj); if (i915_ttm_cpu_maps_iomem(bo->resource)) { wakeref = 
intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); /* userfault_count is protected by obj lock and rpm wakeref. */ if (obj->userfault_count) { spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); list_del(&obj->userfault_link); spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); obj->userfault_count = 0; } } GEM_WARN_ON(obj->userfault_count); ttm_bo_unmap_virtual(i915_gem_to_ttm(obj)); if (wakeref) intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); } static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", .flags = I915_GEM_OBJECT_IS_SHRINKABLE | I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST, .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, .truncate = i915_ttm_truncate, .shrink = i915_ttm_shrink, .adjust_lru = i915_ttm_adjust_lru, .delayed_free = i915_ttm_delayed_free, .migrate = i915_ttm_migrate, .mmap_offset = i915_ttm_mmap_offset, .unmap_virtual = i915_ttm_unmap_virtual, .mmap_ops = &vm_ops_ttm, }; void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); i915_gem_object_release_memory_region(obj); mutex_destroy(&obj->ttm.get_io_page.lock); if (obj->ttm.created) { /* * We freely manage the shrinker LRU outide of the mm.pages life * cycle. As a result when destroying the object we should be * extra paranoid and ensure we remove it from the LRU, before * we free the object. * * Touching the ttm_shrinkable outside of the object lock here * should be safe now that the last GEM object ref was dropped. */ if (obj->mm.ttm_shrinkable) i915_gem_object_make_unshrinkable(obj); i915_ttm_backup_free(obj); /* This releases all gem object bindings to the backend. */ __i915_gem_free_object(obj); call_rcu(&obj->rcu, __i915_gem_free_object_rcu); } else { __i915_gem_object_fini(obj); } } /* * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object * @mem: The initial memory region for the object. * @obj: The gem object. * @size: Object size in bytes. * @flags: gem object flags. * * Return: 0 on success, negative error code on failure. */ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, resource_size_t offset, resource_size_t size, resource_size_t page_size, unsigned int flags) { static struct lock_class_key lock_class; struct drm_i915_private *i915 = mem->i915; struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, }; enum ttm_bo_type bo_type; int ret; drm_gem_private_object_init(&i915->drm, &obj->base, size); i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); obj->bo_offset = offset; /* Don't put on a region list until we're either locked or fully initialized. */ obj->mm.region = mem; INIT_LIST_HEAD(&obj->mm.region_link); INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->ttm.get_io_page.lock); bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : ttm_bo_type_kernel; obj->base.vma_node.driver_private = i915_gem_to_ttm(obj); /* Forcing the page size is kernel internal only */ GEM_BUG_ON(page_size && obj->mm.n_placements); /* * Keep an extra shrink pin to prevent the object from being made * shrinkable too early. If the ttm_tt is ever allocated in shmem, we * drop the pin. The TTM backend manages the shrinker LRU itself, * outside of the normal mm.pages life cycle. 
*/ i915_gem_object_make_unshrinkable(obj); /* * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. * Similarly, in delayed_destroy, we can't call ttm_bo_put() * until successful initialization. */ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type, &i915_sys_placement, page_size >> PAGE_SHIFT, &ctx, NULL, NULL, i915_ttm_bo_destroy); /* * XXX: The ttm_bo_init_reserved() functions returns -ENOSPC if the size * is too big to add vma. The direct function that returns -ENOSPC is * drm_mm_insert_node_in_range(). To handle the same error as other code * that returns -E2BIG when the size is too large, it converts -ENOSPC to * -E2BIG. */ if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC) ret = -E2BIG; if (ret) return i915_ttm_err_to_gem(ret); obj->ttm.created = true; i915_gem_object_release_memory_region(obj); i915_gem_object_init_memory_region(obj, mem); i915_ttm_adjust_domains_after_move(obj); i915_ttm_adjust_gem_after_move(obj); i915_gem_object_unlock(obj); return 0; } static const struct intel_memory_region_ops ttm_system_region_ops = { .init_object = __i915_gem_ttm_object_init, .release = intel_region_ttm_fini, }; struct intel_memory_region * i915_gem_ttm_system_setup(struct drm_i915_private *i915, u16 type, u16 instance) { struct intel_memory_region *mr; mr = intel_memory_region_create(i915, 0, totalram_pages() << PAGE_SHIFT, PAGE_SIZE, 0, 0, type, instance, &ttm_system_region_ops); if (IS_ERR(mr)) return mr; intel_memory_region_set_name(mr, "system-ttm"); return mr; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
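The vm_fault_ttm() path and the mmap_offset hook above implement the kernel side of the I915_GEM_MMAP_OFFSET uAPI. As a rough illustration of how that fake offset is consumed, here is a minimal userspace sketch (not part of the kernel file above; the device node, the WB caching mode and the absence of error reporting are assumptions) that creates a GEM object, queries its mmap offset and maps it through the DRM fd so that the first CPU access lands in the fault handler shown earlier.

/* Illustrative userspace sketch -- not part of i915_gem_ttm.c. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/i915_drm.h>	/* or <libdrm/i915_drm.h>, depending on installed headers */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	if (fd < 0)
		return 1;

	struct drm_i915_gem_create create = { .size = 4096 };
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	/* Ask for the fake offset that vm_fault_ttm() will serve. */
	struct drm_i915_gem_mmap_offset mmo = {
		.handle = create.handle,
		.flags = I915_MMAP_OFFSET_WB,	/* discrete parts expect I915_MMAP_OFFSET_FIXED */
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo))
		return 1;

	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, mmo.offset);
	if (ptr == MAP_FAILED)
		return 1;

	memset(ptr, 0, 4096);	/* first touch faults through vm_fault_ttm() */
	munmap(ptr, 4096);
	close(fd);
	return 0;
}

On discrete parts the object may initially sit in non-mappable LMEM, in which case the fault handler above migrates it to a CPU-accessible placement before inserting PTEs; userspace only has to pick a caching mode the kernel accepts.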
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/pagevec.h> #include <linux/shmem_fs.h> #include <linux/swap.h> #include <drm/drm_cache.h> #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_gem_tiling.h" #include "i915_gemfs.h" #include "i915_scatterlist.h" #include "i915_trace.h" /* * Move folios to appropriate lru and release the batch, decrementing the * ref count of those folios. */ static void check_release_folio_batch(struct folio_batch *fbatch) { check_move_unevictable_folios(fbatch); __folio_batch_release(fbatch); cond_resched(); } void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, bool dirty, bool backup) { struct sgt_iter sgt_iter; struct folio_batch fbatch; struct folio *last = NULL; struct page *page; mapping_clear_unevictable(mapping); folio_batch_init(&fbatch); for_each_sgt_page(page, sgt_iter, st) { struct folio *folio = page_folio(page); if (folio == last) continue; last = folio; if (dirty) folio_mark_dirty(folio); if (backup) folio_mark_accessed(folio); if (!folio_batch_add(&fbatch, folio)) check_release_folio_batch(&fbatch); } if (fbatch.nr) check_release_folio_batch(&fbatch); sg_free_table(st); } int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, size_t size, struct intel_memory_region *mr, struct address_space *mapping, unsigned int max_segment) { unsigned int page_count; /* restricted by sg_alloc_table */ unsigned long i; struct scatterlist *sg; unsigned long next_pfn = 0; /* suppress gcc warning */ gfp_t noreclaim; int ret; if (overflows_type(size / PAGE_SIZE, page_count)) return -E2BIG; page_count = size / PAGE_SIZE; /* * If there's no chance of allocating enough pages for the whole * object, bail early. */ if (size > resource_size(&mr->region)) return -ENOMEM; if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN)) return -ENOMEM; /* * Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. * * Fail silently without starting the shrinker */ mapping_set_unevictable(mapping); noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); noreclaim |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; st->nents = 0; for (i = 0; i < page_count; i++) { struct folio *folio; const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, 0, }, *s = shrink; gfp_t gfp = noreclaim; do { cond_resched(); folio = shmem_read_folio_gfp(mapping, i, gfp); if (!IS_ERR(folio)) break; if (!*s) { ret = PTR_ERR(folio); goto err_sg; } i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++); /* * We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. * * However, since graphics tend to be disposable, * defer the oom here by reporting the ENOMEM back * to userspace. */ if (!*s) { /* reclaim and warn, but no oom */ gfp = mapping_gfp_mask(mapping); /* * Our bo are always dirty and so we require * kswapd to reclaim our pages (direct reclaim * does not effectively begin pageout of our * buffers on its own). However, direct reclaim * only waits for kswapd when under allocation * congestion. So as a result __GFP_RECLAIM is * unreliable and fails to actually reclaim our * dirty pages -- unless you try over and over * again with !__GFP_NORETRY. However, we still * want to fail this allocation rather than * trigger the out-of-memory killer and for * this we want __GFP_RETRY_MAYFAIL. 
*/ gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN; } } while (1); if (!i || sg->length >= max_segment || folio_pfn(folio) != next_pfn) { if (i) sg = sg_next(sg); st->nents++; sg_set_folio(sg, folio, folio_size(folio), 0); } else { /* XXX: could overflow? */ sg->length += folio_size(folio); } next_pfn = folio_pfn(folio) + folio_nr_pages(folio); i += folio_nr_pages(folio) - 1; /* Check that the i965g/gm workaround works. */ GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL); } if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); /* Trim unused sg entries to avoid wasting memory. */ i915_sg_trim(st); return 0; err_sg: sg_mark_end(sg); if (sg != st->sgl) { shmem_sg_free_table(st, mapping, false, false); } else { mapping_clear_unevictable(mapping); sg_free_table(st); } /* * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual * ENOMEM for a genuine allocation failure. * * We use ENOSPC in our driver to mean that we have run out of aperture * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ if (ret == -ENOSPC) ret = -ENOMEM; return ret; } static int shmem_get_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_memory_region *mem = obj->mm.region; struct address_space *mapping = obj->base.filp->f_mapping; unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); struct sg_table *st; struct sgt_iter sgt_iter; struct page *page; int ret; /* * Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); rebuild_st: st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN); if (!st) return -ENOMEM; ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping, max_segment); if (ret) goto err_st; ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { /* * DMA remapping failed? One possible cause is that * it could not reserve enough large entries, asking * for PAGE_SIZE chunks instead may be helpful. */ if (max_segment > PAGE_SIZE) { for_each_sgt_page(page, sgt_iter, st) put_page(page); sg_free_table(st); kfree(st); max_segment = PAGE_SIZE; goto rebuild_st; } else { dev_warn(i915->drm.dev, "Failed to DMA remap %zu pages\n", obj->base.size >> PAGE_SHIFT); goto err_pages; } } if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; __i915_gem_object_set_pages(obj, st); return 0; err_pages: shmem_sg_free_table(st, mapping, false, false); /* * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual * ENOMEM for a genuine allocation failure. * * We use ENOSPC in our driver to mean that we have run out of aperture * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ err_st: if (ret == -ENOSPC) ret = -ENOMEM; kfree(st); return ret; } static int shmem_truncate(struct drm_i915_gem_object *obj) { /* * Our goal here is to return as much of the memory as * is possible back to the system as we are called from OOM. * To do this we must instruct the shmfs to drop all of its * backing pages, *now*. 
*/ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); return 0; } void __shmem_writeback(size_t size, struct address_space *mapping) { struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1, }; unsigned long i; /* * Leave mmapings intact (GTT will have been revoked on unbinding, * leaving only CPU mmapings around) and add those pages to the LRU * instead of invoking writeback so they are aged and paged out * as normal. */ /* Begin writeback on each dirty page */ for (i = 0; i < size >> PAGE_SHIFT; i++) { struct page *page; page = find_lock_page(mapping, i); if (!page) continue; if (!page_mapped(page) && clear_page_dirty_for_io(page)) { int ret; SetPageReclaim(page); ret = mapping->a_ops->writepage(page, &wbc); if (!PageWriteback(page)) ClearPageReclaim(page); if (!ret) goto put; } unlock_page(page); put: put_page(page); } } static void shmem_writeback(struct drm_i915_gem_object *obj) { __shmem_writeback(obj->base.size, obj->base.filp->f_mapping); } static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags) { switch (obj->mm.madv) { case I915_MADV_DONTNEED: return i915_gem_object_truncate(obj); case __I915_MADV_PURGED: return 0; } if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK) shmem_writeback(obj); return 0; } void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush) { struct drm_i915_private *i915 = to_i915(obj->base.dev); GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); if (obj->mm.madv == I915_MADV_DONTNEED) obj->mm.dirty = false; if (needs_clflush && (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 && !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) drm_clflush_sg(pages); __start_cpu_write(obj); /* * On non-LLC igfx platforms, force the flush-on-acquire if this is ever * swapped-in. Our async flush path is not trust worthy enough yet(and * happens in the wrong order), and with some tricks it's conceivable * for userspace to change the cache-level to I915_CACHE_NONE after the * pages are swapped-in, and since execbuf binds the object before doing * the async flush, we have a race window. 
*/ if (!HAS_LLC(i915) && !IS_DGFX(i915)) obj->cache_dirty = true; } void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) { __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj, pages); shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping, obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED); kfree(pages); obj->mm.dirty = false; } static void shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { if (likely(i915_gem_object_has_struct_page(obj))) i915_gem_object_put_pages_shmem(obj, pages); else i915_gem_object_put_pages_phys(obj, pages); } static int shmem_pwrite(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg) { struct address_space *mapping = obj->base.filp->f_mapping; const struct address_space_operations *aops = mapping->a_ops; char __user *user_data = u64_to_user_ptr(arg->data_ptr); u64 remain, offset; unsigned int pg; /* Caller already validated user args */ GEM_BUG_ON(!access_ok(user_data, arg->size)); if (!i915_gem_object_has_struct_page(obj)) return i915_gem_object_pwrite_phys(obj, arg); /* * Before we instantiate/pin the backing store for our use, we * can prepopulate the shmemfs filp efficiently using a write into * the pagecache. We avoid the penalty of instantiating all the * pages, important if the user is just writing to a few and never * uses the object on the GPU, and using a direct write into shmemfs * allows it to avoid the cost of retrieving a page (either swapin * or clearing-before-use) before it is overwritten. */ if (i915_gem_object_has_pages(obj)) return -ENODEV; if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; /* * Before the pages are instantiated the object is treated as being * in the CPU domain. The pages will be clflushed as required before * use, and we can freely write into the pages directly. If userspace * races pwrite with any other operation; corruption will ensue - * that is userspace's prerogative! 
*/ remain = arg->size; offset = arg->offset; pg = offset_in_page(offset); do { unsigned int len, unwritten; struct page *page; void *data, *vaddr; int err; char __maybe_unused c; len = PAGE_SIZE - pg; if (len > remain) len = remain; /* Prefault the user page to reduce potential recursion */ err = __get_user(c, user_data); if (err) return err; err = __get_user(c, user_data + len - 1); if (err) return err; err = aops->write_begin(obj->base.filp, mapping, offset, len, &page, &data); if (err < 0) return err; vaddr = kmap_atomic(page); unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len); kunmap_atomic(vaddr); err = aops->write_end(obj->base.filp, mapping, offset, len, len - unwritten, page, data); if (err < 0) return err; /* We don't handle -EFAULT, leave it to the caller to check */ if (unwritten) return -ENODEV; remain -= len; user_data += len; offset += len; pg = 0; } while (remain); return 0; } static int shmem_pread(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *arg) { if (!i915_gem_object_has_struct_page(obj)) return i915_gem_object_pread_phys(obj, arg); return -ENODEV; } static void shmem_release(struct drm_i915_gem_object *obj) { if (i915_gem_object_has_struct_page(obj)) i915_gem_object_release_memory_region(obj); fput(obj->base.filp); } const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .name = "i915_gem_object_shmem", .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = shmem_get_pages, .put_pages = shmem_put_pages, .truncate = shmem_truncate, .shrink = shmem_shrink, .pwrite = shmem_pwrite, .pread = shmem_pread, .release = shmem_release, }; static int __create_shmem(struct drm_i915_private *i915, struct drm_gem_object *obj, resource_size_t size) { unsigned long flags = VM_NORESERVE; struct file *filp; drm_gem_private_object_init(&i915->drm, obj, size); /* XXX: The __shmem_file_setup() function returns -EINVAL if size is * greater than MAX_LFS_FILESIZE. * To handle the same error as other code that returns -E2BIG when * the size is too large, we add a code that returns -E2BIG when the * size is larger than the size that can be handled. * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false, * so we only needs to check when BITS_PER_LONG is 64. * If BITS_PER_LONG is 32, E2BIG checks are processed when * i915_gem_object_size_2big() is called before init_object() callback * is called. */ if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE) return -E2BIG; if (i915->mm.gemfs) filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, flags); else filp = shmem_file_setup("i915", size, flags); if (IS_ERR(filp)) return PTR_ERR(filp); obj->filp = filp; return 0; } static int shmem_object_init(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, resource_size_t offset, resource_size_t size, resource_size_t page_size, unsigned int flags) { static struct lock_class_key lock_class; struct drm_i915_private *i915 = mem->i915; struct address_space *mapping; unsigned int cache_level; gfp_t mask; int ret; ret = __create_shmem(i915, &obj->base, size); if (ret) return ret; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; if (IS_I965GM(i915) || IS_I965G(i915)) { /* 965gm cannot relocate objects above 4GiB. 
*/ mask &= ~__GFP_HIGHMEM; mask |= __GFP_DMA32; } mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; /* * MTL doesn't snoop CPU cache by default for GPU access (namely * 1-way coherency). However some UMD's are currently depending on * that. Make 1-way coherent the default setting for MTL. A follow * up patch will extend the GEM_CREATE uAPI to allow UMD's specify * caching mode at BO creation time */ if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))) /* On some devices, we can have the GPU use the LLC (the CPU * cache) for about a 10% performance improvement * compared to uncached. Graphics requests other than * display scanout are coherent with the CPU in * accessing this cache. This means in this mode we * don't need to clflush on the CPU side, and on the * GPU side we only need to flush internal caches to * get data visible to the CPU. * * However, we maintain the display planes as UC, and so * need to rebind when first used as such. */ cache_level = I915_CACHE_LLC; else cache_level = I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); i915_gem_object_init_memory_region(obj, mem); return 0; } struct drm_i915_gem_object * i915_gem_object_create_shmem(struct drm_i915_private *i915, resource_size_t size) { return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], size, 0, 0); } /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv, const void *data, resource_size_t size) { struct drm_i915_gem_object *obj; struct file *file; const struct address_space_operations *aops; resource_size_t offset; int err; GEM_WARN_ON(IS_DGFX(dev_priv)); obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE)); if (IS_ERR(obj)) return obj; GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); file = obj->base.filp; aops = file->f_mapping->a_ops; offset = 0; do { unsigned int len = min_t(typeof(size), size, PAGE_SIZE); struct page *page; void *pgdata, *vaddr; err = aops->write_begin(file, file->f_mapping, offset, len, &page, &pgdata); if (err < 0) goto fail; vaddr = kmap(page); memcpy(vaddr, data, len); kunmap(page); err = aops->write_end(file, file->f_mapping, offset, len, len, page, pgdata); if (err < 0) goto fail; size -= len; data += len; offset += len; } while (size); return obj; fail: i915_gem_object_put(obj); return ERR_PTR(err); } static int init_shmem(struct intel_memory_region *mem) { i915_gemfs_init(mem->i915); intel_memory_region_set_name(mem, "system"); return 0; /* We have fallback to the kernel mnt if gemfs init failed. */ } static int release_shmem(struct intel_memory_region *mem) { i915_gemfs_fini(mem->i915); return 0; } static const struct intel_memory_region_ops shmem_region_ops = { .init = init_shmem, .release = release_shmem, .init_object = shmem_object_init, }; struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915, u16 type, u16 instance) { return intel_memory_region_create(i915, 0, totalram_pages() << PAGE_SHIFT, PAGE_SIZE, 0, 0, type, instance, &shmem_region_ops); } bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj) { return obj->ops == &i915_gem_shmem_ops; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
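shmem_pwrite() above fills the shmemfs page cache through aops->write_begin()/write_end() so that a freshly created object can be populated without first pinning its backing pages. A minimal userspace counterpart using the legacy pwrite ioctl might look like the sketch below (illustrative only: the helper name is invented, include paths depend on the installed headers, and the ioctl may be rejected on platforms where this backend is not wired up).

/* Illustrative userspace sketch -- not part of i915_gem_shmem.c. */
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>	/* or <libdrm/i915_drm.h>, depending on installed headers */

/* Hypothetical helper: create a shmem-backed bo and fill it via pwrite. */
static int upload_payload(int drm_fd, const void *data, uint64_t len,
			  uint32_t *handle_out)
{
	struct drm_i915_gem_create create = {
		.size = (len + 4095) & ~4095ull,	/* page-align the bo size */
	};
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -1;

	/*
	 * The copy lands in shmem_pwrite(): one write_begin() /
	 * __copy_from_user_inatomic() / write_end() round per page,
	 * prepopulating the page cache without pinning the object.
	 */
	struct drm_i915_gem_pwrite pwrite = {
		.handle = create.handle,
		.offset = 0,
		.size = len,
		.data_ptr = (uintptr_t)data,
	};
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return -1;

	*handle_out = create.handle;
	return 0;
}

The same prepopulation idea is used in-kernel by i915_gem_object_create_shmem_from_data() above, which writes the supplied payload page by page through the identical write_begin()/write_end() sequence.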
/* * SPDX-License-Identifier: MIT * * Copyright © 2008,2010 Intel Corporation */ #include <linux/dma-resv.h> #include <linux/highmem.h> #include <linux/sync_file.h> #include <linux/uaccess.h> #include <drm/drm_syncobj.h> #include "display/intel_frontbuffer.h" #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" #include "pxp/intel_pxp.h" #include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_file_private.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_evict.h" #include "i915_gem_ioctls.h" #include "i915_reg.h" #include "i915_trace.h" #include "i915_user_extensions.h" struct eb_vma { struct i915_vma *vma; unsigned int flags; /** This vma's place in the execbuf reservation list */ struct drm_i915_gem_exec_object2 *exec; struct list_head bind_link; struct list_head reloc_link; struct hlist_node node; u32 handle; }; enum { FORCE_CPU_RELOC = 1, FORCE_GTT_RELOC, FORCE_GPU_RELOC, #define DBG_FORCE_RELOC 0 /* choose one of the above! */ }; /* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */ #define __EXEC_OBJECT_HAS_PIN BIT(29) #define __EXEC_OBJECT_HAS_FENCE BIT(28) #define __EXEC_OBJECT_USERPTR_INIT BIT(27) #define __EXEC_OBJECT_NEEDS_MAP BIT(26) #define __EXEC_OBJECT_NEEDS_BIAS BIT(25) #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 25) /* all of the above + */ #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) #define __EXEC_HAS_RELOC BIT(31) #define __EXEC_ENGINE_PINNED BIT(30) #define __EXEC_USERPTR_USED BIT(29) #define __EXEC_INTERNAL_FLAGS (~0u << 29) #define UPDATE PIN_OFFSET_FIXED #define BATCH_OFFSET_BIAS (256*1024) #define __I915_EXEC_ILLEGAL_FLAGS \ (__I915_EXEC_UNKNOWN_FLAGS | \ I915_EXEC_CONSTANTS_MASK | \ I915_EXEC_RESOURCE_STREAMER) /* Catch emission of unexpected errors for CI! */ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) #undef EINVAL #define EINVAL ({ \ DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ 22; \ }) #endif /** * DOC: User command execution * * Userspace submits commands to be executed on the GPU as an instruction * stream within a GEM object we call a batchbuffer. This instructions may * refer to other GEM objects containing auxiliary state such as kernels, * samplers, render targets and even secondary batchbuffers. Userspace does * not know where in the GPU memory these objects reside and so before the * batchbuffer is passed to the GPU for execution, those addresses in the * batchbuffer and auxiliary objects are updated. This is known as relocation, * or patching. To try and avoid having to relocate each object on the next * execution, userspace is told the location of those objects in this pass, * but this remains just a hint as the kernel may choose a new location for * any object in the future. * * At the level of talking to the hardware, submitting a batchbuffer for the * GPU to execute is to add content to a buffer from which the HW * command streamer is reading. * * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e. * Execlists, this command is not placed on the same buffer as the * remaining items. * * 2. Add a command to invalidate caches to the buffer. * * 3. Add a batchbuffer start command to the buffer; the start command is * essentially a token together with the GPU address of the batchbuffer * to be executed. * * 4. Add a pipeline flush to the buffer. * * 5. 
Add a memory write command to the buffer to record when the GPU * is done executing the batchbuffer. The memory write writes the * global sequence number of the request, ``i915_request::global_seqno``; * the i915 driver uses the current value in the register to determine * if the GPU has completed the batchbuffer. * * 6. Add a user interrupt command to the buffer. This command instructs * the GPU to issue an interrupt when the command, pipeline flush and * memory write are completed. * * 7. Inform the hardware of the additional commands added to the buffer * (by updating the tail pointer). * * Processing an execbuf ioctl is conceptually split up into a few phases. * * 1. Validation - Ensure all the pointers, handles and flags are valid. * 2. Reservation - Assign GPU address space for every object * 3. Relocation - Update any addresses to point to the final locations * 4. Serialisation - Order the request with respect to its dependencies * 5. Construction - Construct a request to execute the batchbuffer * 6. Submission (at some point in the future execution) * * Reserving resources for the execbuf is the most complicated phase. We * neither want to have to migrate the object in the address space, nor do * we want to have to update any relocations pointing to this object. Ideally, * we want to leave the object where it is and for all the existing relocations * to match. If the object is given a new address, or if userspace thinks the * object is elsewhere, we have to parse all the relocation entries and update * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that * all the target addresses in all of its objects match the value in the * relocation entries and that they all match the presumed offsets given by the * list of execbuffer objects. Using this knowledge, we know that if we haven't * moved any buffers, all the relocation entries are valid and we can skip * the update. (If userspace is wrong, the likely outcome is an impromptu GPU * hang.) The requirement for using I915_EXEC_NO_RELOC are: * * The addresses written in the objects must match the corresponding * reloc.presumed_offset which in turn must match the corresponding * execobject.offset. * * Any render targets written to in the batch must be flagged with * EXEC_OBJECT_WRITE. * * To avoid stalling, execobject.offset should match the current * address of that object within the active context. * * The reservation is done is multiple phases. First we try and keep any * object already bound in its current location - so as long as meets the * constraints imposed by the new execbuffer. Any object left unbound after the * first pass is then fitted into any available idle space. If an object does * not fit, all objects are removed from the reservation and the process rerun * after sorting the objects into a priority order (more difficult to fit * objects are tried first). Failing that, the entire VM is cleared and we try * to fit the execbuf once last time before concluding that it simply will not * fit. * * A small complication to all of this is that we allow userspace not only to * specify an alignment and a size for the object in the address space, but * we also allow userspace to specify the exact offset. This objects are * simpler to place (the location is known a priori) all we have to do is make * sure the space is available. 
* * Once all the objects are in place, patching up the buried pointers to point * to the final locations is a fairly simple job of walking over the relocation * entry arrays, looking up the right address and rewriting the value into * the object. Simple! ... The relocation entries are stored in user memory * and so to access them we have to copy them into a local buffer. That copy * has to avoid taking any pagefaults as they may lead back to a GEM object * requiring the struct_mutex (i.e. recursive deadlock). So once again we split * the relocation into multiple passes. First we try to do everything within an * atomic context (avoid the pagefaults) which requires that we never wait. If * we detect that we may wait, or if we need to fault, then we have to fallback * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm * bells yet?) Dropping the mutex means that we lose all the state we have * built up so far for the execbuf and we must reset any global data. However, * we do leave the objects pinned in their final locations - which is a * potential issue for concurrent execbufs. Once we have left the mutex, we can * allocate and copy all the relocation entries into a large array at our * leisure, reacquire the mutex, reclaim all the objects and other state and * then proceed to update any incorrect addresses with the objects. * * As we process the relocation entries, we maintain a record of whether the * object is being written to. Using NORELOC, we expect userspace to provide * this information instead. We also check whether we can skip the relocation * by comparing the expected value inside the relocation entry with the target's * final address. If they differ, we have to map the current object and rewrite * the 4 or 8 byte pointer within. * * Serialising an execbuf is quite simple according to the rules of the GEM * ABI. Execution within each context is ordered by the order of submission. * Writes to any GEM object are in order of submission and are exclusive. Reads * from a GEM object are unordered with respect to other reads, but ordered by * writes. A write submitted after a read cannot occur before the read, and * similarly any read submitted after a write cannot occur before the write. * Writes are ordered between engines such that only one write occurs at any * time (completing any reads beforehand) - using semaphores where available * and CPU serialisation otherwise. Other GEM access obey the same rules, any * write (either via mmaps using set-domain, or via pwrite) must flush all GPU * reads before starting, and any read (either using set-domain or pread) must * flush all GPU writes before starting. (Note we only employ a barrier before, * we currently rely on userspace not concurrently starting a new execution * whilst reading or writing to an object. This may be an advantage or not * depending on how much you trust userspace not to shoot themselves in the * foot.) Serialisation may just result in the request being inserted into * a DAG awaiting its turn, but most simple is to wait on the CPU until * all dependencies are resolved. * * After all of that, is just a matter of closing the request and handing it to * the hardware (well, leaving it in a queue to be executed). However, we also * offer the ability for batchbuffers to be run with elevated privileges so * that they access otherwise hidden registers. (Used to adjust L3 cache etc.) 
* Before any batch is given extra privileges we first must check that it * contains no nefarious instructions, we check that each instruction is from * our whitelist and all registers are also from an allowed list. We first * copy the user's batchbuffer to a shadow (so that the user doesn't have * access to it, either by the CPU or GPU as we scan it) and then parse each * instruction. If everything is ok, we set a flag telling the hardware to run * the batchbuffer in trusted mode, otherwise the ioctl is rejected. */ struct eb_fence { struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */ struct dma_fence *dma_fence; u64 value; struct dma_fence_chain *chain_fence; }; struct i915_execbuffer { struct drm_i915_private *i915; /** i915 backpointer */ struct drm_file *file; /** per-file lookup tables and limits */ struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */ struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ struct eb_vma *vma; struct intel_gt *gt; /* gt for the execbuf */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ /** our requests to build */ struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; /** identity of the batch obj/vma */ struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1]; struct i915_vma *trampoline; /** trampoline used for chaining */ /** used for excl fence in dma_resv objects when > 1 BB submitted */ struct dma_fence *composite_fence; /** actual size of execobj[] as we may extend it for the cmdparser */ unsigned int buffer_count; /* number of batches in execbuf IOCTL */ unsigned int num_batches; /** list of vma not yet bound during reservation phase */ struct list_head unbound; /** list of vma that have execobj.relocation_count */ struct list_head relocs; struct i915_gem_ww_ctx ww; /** * Track the most recently used object for relocations, as we * frequently have to perform multiple relocations within the same * obj/page */ struct reloc_cache { struct drm_mm_node node; /** temporary GTT binding */ unsigned long vaddr; /** Current kmap address */ unsigned long page; /** Currently mapped page index */ unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */ bool use_64bit_reloc : 1; bool has_llc : 1; bool has_fence : 1; bool needs_unfenced : 1; } reloc_cache; u64 invalid_flags; /** Set of execobj.flags that are invalid */ /** Length of batch within object */ u64 batch_len[MAX_ENGINE_INSTANCE + 1]; u32 batch_start_offset; /** Location within object of batch */ u32 batch_flags; /** Flags composed for emit_bb_start() */ struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */ /** * Indicate either the size of the hastable used to resolve * relocation handles, or if negative that we are using a direct * index into the execobj[]. 
*/ int lut_size; struct hlist_head *buckets; /** ht for relocation handles */ struct eb_fence *fences; unsigned long num_fences; #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1]; #endif }; static int eb_parse(struct i915_execbuffer *eb); static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); static void eb_unpin_engine(struct i915_execbuffer *eb); static void eb_capture_release(struct i915_execbuffer *eb); static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { return intel_engine_requires_cmd_parser(eb->context->engine) || (intel_engine_using_cmd_parser(eb->context->engine) && eb->args->batch_len); } static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { unsigned int size = 1 + ilog2(eb->buffer_count); /* * Without a 1:1 association between relocation handles and * the execobject[] index, we instead create a hashtable. * We size it dynamically based on available memory, starting * first with 1:1 assocative hash and scaling back until * the allocation succeeds. * * Later on we use a positive lut_size to indicate we are * using this hashtable, and a negative value to indicate a * direct lookup. */ do { gfp_t flags; /* While we can still reduce the allocation size, don't * raise a warning and allow the allocation to fail. * On the last pass though, we want to try as hard * as possible to perform the allocation and warn * if it fails. */ flags = GFP_KERNEL; if (size > 1) flags |= __GFP_NORETRY | __GFP_NOWARN; eb->buckets = kzalloc(sizeof(struct hlist_head) << size, flags); if (eb->buckets) break; } while (--size); if (unlikely(!size)) return -ENOMEM; eb->lut_size = size; } else { eb->lut_size = -eb->buffer_count; } return 0; } static bool eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, const struct i915_vma *vma, unsigned int flags) { const u64 start = i915_vma_offset(vma); const u64 size = i915_vma_size(vma); if (size < entry->pad_to_size) return true; if (entry->alignment && !IS_ALIGNED(start, entry->alignment)) return true; if (flags & EXEC_OBJECT_PINNED && start != entry->offset) return true; if (flags & __EXEC_OBJECT_NEEDS_BIAS && start < BATCH_OFFSET_BIAS) return true; if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && (start + size + 4095) >> 32) return true; if (flags & __EXEC_OBJECT_NEEDS_MAP && !i915_vma_is_map_and_fenceable(vma)) return true; return false; } static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry, unsigned int exec_flags) { u64 pin_flags = 0; if (exec_flags & EXEC_OBJECT_NEEDS_GTT) pin_flags |= PIN_GLOBAL; /* * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, * limit address to the first 4GBs for unflagged objects. 
*/ if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) pin_flags |= PIN_ZONE_4G; if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) pin_flags |= PIN_MAPPABLE; if (exec_flags & EXEC_OBJECT_PINNED) pin_flags |= entry->offset | PIN_OFFSET_FIXED; else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; return pin_flags; } static inline int eb_pin_vma(struct i915_execbuffer *eb, const struct drm_i915_gem_exec_object2 *entry, struct eb_vma *ev) { struct i915_vma *vma = ev->vma; u64 pin_flags; int err; if (vma->node.size) pin_flags = __i915_vma_offset(vma); else pin_flags = entry->offset & PIN_OFFSET_MASK; pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE; if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT)) pin_flags |= PIN_GLOBAL; /* Attempt to reuse the current location if available */ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags); if (err == -EDEADLK) return err; if (unlikely(err)) { if (entry->flags & EXEC_OBJECT_PINNED) return err; /* Failing that pick any _free_ space if suitable */ err = i915_vma_pin_ww(vma, &eb->ww, entry->pad_to_size, entry->alignment, eb_pin_flags(entry, ev->flags) | PIN_USER | PIN_NOEVICT | PIN_VALIDATE); if (unlikely(err)) return err; } if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_pin_fence(vma); if (unlikely(err)) return err; if (vma->fence) ev->flags |= __EXEC_OBJECT_HAS_FENCE; } ev->flags |= __EXEC_OBJECT_HAS_PIN; if (eb_vma_misplaced(entry, vma, ev->flags)) return -EBADSLT; return 0; } static inline void eb_unreserve_vma(struct eb_vma *ev) { if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE)) __i915_vma_unpin_fence(ev->vma); ev->flags &= ~__EXEC_OBJECT_RESERVED; } static int eb_validate_vma(struct i915_execbuffer *eb, struct drm_i915_gem_exec_object2 *entry, struct i915_vma *vma) { /* Relocations are disallowed for all platforms after TGL-LP. This * also covers all platforms with local memory. */ if (entry->relocation_count && GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915)) return -EINVAL; if (unlikely(entry->flags & eb->invalid_flags)) return -EINVAL; if (unlikely(entry->alignment && !is_power_of_2_u64(entry->alignment))) return -EINVAL; /* * Offset can be used as input (EXEC_OBJECT_PINNED), reject * any non-page-aligned or non-canonical addresses. */ if (unlikely(entry->flags & EXEC_OBJECT_PINNED && entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) return -EINVAL; /* pad_to_size was once a reserved field, so sanitize it */ if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) { if (unlikely(offset_in_page(entry->pad_to_size))) return -EINVAL; } else { entry->pad_to_size = 0; } /* * From drm_mm perspective address space is continuous, * so from this point we're always using non-canonical * form internally. */ entry->offset = gen8_noncanonical_addr(entry->offset); if (!eb->reloc_cache.has_fence) { entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; } else { if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE || eb->reloc_cache.needs_unfenced) && i915_gem_object_is_tiled(vma->obj)) entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP; } return 0; } static inline bool is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx) { return eb->args->flags & I915_EXEC_BATCH_FIRST ? 
buffer_idx < eb->num_batches : buffer_idx >= eb->args->buffer_count - eb->num_batches; } static int eb_add_vma(struct i915_execbuffer *eb, unsigned int *current_batch, unsigned int i, struct i915_vma *vma) { struct drm_i915_private *i915 = eb->i915; struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; ev->vma = vma; ev->exec = entry; ev->flags = entry->flags; if (eb->lut_size > 0) { ev->handle = entry->handle; hlist_add_head(&ev->node, &eb->buckets[hash_32(entry->handle, eb->lut_size)]); } if (entry->relocation_count) list_add_tail(&ev->reloc_link, &eb->relocs); /* * SNA is doing fancy tricks with compressing batch buffers, which leads * to negative relocation deltas. Usually that works out ok since the * relocate address is still positive, except when the batch is placed * very low in the GTT. Ensure this doesn't happen. * * Note that actual hangs have only been observed on gen7, but for * paranoia do it everywhere. */ if (is_batch_buffer(eb, i)) { if (entry->relocation_count && !(ev->flags & EXEC_OBJECT_PINNED)) ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) ev->flags |= EXEC_OBJECT_NEEDS_FENCE; eb->batches[*current_batch] = ev; if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) { drm_dbg(&i915->drm, "Attempting to use self-modifying batch buffer\n"); return -EINVAL; } if (range_overflows_t(u64, eb->batch_start_offset, eb->args->batch_len, ev->vma->size)) { drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); return -EINVAL; } if (eb->args->batch_len == 0) eb->batch_len[*current_batch] = ev->vma->size - eb->batch_start_offset; else eb->batch_len[*current_batch] = eb->args->batch_len; if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */ drm_dbg(&i915->drm, "Invalid batch length\n"); return -EINVAL; } ++*current_batch; } return 0; } static inline int use_cpu_reloc(const struct reloc_cache *cache, const struct drm_i915_gem_object *obj) { if (!i915_gem_object_has_struct_page(obj)) return false; if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) return true; if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) return false; /* * For objects created by userspace through GEM_CREATE with pat_index * set by set_pat extension, i915_gem_object_has_cache_level() always * return true, otherwise the call would fall back to checking whether * the object is un-cached. 
*/ return (cache->has_llc || obj->cache_dirty || !i915_gem_object_has_cache_level(obj, I915_CACHE_NONE)); } static int eb_reserve_vma(struct i915_execbuffer *eb, struct eb_vma *ev, u64 pin_flags) { struct drm_i915_gem_exec_object2 *entry = ev->exec; struct i915_vma *vma = ev->vma; int err; if (drm_mm_node_allocated(&vma->node) && eb_vma_misplaced(entry, vma, ev->flags)) { err = i915_vma_unbind(vma); if (err) return err; } err = i915_vma_pin_ww(vma, &eb->ww, entry->pad_to_size, entry->alignment, eb_pin_flags(entry, ev->flags) | pin_flags); if (err) return err; if (entry->offset != i915_vma_offset(vma)) { entry->offset = i915_vma_offset(vma) | UPDATE; eb->args->flags |= __EXEC_HAS_RELOC; } if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_pin_fence(vma); if (unlikely(err)) return err; if (vma->fence) ev->flags |= __EXEC_OBJECT_HAS_FENCE; } ev->flags |= __EXEC_OBJECT_HAS_PIN; GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags)); return 0; } static bool eb_unbind(struct i915_execbuffer *eb, bool force) { const unsigned int count = eb->buffer_count; unsigned int i; struct list_head last; bool unpinned = false; /* Resort *all* the objects into priority order */ INIT_LIST_HEAD(&eb->unbound); INIT_LIST_HEAD(&last); for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; unsigned int flags = ev->flags; if (!force && flags & EXEC_OBJECT_PINNED && flags & __EXEC_OBJECT_HAS_PIN) continue; unpinned = true; eb_unreserve_vma(ev); if (flags & EXEC_OBJECT_PINNED) /* Pinned must have their slot */ list_add(&ev->bind_link, &eb->unbound); else if (flags & __EXEC_OBJECT_NEEDS_MAP) /* Map require the lowest 256MiB (aperture) */ list_add_tail(&ev->bind_link, &eb->unbound); else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) /* Prioritise 4GiB region for restricted bo */ list_add(&ev->bind_link, &last); else list_add_tail(&ev->bind_link, &last); } list_splice_tail(&last, &eb->unbound); return unpinned; } static int eb_reserve(struct i915_execbuffer *eb) { struct eb_vma *ev; unsigned int pass; int err = 0; /* * We have one more buffers that we couldn't bind, which could be due to * various reasons. To resolve this we have 4 passes, with every next * level turning the screws tighter: * * 0. Unbind all objects that do not match the GTT constraints for the * execbuffer (fenceable, mappable, alignment etc). Bind all new * objects. This avoids unnecessary unbinding of later objects in order * to make room for the earlier objects *unless* we need to defragment. * * 1. Reorder the buffers, where objects with the most restrictive * placement requirements go first (ignoring fixed location buffers for * now). For example, objects needing the mappable aperture (the first * 256M of GTT), should go first vs objects that can be placed just * about anywhere. Repeat the previous pass. * * 2. Consider buffers that are pinned at a fixed location. Also try to * evict the entire VM this time, leaving only objects that we were * unable to lock. Try again to bind the buffers. (still using the new * buffer order). * * 3. We likely have object lock contention for one or more stubborn * objects in the VM, for which we need to evict to make forward * progress (perhaps we are fighting the shrinker?). When evicting the * VM this time around, anything that we can't lock we now track using * the busy_bo, using the full lock (after dropping the vm->mutex to * prevent deadlocks), instead of trylock. 
We then continue to evict the * VM, this time with the stubborn object locked, which we can now * hopefully unbind (if still bound in the VM). Repeat until the VM is * evicted. Finally we should be able bind everything. */ for (pass = 0; pass <= 3; pass++) { int pin_flags = PIN_USER | PIN_VALIDATE; if (pass == 0) pin_flags |= PIN_NONBLOCK; if (pass >= 1) eb_unbind(eb, pass >= 2); if (pass == 2) { err = mutex_lock_interruptible(&eb->context->vm->mutex); if (!err) { err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL); mutex_unlock(&eb->context->vm->mutex); } if (err) return err; } if (pass == 3) { retry: err = mutex_lock_interruptible(&eb->context->vm->mutex); if (!err) { struct drm_i915_gem_object *busy_bo = NULL; err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo); mutex_unlock(&eb->context->vm->mutex); if (err && busy_bo) { err = i915_gem_object_lock(busy_bo, &eb->ww); i915_gem_object_put(busy_bo); if (!err) goto retry; } } if (err) return err; } list_for_each_entry(ev, &eb->unbound, bind_link) { err = eb_reserve_vma(eb, ev, pin_flags); if (err) break; } if (err != -ENOSPC) break; } return err; } static int eb_select_context(struct i915_execbuffer *eb) { struct i915_gem_context *ctx; ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); if (unlikely(IS_ERR(ctx))) return PTR_ERR(ctx); eb->gem_context = ctx; if (i915_gem_context_has_full_ppgtt(ctx)) eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; return 0; } static int __eb_add_lut(struct i915_execbuffer *eb, u32 handle, struct i915_vma *vma) { struct i915_gem_context *ctx = eb->gem_context; struct i915_lut_handle *lut; int err; lut = i915_lut_handle_alloc(); if (unlikely(!lut)) return -ENOMEM; i915_vma_get(vma); if (!atomic_fetch_inc(&vma->open_count)) i915_vma_reopen(vma); lut->handle = handle; lut->ctx = ctx; /* Check that the context hasn't been closed in the meantime */ err = -EINTR; if (!mutex_lock_interruptible(&ctx->lut_mutex)) { if (likely(!i915_gem_context_is_closed(ctx))) err = radix_tree_insert(&ctx->handles_vma, handle, vma); else err = -ENOENT; if (err == 0) { /* And nor has this handle */ struct drm_i915_gem_object *obj = vma->obj; spin_lock(&obj->lut_lock); if (idr_find(&eb->file->object_idr, handle) == obj) { list_add(&lut->obj_link, &obj->lut_list); } else { radix_tree_delete(&ctx->handles_vma, handle); err = -ENOENT; } spin_unlock(&obj->lut_lock); } mutex_unlock(&ctx->lut_mutex); } if (unlikely(err)) goto err; return 0; err: i915_vma_close(vma); i915_vma_put(vma); i915_lut_handle_free(lut); return err; } static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) { struct i915_address_space *vm = eb->context->vm; do { struct drm_i915_gem_object *obj; struct i915_vma *vma; int err; rcu_read_lock(); vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); if (likely(vma && vma->vm == vm)) vma = i915_vma_tryget(vma); rcu_read_unlock(); if (likely(vma)) return vma; obj = i915_gem_object_lookup(eb->file, handle); if (unlikely(!obj)) return ERR_PTR(-ENOENT); /* * If the user has opted-in for protected-object tracking, make * sure the object encryption can be used. * We only need to do this when the object is first used with * this context, because the context itself will be banned when * the protected objects become invalid. 
*/ if (i915_gem_context_uses_protected_content(eb->gem_context) && i915_gem_object_is_protected(obj)) { err = intel_pxp_key_check(eb->i915->pxp, obj, true); if (err) { i915_gem_object_put(obj); return ERR_PTR(err); } } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { i915_gem_object_put(obj); return vma; } err = __eb_add_lut(eb, handle, vma); if (likely(!err)) return vma; i915_gem_object_put(obj); if (err != -EEXIST) return ERR_PTR(err); } while (1); } static int eb_lookup_vmas(struct i915_execbuffer *eb) { unsigned int i, current_batch = 0; int err = 0; INIT_LIST_HEAD(&eb->relocs); for (i = 0; i < eb->buffer_count; i++) { struct i915_vma *vma; vma = eb_lookup_vma(eb, eb->exec[i].handle); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; } err = eb_validate_vma(eb, &eb->exec[i], vma); if (unlikely(err)) { i915_vma_put(vma); goto err; } err = eb_add_vma(eb, &current_batch, i, vma); if (err) return err; if (i915_gem_object_is_userptr(vma->obj)) { err = i915_gem_object_userptr_submit_init(vma->obj); if (err) { if (i + 1 < eb->buffer_count) { /* * Execbuffer code expects last vma entry to be NULL, * since we already initialized this entry, * set the next value to NULL or we mess up * cleanup handling. */ eb->vma[i + 1].vma = NULL; } return err; } eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT; eb->args->flags |= __EXEC_USERPTR_USED; } } return 0; err: eb->vma[i].vma = NULL; return err; } static int eb_lock_vmas(struct i915_execbuffer *eb) { unsigned int i; int err; for (i = 0; i < eb->buffer_count; i++) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; err = i915_gem_object_lock(vma->obj, &eb->ww); if (err) return err; } return 0; } static int eb_validate_vmas(struct i915_execbuffer *eb) { unsigned int i; int err; INIT_LIST_HEAD(&eb->unbound); err = eb_lock_vmas(eb); if (err) return err; for (i = 0; i < eb->buffer_count; i++) { struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; err = eb_pin_vma(eb, entry, ev); if (err == -EDEADLK) return err; if (!err) { if (entry->offset != i915_vma_offset(vma)) { entry->offset = i915_vma_offset(vma) | UPDATE; eb->args->flags |= __EXEC_HAS_RELOC; } } else { eb_unreserve_vma(ev); list_add_tail(&ev->bind_link, &eb->unbound); if (drm_mm_node_allocated(&vma->node)) { err = i915_vma_unbind(vma); if (err) return err; } } /* Reserve enough slots to accommodate composite fences */ err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches); if (err) return err; GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && eb_vma_misplaced(&eb->exec[i], vma, ev->flags)); } if (!list_empty(&eb->unbound)) return eb_reserve(eb); return 0; } static struct eb_vma * eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) { if (eb->lut_size < 0) { if (handle >= -eb->lut_size) return NULL; return &eb->vma[handle]; } else { struct hlist_head *head; struct eb_vma *ev; head = &eb->buckets[hash_32(handle, eb->lut_size)]; hlist_for_each_entry(ev, head, node) { if (ev->handle == handle) return ev; } return NULL; } } static void eb_release_vmas(struct i915_execbuffer *eb, bool final) { const unsigned int count = eb->buffer_count; unsigned int i; for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; if (!vma) break; eb_unreserve_vma(ev); if (final) i915_vma_put(vma); } eb_capture_release(eb); eb_unpin_engine(eb); } static void eb_destroy(const struct i915_execbuffer *eb) { if (eb->lut_size > 0) kfree(eb->buckets); } static inline u64 
relocation_target(const struct drm_i915_gem_relocation_entry *reloc, const struct i915_vma *target) { return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target)); } static void reloc_cache_init(struct reloc_cache *cache, struct drm_i915_private *i915) { cache->page = -1; cache->vaddr = 0; /* Must be a variable in the struct to allow GCC to unroll. */ cache->graphics_ver = GRAPHICS_VER(i915); cache->has_llc = HAS_LLC(i915); cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); cache->has_fence = cache->graphics_ver < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->node.flags = 0; } static inline void *unmask_page(unsigned long p) { return (void *)(uintptr_t)(p & PAGE_MASK); } static inline unsigned int unmask_flags(unsigned long p) { return p & ~PAGE_MASK; } #define KMAP 0x4 /* after CLFLUSH_FLAGS */ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) { struct drm_i915_private *i915 = container_of(cache, struct i915_execbuffer, reloc_cache)->i915; return to_gt(i915)->ggtt; } static void reloc_cache_unmap(struct reloc_cache *cache) { void *vaddr; if (!cache->vaddr) return; vaddr = unmask_page(cache->vaddr); if (cache->vaddr & KMAP) kunmap_atomic(vaddr); else io_mapping_unmap_atomic((void __iomem *)vaddr); } static void reloc_cache_remap(struct reloc_cache *cache, struct drm_i915_gem_object *obj) { void *vaddr; if (!cache->vaddr) return; if (cache->vaddr & KMAP) { struct page *page = i915_gem_object_get_page(obj, cache->page); vaddr = kmap_atomic(page); cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; } else { struct i915_ggtt *ggtt = cache_to_ggtt(cache); unsigned long offset; offset = cache->node.start; if (!drm_mm_node_allocated(&cache->node)) offset += cache->page << PAGE_SHIFT; cache->vaddr = (unsigned long) io_mapping_map_atomic_wc(&ggtt->iomap, offset); } } static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb) { void *vaddr; if (!cache->vaddr) return; vaddr = unmask_page(cache->vaddr); if (cache->vaddr & KMAP) { struct drm_i915_gem_object *obj = (struct drm_i915_gem_object *)cache->node.mm; if (cache->vaddr & CLFLUSH_AFTER) mb(); kunmap_atomic(vaddr); i915_gem_object_finish_access(obj); } else { struct i915_ggtt *ggtt = cache_to_ggtt(cache); intel_gt_flush_ggtt_writes(ggtt->vm.gt); io_mapping_unmap_atomic((void __iomem *)vaddr); if (drm_mm_node_allocated(&cache->node)) { ggtt->vm.clear_range(&ggtt->vm, cache->node.start, cache->node.size); mutex_lock(&ggtt->vm.mutex); drm_mm_remove_node(&cache->node); mutex_unlock(&ggtt->vm.mutex); } else { i915_vma_unpin((struct i915_vma *)cache->node.mm); } } cache->vaddr = 0; cache->page = -1; } static void *reloc_kmap(struct drm_i915_gem_object *obj, struct reloc_cache *cache, unsigned long pageno) { void *vaddr; struct page *page; if (cache->vaddr) { kunmap_atomic(unmask_page(cache->vaddr)); } else { unsigned int flushes; int err; err = i915_gem_object_prepare_write(obj, &flushes); if (err) return ERR_PTR(err); BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); cache->vaddr = flushes | KMAP; cache->node.mm = (void *)obj; if (flushes) mb(); } page = i915_gem_object_get_page(obj, pageno); if (!obj->mm.dirty) set_page_dirty(page); vaddr = kmap_atomic(page); cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; cache->page = pageno; return vaddr; } static void *reloc_iomap(struct i915_vma *batch, struct i915_execbuffer *eb, unsigned long page) { struct drm_i915_gem_object *obj = batch->obj; struct 
reloc_cache *cache = &eb->reloc_cache; struct i915_ggtt *ggtt = cache_to_ggtt(cache); unsigned long offset; void *vaddr; if (cache->vaddr) { intel_gt_flush_ggtt_writes(ggtt->vm.gt); io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); } else { struct i915_vma *vma = ERR_PTR(-ENODEV); int err; if (i915_gem_object_is_tiled(obj)) return ERR_PTR(-EINVAL); if (use_cpu_reloc(cache, obj)) return NULL; err = i915_gem_object_set_to_gtt_domain(obj, true); if (err) return ERR_PTR(err); /* * i915_gem_object_ggtt_pin_ww may attempt to remove the batch * VMA from the object list because we no longer pin. * * Only attempt to pin the batch buffer to ggtt if the current batch * is not inside ggtt, or the batch buffer is not misplaced. */ if (!i915_is_ggtt(batch->vm) || !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) { vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0, PIN_MAPPABLE | PIN_NONBLOCK /* NOWARN */ | PIN_NOEVICT); } if (vma == ERR_PTR(-EDEADLK)) return vma; if (IS_ERR(vma)) { memset(&cache->node, 0, sizeof(cache->node)); mutex_lock(&ggtt->vm.mutex); err = drm_mm_insert_node_in_range (&ggtt->vm.mm, &cache->node, PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); mutex_unlock(&ggtt->vm.mutex); if (err) /* no inactive aperture space, use cpu reloc */ return NULL; } else { cache->node.start = i915_ggtt_offset(vma); cache->node.mm = (void *)vma; } } offset = cache->node.start; if (drm_mm_node_allocated(&cache->node)) { ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, page), offset, i915_gem_get_pat_index(ggtt->vm.i915, I915_CACHE_NONE), 0); } else { offset += page << PAGE_SHIFT; } vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, offset); cache->page = page; cache->vaddr = (unsigned long)vaddr; return vaddr; } static void *reloc_vaddr(struct i915_vma *vma, struct i915_execbuffer *eb, unsigned long page) { struct reloc_cache *cache = &eb->reloc_cache; void *vaddr; if (cache->page == page) { vaddr = unmask_page(cache->vaddr); } else { vaddr = NULL; if ((cache->vaddr & KMAP) == 0) vaddr = reloc_iomap(vma, eb, page); if (!vaddr) vaddr = reloc_kmap(vma->obj, cache, page); } return vaddr; } static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) { if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { if (flushes & CLFLUSH_BEFORE) drm_clflush_virt_range(addr, sizeof(*addr)); *addr = value; /* * Writes to the same cacheline are serialised by the CPU * (including clflush). On the write path, we only require * that it hits memory in an orderly fashion and place * mb barriers at the start and end of the relocation phase * to ensure ordering of clflush wrt to the system. 
*/ if (flushes & CLFLUSH_AFTER) drm_clflush_virt_range(addr, sizeof(*addr)); } else *addr = value; } static u64 relocate_entry(struct i915_vma *vma, const struct drm_i915_gem_relocation_entry *reloc, struct i915_execbuffer *eb, const struct i915_vma *target) { u64 target_addr = relocation_target(reloc, target); u64 offset = reloc->offset; bool wide = eb->reloc_cache.use_64bit_reloc; void *vaddr; repeat: vaddr = reloc_vaddr(vma, eb, offset >> PAGE_SHIFT); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32))); clflush_write32(vaddr + offset_in_page(offset), lower_32_bits(target_addr), eb->reloc_cache.vaddr); if (wide) { offset += sizeof(u32); target_addr >>= 32; wide = false; goto repeat; } return target->node.start | UPDATE; } static u64 eb_relocate_entry(struct i915_execbuffer *eb, struct eb_vma *ev, const struct drm_i915_gem_relocation_entry *reloc) { struct drm_i915_private *i915 = eb->i915; struct eb_vma *target; int err; /* we've already hold a reference to all valid objects */ target = eb_get_vma(eb, reloc->target_handle); if (unlikely(!target)) return -ENOENT; /* Validate that the target is in a valid r/w GPU domain */ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { drm_dbg(&i915->drm, "reloc with multiple write domains: " "target %d offset %d " "read %08x write %08x", reloc->target_handle, (int) reloc->offset, reloc->read_domains, reloc->write_domain); return -EINVAL; } if (unlikely((reloc->write_domain | reloc->read_domains) & ~I915_GEM_GPU_DOMAINS)) { drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: " "target %d offset %d " "read %08x write %08x", reloc->target_handle, (int) reloc->offset, reloc->read_domains, reloc->write_domain); return -EINVAL; } if (reloc->write_domain) { target->flags |= EXEC_OBJECT_WRITE; /* * Sandybridge PPGTT errata: We need a global gtt mapping * for MI and pipe_control writes because the gpu doesn't * properly redirect them through the ppgtt for non_secure * batchbuffers. */ if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && GRAPHICS_VER(eb->i915) == 6 && !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) { struct i915_vma *vma = target->vma; reloc_cache_unmap(&eb->reloc_cache); mutex_lock(&vma->vm->mutex); err = i915_vma_bind(target->vma, target->vma->obj->pat_index, PIN_GLOBAL, NULL, NULL); mutex_unlock(&vma->vm->mutex); reloc_cache_remap(&eb->reloc_cache, ev->vma->obj); if (err) return err; } } /* * If the relocation already has the right value in it, no * more work needs to be done. */ if (!DBG_FORCE_RELOC && gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset) return 0; /* Check that the relocation address is valid... */ if (unlikely(reloc->offset > ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { drm_dbg(&i915->drm, "Relocation beyond object bounds: " "target %d offset %d size %d.\n", reloc->target_handle, (int)reloc->offset, (int)ev->vma->size); return -EINVAL; } if (unlikely(reloc->offset & 3)) { drm_dbg(&i915->drm, "Relocation not 4-byte aligned: " "target %d offset %d.\n", reloc->target_handle, (int)reloc->offset); return -EINVAL; } /* * If we write into the object, we need to force the synchronisation * barrier, either with an asynchronous clflush or if we executed the * patching using the GPU (though that should be serialised by the * timeline). To be completely sure, and since we are required to * do relocations we are already stalling, disable the user's opt * out of our synchronisation. 
*/ ev->flags &= ~EXEC_OBJECT_ASYNC; /* and update the user's relocation entry */ return relocate_entry(ev->vma, reloc, eb, target->vma); } static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; const struct drm_i915_gem_exec_object2 *entry = ev->exec; struct drm_i915_gem_relocation_entry __user *urelocs = u64_to_user_ptr(entry->relocs_ptr); unsigned long remain = entry->relocation_count; if (unlikely(remain > N_RELOC(ULONG_MAX))) return -EINVAL; /* * We must check that the entire relocation array is safe * to read. However, if the array is not writable the user loses * the updated relocation values. */ if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs)))) return -EFAULT; do { struct drm_i915_gem_relocation_entry *r = stack; unsigned int count = min_t(unsigned long, remain, ARRAY_SIZE(stack)); unsigned int copied; /* * This is the fast path and we cannot handle a pagefault * whilst holding the struct mutex lest the user pass in the * relocations contained within a mmaped bo. For in such a case * we, the page fault handler would call i915_gem_fault() and * we would try to acquire the struct mutex again. Obviously * this is bad and so lockdep complains vehemently. */ pagefault_disable(); copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); pagefault_enable(); if (unlikely(copied)) { remain = -EFAULT; goto out; } remain -= count; do { u64 offset = eb_relocate_entry(eb, ev, r); if (likely(offset == 0)) { } else if ((s64)offset < 0) { remain = (int)offset; goto out; } else { /* * Note that reporting an error now * leaves everything in an inconsistent * state as we have *already* changed * the relocation value inside the * object. As we have not changed the * reloc.presumed_offset or will not * change the execobject.offset, on the * call we may not rewrite the value * inside the object, leaving it * dangling and causing a GPU hang. Unless * userspace dynamically rebuilds the * relocations on each execbuf rather than * presume a static tree. * * We did previously check if the relocations * were writable (access_ok), an error now * would be a strange race with mprotect, * having already demonstrated that we * can read from this userspace address. 
*/ offset = gen8_canonical_addr(offset & ~UPDATE); __put_user(offset, &urelocs[r - stack].presumed_offset); } } while (r++, --count); urelocs += ARRAY_SIZE(stack); } while (remain); out: reloc_cache_reset(&eb->reloc_cache, eb); return remain; } static int eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev) { const struct drm_i915_gem_exec_object2 *entry = ev->exec; struct drm_i915_gem_relocation_entry *relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); unsigned int i; int err; for (i = 0; i < entry->relocation_count; i++) { u64 offset = eb_relocate_entry(eb, ev, &relocs[i]); if ((s64)offset < 0) { err = (int)offset; goto err; } } err = 0; err: reloc_cache_reset(&eb->reloc_cache, eb); return err; } static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) { const char __user *addr, *end; unsigned long size; char __maybe_unused c; size = entry->relocation_count; if (size == 0) return 0; if (size > N_RELOC(ULONG_MAX)) return -EINVAL; addr = u64_to_user_ptr(entry->relocs_ptr); size *= sizeof(struct drm_i915_gem_relocation_entry); if (!access_ok(addr, size)) return -EFAULT; end = addr + size; for (; addr < end; addr += PAGE_SIZE) { int err = __get_user(c, addr); if (err) return err; } return __get_user(c, end - 1); } static int eb_copy_relocations(const struct i915_execbuffer *eb) { struct drm_i915_gem_relocation_entry *relocs; const unsigned int count = eb->buffer_count; unsigned int i; int err; for (i = 0; i < count; i++) { const unsigned int nreloc = eb->exec[i].relocation_count; struct drm_i915_gem_relocation_entry __user *urelocs; unsigned long size; unsigned long copied; if (nreloc == 0) continue; err = check_relocations(&eb->exec[i]); if (err) goto err; urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); size = nreloc * sizeof(*relocs); relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { err = -ENOMEM; goto err; } /* copy_from_user is limited to < 4GiB */ copied = 0; do { unsigned int len = min_t(u64, BIT_ULL(31), size - copied); if (__copy_from_user((char *)relocs + copied, (char __user *)urelocs + copied, len)) goto end; copied += len; } while (copied < size); /* * As we do not update the known relocation offsets after * relocating (due to the complexities in lock handling), * we need to mark them as invalid now so that we force the * relocation processing next time. Just in case the target * object is evicted and then rebound into its old * presumed_offset before the next execbuffer - if that * happened we would make the mistake of assuming that the * relocations were valid. 
*/ if (!user_access_begin(urelocs, size)) goto end; for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, &urelocs[copied].presumed_offset, end_user); user_access_end(); eb->exec[i].relocs_ptr = (uintptr_t)relocs; } return 0; end_user: user_access_end(); end: kvfree(relocs); err = -EFAULT; err: while (i--) { relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); if (eb->exec[i].relocation_count) kvfree(relocs); } return err; } static int eb_prefault_relocations(const struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; unsigned int i; for (i = 0; i < count; i++) { int err; err = check_relocations(&eb->exec[i]); if (err) return err; } return 0; } static int eb_reinit_userptr(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; unsigned int i; int ret; if (likely(!(eb->args->flags & __EXEC_USERPTR_USED))) return 0; for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; if (!i915_gem_object_is_userptr(ev->vma->obj)) continue; ret = i915_gem_object_userptr_submit_init(ev->vma->obj); if (ret) return ret; ev->flags |= __EXEC_OBJECT_USERPTR_INIT; } return 0; } static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb) { bool have_copy = false; struct eb_vma *ev; int err = 0; repeat: if (signal_pending(current)) { err = -ERESTARTSYS; goto out; } /* We may process another execbuffer during the unlock... */ eb_release_vmas(eb, false); i915_gem_ww_ctx_fini(&eb->ww); /* * We take 3 passes through the slowpath. * * 1 - we try to just prefault all the user relocation entries and * then attempt to reuse the atomic pagefault disabled fast path again. * * 2 - we copy the user entries to a local buffer here, outside of the * atomic pagefault-disabled section, and allow ourselves to wait upon * any rendering before performing the relocations * * 3 - we already have a local copy of the relocation entries, but * were interrupted (EAGAIN) whilst waiting for the objects, try again. */ if (!err) { err = eb_prefault_relocations(eb); } else if (!have_copy) { err = eb_copy_relocations(eb); have_copy = err == 0; } else { cond_resched(); err = 0; } if (!err) err = eb_reinit_userptr(eb); i915_gem_ww_ctx_init(&eb->ww, true); if (err) goto out; /* reacquire the objects */ repeat_validate: err = eb_pin_engine(eb, false); if (err) goto err; err = eb_validate_vmas(eb); if (err) goto err; GEM_BUG_ON(!eb->batches[0]); list_for_each_entry(ev, &eb->relocs, reloc_link) { if (!have_copy) { err = eb_relocate_vma(eb, ev); if (err) break; } else { err = eb_relocate_vma_slow(eb, ev); if (err) break; } } if (err == -EDEADLK) goto err; if (err && !have_copy) goto repeat; if (err) goto err; /* as last step, parse the command buffer */ err = eb_parse(eb); if (err) goto err; /* * Leave the user relocations as are, this is the painfully slow path, * and we want to avoid the complication of dropping the lock whilst * having buffers reserved in the aperture and so causing spurious * ENOSPC for random operations.
*/ err: if (err == -EDEADLK) { eb_release_vmas(eb, false); err = i915_gem_ww_ctx_backoff(&eb->ww); if (!err) goto repeat_validate; } if (err == -EAGAIN) goto repeat; out: if (have_copy) { const unsigned int count = eb->buffer_count; unsigned int i; for (i = 0; i < count; i++) { const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct drm_i915_gem_relocation_entry *relocs; if (!entry->relocation_count) continue; relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); kvfree(relocs); } } return err; } static int eb_relocate_parse(struct i915_execbuffer *eb) { int err; bool throttle = true; retry: err = eb_pin_engine(eb, throttle); if (err) { if (err != -EDEADLK) return err; goto err; } /* only throttle once, even if we didn't need to throttle */ throttle = false; err = eb_validate_vmas(eb); if (err == -EAGAIN) goto slow; else if (err) goto err; /* The objects are in their final locations, apply the relocations. */ if (eb->args->flags & __EXEC_HAS_RELOC) { struct eb_vma *ev; list_for_each_entry(ev, &eb->relocs, reloc_link) { err = eb_relocate_vma(eb, ev); if (err) break; } if (err == -EDEADLK) goto err; else if (err) goto slow; } if (!err) err = eb_parse(eb); err: if (err == -EDEADLK) { eb_release_vmas(eb, false); err = i915_gem_ww_ctx_backoff(&eb->ww); if (!err) goto retry; } return err; slow: err = eb_relocate_parse_slow(eb); if (err) /* * If the user expects the execobject.offset and * reloc.presumed_offset to be an exact match, * as for using NO_RELOC, then we cannot update * the execobject.offset until we have completed * relocation. */ eb->args->flags &= ~__EXEC_HAS_RELOC; return err; } /* * Using two helper loops for the order of which requests / batches are created * and added the to backend. Requests are created in order from the parent to * the last child. Requests are added in the reverse order, from the last child * to parent. This is done for locking reasons as the timeline lock is acquired * during request creation and released when the request is added to the * backend. To make lockdep happy (see intel_context_timeline_lock) this must be * the ordering. 
*/ #define for_each_batch_create_order(_eb, _i) \ for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i)) #define for_each_batch_add_order(_eb, _i) \ BUILD_BUG_ON(!typecheck(int, _i)); \ for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i)) static struct i915_request * eb_find_first_request_added(struct i915_execbuffer *eb) { int i; for_each_batch_add_order(eb, i) if (eb->requests[i]) return eb->requests[i]; GEM_BUG_ON("Request not found"); return NULL; } #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) /* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ static int eb_capture_stage(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; unsigned int i = count, j; while (i--) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; unsigned int flags = ev->flags; if (!(flags & EXEC_OBJECT_CAPTURE)) continue; if (i915_gem_context_is_recoverable(eb->gem_context) && (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0))) return -EINVAL; for_each_batch_create_order(eb, j) { struct i915_capture_list *capture; capture = kmalloc(sizeof(*capture), GFP_KERNEL); if (!capture) continue; capture->next = eb->capture_lists[j]; capture->vma_res = i915_vma_resource_get(vma->resource); eb->capture_lists[j] = capture; } } return 0; } /* Commit once we're in the critical path */ static void eb_capture_commit(struct i915_execbuffer *eb) { unsigned int j; for_each_batch_create_order(eb, j) { struct i915_request *rq = eb->requests[j]; if (!rq) break; rq->capture_list = eb->capture_lists[j]; eb->capture_lists[j] = NULL; } } /* * Release anything that didn't get committed due to errors. * The capture_list will otherwise be freed at request retire. */ static void eb_capture_release(struct i915_execbuffer *eb) { unsigned int j; for_each_batch_create_order(eb, j) { if (eb->capture_lists[j]) { i915_request_free_capture_list(eb->capture_lists[j]); eb->capture_lists[j] = NULL; } } } static void eb_capture_list_clear(struct i915_execbuffer *eb) { memset(eb->capture_lists, 0, sizeof(eb->capture_lists)); } #else static int eb_capture_stage(struct i915_execbuffer *eb) { return 0; } static void eb_capture_commit(struct i915_execbuffer *eb) { } static void eb_capture_release(struct i915_execbuffer *eb) { } static void eb_capture_list_clear(struct i915_execbuffer *eb) { } #endif static int eb_move_to_gpu(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; unsigned int i = count; int err = 0, j; while (i--) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; unsigned int flags = ev->flags; struct drm_i915_gem_object *obj = vma->obj; assert_vma_held(vma); /* * If the GPU is not _reading_ through the CPU cache, we need * to make sure that any writes (both previous GPU writes from * before a change in snooping levels and normal CPU writes) * caught in that cache are flushed to main memory. * * We want to say * obj->cache_dirty && * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) * but gcc's optimiser doesn't handle that as well and emits * two jumps instead of one. Maybe one day... * * FIXME: There is also sync flushing in set_pages(), which * serves a different purpose(some of the time at least). * * We should consider: * * 1. Rip out the async flush code. * * 2. Or make the sync flushing use the async clflush path * using mandatory fences underneath. Currently the below * async flush happens after we bind the object. 
*/ if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { if (i915_gem_clflush_object(obj, 0)) flags &= ~EXEC_OBJECT_ASYNC; } /* We only need to await on the first request */ if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { err = i915_request_await_object (eb_find_first_request_added(eb), obj, flags & EXEC_OBJECT_WRITE); } for_each_batch_add_order(eb, j) { if (err) break; if (!eb->requests[j]) continue; err = _i915_vma_move_to_active(vma, eb->requests[j], j ? NULL : eb->composite_fence ? eb->composite_fence : &eb->requests[j]->fence, flags | __EXEC_OBJECT_NO_RESERVE | __EXEC_OBJECT_NO_REQUEST_AWAIT); } } #ifdef CONFIG_MMU_NOTIFIER if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) { read_lock(&eb->i915->mm.notifier_lock); /* * count is always at least 1, otherwise __EXEC_USERPTR_USED * could not have been set */ for (i = 0; i < count; i++) { struct eb_vma *ev = &eb->vma[i]; struct drm_i915_gem_object *obj = ev->vma->obj; if (!i915_gem_object_is_userptr(obj)) continue; err = i915_gem_object_userptr_submit_done(obj); if (err) break; } read_unlock(&eb->i915->mm.notifier_lock); } #endif if (unlikely(err)) goto err_skip; /* Unconditionally flush any chipset caches (for streaming writes). */ intel_gt_chipset_flush(eb->gt); eb_capture_commit(eb); return 0; err_skip: for_each_batch_create_order(eb, j) { if (!eb->requests[j]) break; i915_request_set_error_once(eb->requests[j], err); } return err; } static int i915_gem_check_execbuffer(struct drm_i915_private *i915, struct drm_i915_gem_execbuffer2 *exec) { if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) return -EINVAL; /* Kernel clipping was a DRI1 misfeature */ if (!(exec->flags & (I915_EXEC_FENCE_ARRAY | I915_EXEC_USE_EXTENSIONS))) { if (exec->num_cliprects || exec->cliprects_ptr) return -EINVAL; } if (exec->DR4 == 0xffffffff) { drm_dbg(&i915->drm, "UXA submitting garbage DR4, fixing up\n"); exec->DR4 = 0; } if (exec->DR1 || exec->DR4) return -EINVAL; if ((exec->batch_start_offset | exec->batch_len) & 0x7) return -EINVAL; return 0; } static int i915_reset_gen7_sol_offsets(struct i915_request *rq) { u32 *cs; int i; if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) { drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); return -EINVAL; } cs = intel_ring_begin(rq, 4 * 2 + 2); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(4); for (i = 0; i < 4; i++) { *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); *cs++ = 0; } *cs++ = MI_NOOP; intel_ring_advance(rq, cs); return 0; } static struct i915_vma * shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned int flags) { struct i915_vma *vma; int err; vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) return vma; err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE); if (err) return ERR_PTR(err); return vma; } static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma) { /* * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. * hsw should have this fixed, but bdw mucks it up again. 
*/ if (eb->batch_flags & I915_DISPATCH_SECURE) return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE); return NULL; } static int eb_parse(struct i915_execbuffer *eb) { struct drm_i915_private *i915 = eb->i915; struct intel_gt_buffer_pool_node *pool = eb->batch_pool; struct i915_vma *shadow, *trampoline, *batch; unsigned long len; int err; if (!eb_use_cmdparser(eb)) { batch = eb_dispatch_secure(eb, eb->batches[0]->vma); if (IS_ERR(batch)) return PTR_ERR(batch); goto secure_batch; } if (intel_context_is_parallel(eb->context)) return -EINVAL; len = eb->batch_len[0]; if (!CMDPARSER_USES_GGTT(eb->i915)) { /* * ppGTT backed shadow buffers must be mapped RO, to prevent * post-scan tampering */ if (!eb->context->vm->has_read_only) { drm_dbg(&i915->drm, "Cannot prevent post-scan tampering without RO capable vm\n"); return -EINVAL; } } else { len += I915_CMD_PARSER_TRAMPOLINE_SIZE; } if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */ return -EINVAL; if (!pool) { pool = intel_gt_get_buffer_pool(eb->gt, len, I915_MAP_WB); if (IS_ERR(pool)) return PTR_ERR(pool); eb->batch_pool = pool; } err = i915_gem_object_lock(pool->obj, &eb->ww); if (err) return err; shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER); if (IS_ERR(shadow)) return PTR_ERR(shadow); intel_gt_buffer_pool_mark_used(pool); i915_gem_object_set_readonly(shadow->obj); shadow->private = pool; trampoline = NULL; if (CMDPARSER_USES_GGTT(eb->i915)) { trampoline = shadow; shadow = shadow_batch_pin(eb, pool->obj, &eb->gt->ggtt->vm, PIN_GLOBAL); if (IS_ERR(shadow)) return PTR_ERR(shadow); shadow->private = pool; eb->batch_flags |= I915_DISPATCH_SECURE; } batch = eb_dispatch_secure(eb, shadow); if (IS_ERR(batch)) return PTR_ERR(batch); err = dma_resv_reserve_fences(shadow->obj->base.resv, 1); if (err) return err; err = intel_engine_cmd_parser(eb->context->engine, eb->batches[0]->vma, eb->batch_start_offset, eb->batch_len[0], shadow, trampoline); if (err) return err; eb->batches[0] = &eb->vma[eb->buffer_count++]; eb->batches[0]->vma = i915_vma_get(shadow); eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; eb->trampoline = trampoline; eb->batch_start_offset = 0; secure_batch: if (batch) { if (intel_context_is_parallel(eb->context)) return -EINVAL; eb->batches[0] = &eb->vma[eb->buffer_count++]; eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; eb->batches[0]->vma = i915_vma_get(batch); } return 0; } static int eb_request_submit(struct i915_execbuffer *eb, struct i915_request *rq, struct i915_vma *batch, u64 batch_len) { int err; if (intel_context_nopreempt(rq->context)) __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { err = i915_reset_gen7_sol_offsets(rq); if (err) return err; } /* * After we completed waiting for other engines (using HW semaphores) * then we can signal that this request/batch is ready to run. This * allows us to determine if the batch is still waiting on the GPU * or actually running by checking the breadcrumb. 
*/ if (rq->context->engine->emit_init_breadcrumb) { err = rq->context->engine->emit_init_breadcrumb(rq); if (err) return err; } err = rq->context->engine->emit_bb_start(rq, i915_vma_offset(batch) + eb->batch_start_offset, batch_len, eb->batch_flags); if (err) return err; if (eb->trampoline) { GEM_BUG_ON(intel_context_is_parallel(rq->context)); GEM_BUG_ON(eb->batch_start_offset); err = rq->context->engine->emit_bb_start(rq, i915_vma_offset(eb->trampoline) + batch_len, 0, 0); if (err) return err; } return 0; } static int eb_submit(struct i915_execbuffer *eb) { unsigned int i; int err; err = eb_move_to_gpu(eb); for_each_batch_create_order(eb, i) { if (!eb->requests[i]) break; trace_i915_request_queue(eb->requests[i], eb->batch_flags); if (!err) err = eb_request_submit(eb, eb->requests[i], eb->batches[i]->vma, eb->batch_len[i]); } return err; } /* * Find one BSD ring to dispatch the corresponding BSD command. * The engine index is returned. */ static unsigned int gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; /* Check whether the file_priv has already selected one ring. */ if ((int)file_priv->bsd_engine < 0) file_priv->bsd_engine = get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]); return file_priv->bsd_engine; } static const enum intel_engine_id user_ring_map[] = { [I915_EXEC_DEFAULT] = RCS0, [I915_EXEC_RENDER] = RCS0, [I915_EXEC_BLT] = BCS0, [I915_EXEC_BSD] = VCS0, [I915_EXEC_VEBOX] = VECS0 }; static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce) { struct intel_ring *ring = ce->ring; struct intel_timeline *tl = ce->timeline; struct i915_request *rq; /* * Completely unscientific finger-in-the-air estimates for suitable * maximum user request size (to avoid blocking) and then backoff. */ if (intel_ring_update_space(ring) >= PAGE_SIZE) return NULL; /* * Find a request that after waiting upon, there will be at least half * the ring available. The hysteresis allows us to compete for the * shared ring and should mean that we sleep less often prior to * claiming our resources, but not so long that the ring completely * drains before we can submit our next request. */ list_for_each_entry(rq, &tl->requests, link) { if (rq->ring != ring) continue; if (__intel_ring_space(rq->postfix, ring->emit, ring->size) > ring->size / 2) break; } if (&rq->link == &tl->requests) return NULL; /* weird, we will check again later for real */ return i915_request_get(rq); } static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, bool throttle) { struct intel_timeline *tl; struct i915_request *rq = NULL; /* * Take a local wakeref for preparing to dispatch the execbuf as * we expect to access the hardware fairly frequently in the * process, and require the engine to be kept awake between accesses. * Upon dispatch, we acquire another prolonged wakeref that we hold * until the timeline is idle, which in turn releases the wakeref * taken on the engine, and the parent device. */ tl = intel_context_timeline_lock(ce); if (IS_ERR(tl)) return PTR_ERR(tl); intel_context_enter(ce); if (throttle) rq = eb_throttle(eb, ce); intel_context_timeline_unlock(tl); if (rq) { bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; long timeout = nonblock ? 
0 : MAX_SCHEDULE_TIMEOUT; if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, timeout) < 0) { i915_request_put(rq); /* * Error path, cannot use intel_context_timeline_lock as * that is user interruptable and this clean up step * must be done. */ mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); mutex_unlock(&ce->timeline->mutex); if (nonblock) return -EWOULDBLOCK; else return -EINTR; } i915_request_put(rq); } return 0; } static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle) { struct intel_context *ce = eb->context, *child; int err; int i = 0, j = 0; GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); if (unlikely(intel_context_is_banned(ce))) return -EIO; /* * Pinning the contexts may generate requests in order to acquire * GGTT space, so do this first before we reserve a seqno for * ourselves. */ err = intel_context_pin_ww(ce, &eb->ww); if (err) return err; for_each_child(ce, child) { err = intel_context_pin_ww(child, &eb->ww); GEM_BUG_ON(err); /* perma-pinned should incr a counter */ } for_each_child(ce, child) { err = eb_pin_timeline(eb, child, throttle); if (err) goto unwind; ++i; } err = eb_pin_timeline(eb, ce, throttle); if (err) goto unwind; eb->args->flags |= __EXEC_ENGINE_PINNED; return 0; unwind: for_each_child(ce, child) { if (j++ < i) { mutex_lock(&child->timeline->mutex); intel_context_exit(child); mutex_unlock(&child->timeline->mutex); } } for_each_child(ce, child) intel_context_unpin(child); intel_context_unpin(ce); return err; } static void eb_unpin_engine(struct i915_execbuffer *eb) { struct intel_context *ce = eb->context, *child; if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) return; eb->args->flags &= ~__EXEC_ENGINE_PINNED; for_each_child(ce, child) { mutex_lock(&child->timeline->mutex); intel_context_exit(child); mutex_unlock(&child->timeline->mutex); intel_context_unpin(child); } mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); mutex_unlock(&ce->timeline->mutex); intel_context_unpin(ce); } static unsigned int eb_select_legacy_ring(struct i915_execbuffer *eb) { struct drm_i915_private *i915 = eb->i915; struct drm_i915_gem_execbuffer2 *args = eb->args; unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; if (user_ring_id != I915_EXEC_BSD && (args->flags & I915_EXEC_BSD_MASK)) { drm_dbg(&i915->drm, "execbuf with non bsd ring but with invalid " "bsd dispatch flags: %d\n", (int)(args->flags)); return -1; } if (user_ring_id == I915_EXEC_BSD && i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO] > 1) { unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; if (bsd_idx == I915_EXEC_BSD_DEFAULT) { bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file); } else if (bsd_idx >= I915_EXEC_BSD_RING1 && bsd_idx <= I915_EXEC_BSD_RING2) { bsd_idx >>= I915_EXEC_BSD_SHIFT; bsd_idx--; } else { drm_dbg(&i915->drm, "execbuf with unknown bsd ring: %u\n", bsd_idx); return -1; } return _VCS(bsd_idx); } if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n", user_ring_id); return -1; } return user_ring_map[user_ring_id]; } static int eb_select_engine(struct i915_execbuffer *eb) { struct intel_context *ce, *child; struct intel_gt *gt; unsigned int idx; int err; if (i915_gem_context_user_engines(eb->gem_context)) idx = eb->args->flags & I915_EXEC_RING_MASK; else idx = eb_select_legacy_ring(eb); ce = i915_gem_context_get_engine(eb->gem_context, idx); if (IS_ERR(ce)) return PTR_ERR(ce); if (intel_context_is_parallel(ce)) { if (eb->buffer_count < ce->parallel.number_children + 1) { intel_context_put(ce); return 
-EINVAL; } if (eb->batch_start_offset || eb->args->batch_len) { intel_context_put(ce); return -EINVAL; } } eb->num_batches = ce->parallel.number_children + 1; gt = ce->engine->gt; for_each_child(ce, child) intel_context_get(child); intel_gt_pm_get(gt); /* * Keep GT0 active on MTL so that i915_vma_parked() doesn't * free VMAs while execbuf ioctl is validating VMAs. */ if (gt->info.id) intel_gt_pm_get(to_gt(gt->i915)); if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { err = intel_context_alloc_state(ce); if (err) goto err; } for_each_child(ce, child) { if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) { err = intel_context_alloc_state(child); if (err) goto err; } } /* * ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. */ err = intel_gt_terminally_wedged(ce->engine->gt); if (err) goto err; if (!i915_vm_tryget(ce->vm)) { err = -ENOENT; goto err; } eb->context = ce; eb->gt = ce->engine->gt; /* * Make sure engine pool stays alive even if we call intel_context_put * during ww handling. The pool is destroyed when last pm reference * is dropped, which breaks our -EDEADLK handling. */ return err; err: if (gt->info.id) intel_gt_pm_put(to_gt(gt->i915)); intel_gt_pm_put(gt); for_each_child(ce, child) intel_context_put(child); intel_context_put(ce); return err; } static void eb_put_engine(struct i915_execbuffer *eb) { struct intel_context *child; i915_vm_put(eb->context->vm); /* * This works in conjunction with eb_select_engine() to prevent * i915_vma_parked() from interfering while execbuf validates vmas. */ if (eb->gt->info.id) intel_gt_pm_put(to_gt(eb->gt->i915)); intel_gt_pm_put(eb->gt); for_each_child(eb->context, child) intel_context_put(child); intel_context_put(eb->context); } static void __free_fence_array(struct eb_fence *fences, unsigned int n) { while (n--) { drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); dma_fence_put(fences[n].dma_fence); dma_fence_chain_free(fences[n].chain_fence); } kvfree(fences); } static int add_timeline_fence_array(struct i915_execbuffer *eb, const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences) { struct drm_i915_gem_exec_fence __user *user_fences; u64 __user *user_values; struct eb_fence *f; u64 nfences; int err = 0; nfences = timeline_fences->fence_count; if (!nfences) return 0; /* Check multiplication overflow for access_ok() and kvmalloc_array() */ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); if (nfences > min_t(unsigned long, ULONG_MAX / sizeof(*user_fences), SIZE_MAX / sizeof(*f)) - eb->num_fences) return -EINVAL; user_fences = u64_to_user_ptr(timeline_fences->handles_ptr); if (!access_ok(user_fences, nfences * sizeof(*user_fences))) return -EFAULT; user_values = u64_to_user_ptr(timeline_fences->values_ptr); if (!access_ok(user_values, nfences * sizeof(*user_values))) return -EFAULT; f = krealloc(eb->fences, (eb->num_fences + nfences) * sizeof(*f), __GFP_NOWARN | GFP_KERNEL); if (!f) return -ENOMEM; eb->fences = f; f += eb->num_fences; BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); while (nfences--) { struct drm_i915_gem_exec_fence user_fence; struct drm_syncobj *syncobj; struct dma_fence *fence = NULL; u64 point; if (__copy_from_user(&user_fence, user_fences++, sizeof(user_fence))) return -EFAULT; if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) return -EINVAL; if (__get_user(point, user_values++)) return -EFAULT; syncobj = drm_syncobj_find(eb->file, user_fence.handle); if (!syncobj) { drm_dbg(&eb->i915->drm, "Invalid syncobj handle 
provided\n"); return -ENOENT; } fence = drm_syncobj_fence_get(syncobj); if (!fence && user_fence.flags && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { drm_dbg(&eb->i915->drm, "Syncobj handle has no fence\n"); drm_syncobj_put(syncobj); return -EINVAL; } if (fence) err = dma_fence_chain_find_seqno(&fence, point); if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { drm_dbg(&eb->i915->drm, "Syncobj handle missing requested point %llu\n", point); dma_fence_put(fence); drm_syncobj_put(syncobj); return err; } /* * A point might have been signaled already and * garbage collected from the timeline. In this case * just ignore the point and carry on. */ if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { drm_syncobj_put(syncobj); continue; } /* * For timeline syncobjs we need to preallocate chains for * later signaling. */ if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) { /* * Waiting and signaling the same point (when point != * 0) would break the timeline. */ if (user_fence.flags & I915_EXEC_FENCE_WAIT) { drm_dbg(&eb->i915->drm, "Trying to wait & signal the same timeline point.\n"); dma_fence_put(fence); drm_syncobj_put(syncobj); return -EINVAL; } f->chain_fence = dma_fence_chain_alloc(); if (!f->chain_fence) { drm_syncobj_put(syncobj); dma_fence_put(fence); return -ENOMEM; } } else { f->chain_fence = NULL; } f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); f->dma_fence = fence; f->value = point; f++; eb->num_fences++; } return 0; } static int add_fence_array(struct i915_execbuffer *eb) { struct drm_i915_gem_execbuffer2 *args = eb->args; struct drm_i915_gem_exec_fence __user *user; unsigned long num_fences = args->num_cliprects; struct eb_fence *f; if (!(args->flags & I915_EXEC_FENCE_ARRAY)) return 0; if (!num_fences) return 0; /* Check multiplication overflow for access_ok() and kvmalloc_array() */ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); if (num_fences > min_t(unsigned long, ULONG_MAX / sizeof(*user), SIZE_MAX / sizeof(*f) - eb->num_fences)) return -EINVAL; user = u64_to_user_ptr(args->cliprects_ptr); if (!access_ok(user, num_fences * sizeof(*user))) return -EFAULT; f = krealloc(eb->fences, (eb->num_fences + num_fences) * sizeof(*f), __GFP_NOWARN | GFP_KERNEL); if (!f) return -ENOMEM; eb->fences = f; f += eb->num_fences; while (num_fences--) { struct drm_i915_gem_exec_fence user_fence; struct drm_syncobj *syncobj; struct dma_fence *fence = NULL; if (__copy_from_user(&user_fence, user++, sizeof(user_fence))) return -EFAULT; if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) return -EINVAL; syncobj = drm_syncobj_find(eb->file, user_fence.handle); if (!syncobj) { drm_dbg(&eb->i915->drm, "Invalid syncobj handle provided\n"); return -ENOENT; } if (user_fence.flags & I915_EXEC_FENCE_WAIT) { fence = drm_syncobj_fence_get(syncobj); if (!fence) { drm_dbg(&eb->i915->drm, "Syncobj handle has no fence\n"); drm_syncobj_put(syncobj); return -EINVAL; } } BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); f->dma_fence = fence; f->value = 0; f->chain_fence = NULL; f++; eb->num_fences++; } return 0; } static void put_fence_array(struct eb_fence *fences, int num_fences) { if (fences) __free_fence_array(fences, num_fences); } static int await_fence_array(struct i915_execbuffer *eb, struct i915_request *rq) { unsigned int n; int err; for (n = 0; n < eb->num_fences; n++) { if (!eb->fences[n].dma_fence) continue; err = i915_request_await_dma_fence(rq, 
eb->fences[n].dma_fence); if (err < 0) return err; } return 0; } static void signal_fence_array(const struct i915_execbuffer *eb, struct dma_fence * const fence) { unsigned int n; for (n = 0; n < eb->num_fences; n++) { struct drm_syncobj *syncobj; unsigned int flags; syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2); if (!(flags & I915_EXEC_FENCE_SIGNAL)) continue; if (eb->fences[n].chain_fence) { drm_syncobj_add_point(syncobj, eb->fences[n].chain_fence, fence, eb->fences[n].value); /* * The chain's ownership is transferred to the * timeline. */ eb->fences[n].chain_fence = NULL; } else { drm_syncobj_replace_fence(syncobj, fence); } } } static int parse_timeline_fences(struct i915_user_extension __user *ext, void *data) { struct i915_execbuffer *eb = data; struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences; if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences))) return -EFAULT; return add_timeline_fence_array(eb, &timeline_fences); } static void retire_requests(struct intel_timeline *tl, struct i915_request *end) { struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &tl->requests, link) if (rq == end || !i915_request_retire(rq)) break; } static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq, int err, bool last_parallel) { struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_sched_attr attr = {}; struct i915_request *prev; lockdep_assert_held(&tl->mutex); lockdep_unpin_lock(&tl->mutex, rq->cookie); trace_i915_request_add(rq); prev = __i915_request_commit(rq); /* Check that the context wasn't destroyed before submission */ if (likely(!intel_context_is_closed(eb->context))) { attr = eb->gem_context->sched; } else { /* Serialise with context_close via the add_to_timeline */ i915_request_set_error_once(rq, -ENOENT); __i915_request_skip(rq); err = -ENOENT; /* override any transient errors */ } if (intel_context_is_parallel(eb->context)) { if (err) { __i915_request_skip(rq); set_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags); } if (last_parallel) set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags); } __i915_request_queue(rq, &attr); /* Try to clean up the client's timeline after submitting the request */ if (prev) retire_requests(tl, prev); mutex_unlock(&tl->mutex); return err; } static int eb_requests_add(struct i915_execbuffer *eb, int err) { int i; /* * We iterate in reverse order of creation to release timeline mutexes in * same order. */ for_each_batch_add_order(eb, i) { struct i915_request *rq = eb->requests[i]; if (!rq) continue; err |= eb_request_add(eb, rq, err, i == 0); } return err; } static const i915_user_extension_fn execbuf_extensions[] = { [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, }; static int parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, struct i915_execbuffer *eb) { if (!(args->flags & I915_EXEC_USE_EXTENSIONS)) return 0; /* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot * have another flag also using it at the same time. 
*/ if (eb->args->flags & I915_EXEC_FENCE_ARRAY) return -EINVAL; if (args->num_cliprects != 0) return -EINVAL; return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr), execbuf_extensions, ARRAY_SIZE(execbuf_extensions), eb); } static void eb_requests_get(struct i915_execbuffer *eb) { unsigned int i; for_each_batch_create_order(eb, i) { if (!eb->requests[i]) break; i915_request_get(eb->requests[i]); } } static void eb_requests_put(struct i915_execbuffer *eb) { unsigned int i; for_each_batch_create_order(eb, i) { if (!eb->requests[i]) break; i915_request_put(eb->requests[i]); } } static struct sync_file * eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) { struct sync_file *out_fence = NULL; struct dma_fence_array *fence_array; struct dma_fence **fences; unsigned int i; GEM_BUG_ON(!intel_context_is_parent(eb->context)); fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); if (!fences) return ERR_PTR(-ENOMEM); for_each_batch_create_order(eb, i) { fences[i] = &eb->requests[i]->fence; __set_bit(I915_FENCE_FLAG_COMPOSITE, &eb->requests[i]->fence.flags); } fence_array = dma_fence_array_create(eb->num_batches, fences, eb->context->parallel.fence_context, eb->context->parallel.seqno++, false); if (!fence_array) { kfree(fences); return ERR_PTR(-ENOMEM); } /* Move ownership to the dma_fence_array created above */ for_each_batch_create_order(eb, i) dma_fence_get(fences[i]); if (out_fence_fd != -1) { out_fence = sync_file_create(&fence_array->base); /* sync_file now owns fence_arry, drop creation ref */ dma_fence_put(&fence_array->base); if (!out_fence) return ERR_PTR(-ENOMEM); } eb->composite_fence = &fence_array->base; return out_fence; } static struct sync_file * eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq, struct dma_fence *in_fence, int out_fence_fd) { struct sync_file *out_fence = NULL; int err; if (unlikely(eb->gem_context->syncobj)) { struct dma_fence *fence; fence = drm_syncobj_fence_get(eb->gem_context->syncobj); err = i915_request_await_dma_fence(rq, fence); dma_fence_put(fence); if (err) return ERR_PTR(err); } if (in_fence) { if (eb->args->flags & I915_EXEC_FENCE_SUBMIT) err = i915_request_await_execution(rq, in_fence); else err = i915_request_await_dma_fence(rq, in_fence); if (err < 0) return ERR_PTR(err); } if (eb->fences) { err = await_fence_array(eb, rq); if (err) return ERR_PTR(err); } if (intel_context_is_parallel(eb->context)) { out_fence = eb_composite_fence_create(eb, out_fence_fd); if (IS_ERR(out_fence)) return ERR_PTR(-ENOMEM); } else if (out_fence_fd != -1) { out_fence = sync_file_create(&rq->fence); if (!out_fence) return ERR_PTR(-ENOMEM); } return out_fence; } static struct intel_context * eb_find_context(struct i915_execbuffer *eb, unsigned int context_number) { struct intel_context *child; if (likely(context_number == 0)) return eb->context; for_each_child(eb->context, child) if (!--context_number) return child; GEM_BUG_ON("Context not found"); return NULL; } static struct sync_file * eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, int out_fence_fd) { struct sync_file *out_fence = NULL; unsigned int i; for_each_batch_create_order(eb, i) { /* Allocate a request for this batch buffer nice and early. 
*/ eb->requests[i] = i915_request_create(eb_find_context(eb, i)); if (IS_ERR(eb->requests[i])) { out_fence = ERR_CAST(eb->requests[i]); eb->requests[i] = NULL; return out_fence; } /* * Only the first request added (committed to backend) has to * take the in fences into account as all subsequent requests * will have fences inserted inbetween them. */ if (i + 1 == eb->num_batches) { out_fence = eb_fences_add(eb, eb->requests[i], in_fence, out_fence_fd); if (IS_ERR(out_fence)) return out_fence; } /* * Not really on stack, but we don't want to call * kfree on the batch_snapshot when we put it, so use the * _onstack interface. */ if (eb->batches[i]->vma) eb->requests[i]->batch_res = i915_vma_resource_get(eb->batches[i]->vma->resource); if (eb->batch_pool) { GEM_BUG_ON(intel_context_is_parallel(eb->context)); intel_gt_buffer_pool_mark_active(eb->batch_pool, eb->requests[i]); } } return out_fence; } static int i915_gem_do_execbuffer(struct drm_device *dev, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec) { struct drm_i915_private *i915 = to_i915(dev); struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; int out_fence_fd = -1; int err; BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); eb.i915 = i915; eb.file = file; eb.args = args; if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) args->flags |= __EXEC_HAS_RELOC; eb.exec = exec; eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); eb.vma[0].vma = NULL; eb.batch_pool = NULL; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; reloc_cache_init(&eb.reloc_cache, eb.i915); eb.buffer_count = args->buffer_count; eb.batch_start_offset = args->batch_start_offset; eb.trampoline = NULL; eb.fences = NULL; eb.num_fences = 0; eb_capture_list_clear(&eb); memset(eb.requests, 0, sizeof(struct i915_request *) * ARRAY_SIZE(eb.requests)); eb.composite_fence = NULL; eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { if (GRAPHICS_VER(i915) >= 11) return -ENODEV; /* Return -EPERM to trigger fallback code on old binaries. 
*/ if (!HAS_SECURE_BATCHES(i915)) return -EPERM; if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) return -EPERM; eb.batch_flags |= I915_DISPATCH_SECURE; } if (args->flags & I915_EXEC_IS_PINNED) eb.batch_flags |= I915_DISPATCH_PINNED; err = parse_execbuf2_extensions(args, &eb); if (err) goto err_ext; err = add_fence_array(&eb); if (err) goto err_ext; #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT) if (args->flags & IN_FENCES) { if ((args->flags & IN_FENCES) == IN_FENCES) return -EINVAL; in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); if (!in_fence) { err = -EINVAL; goto err_ext; } } #undef IN_FENCES if (args->flags & I915_EXEC_FENCE_OUT) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { err = out_fence_fd; goto err_in_fence; } } err = eb_create(&eb); if (err) goto err_out_fence; GEM_BUG_ON(!eb.lut_size); err = eb_select_context(&eb); if (unlikely(err)) goto err_destroy; err = eb_select_engine(&eb); if (unlikely(err)) goto err_context; err = eb_lookup_vmas(&eb); if (err) { eb_release_vmas(&eb, true); goto err_engine; } i915_gem_ww_ctx_init(&eb.ww, true); err = eb_relocate_parse(&eb); if (err) { /* * If the user expects the execobject.offset and * reloc.presumed_offset to be an exact match, * as for using NO_RELOC, then we cannot update * the execobject.offset until we have completed * relocation. */ args->flags &= ~__EXEC_HAS_RELOC; goto err_vma; } ww_acquire_done(&eb.ww.ctx); err = eb_capture_stage(&eb); if (err) goto err_vma; out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); if (IS_ERR(out_fence)) { err = PTR_ERR(out_fence); out_fence = NULL; if (eb.requests[0]) goto err_request; else goto err_vma; } err = eb_submit(&eb); err_request: eb_requests_get(&eb); err = eb_requests_add(&eb, err); if (eb.fences) signal_fence_array(&eb, eb.composite_fence ? eb.composite_fence : &eb.requests[0]->fence); if (unlikely(eb.gem_context->syncobj)) { drm_syncobj_replace_fence(eb.gem_context->syncobj, eb.composite_fence ? eb.composite_fence : &eb.requests[0]->fence); } if (out_fence) { if (err == 0) { fd_install(out_fence_fd, out_fence->file); args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ args->rsvd2 |= (u64)out_fence_fd << 32; out_fence_fd = -1; } else { fput(out_fence->file); } } if (!out_fence && eb.composite_fence) dma_fence_put(eb.composite_fence); eb_requests_put(&eb); err_vma: eb_release_vmas(&eb, true); WARN_ON(err == -EDEADLK); i915_gem_ww_ctx_fini(&eb.ww); if (eb.batch_pool) intel_gt_buffer_pool_put(eb.batch_pool); err_engine: eb_put_engine(&eb); err_context: i915_gem_context_put(eb.gem_context); err_destroy: eb_destroy(&eb); err_out_fence: if (out_fence_fd != -1) put_unused_fd(out_fence_fd); err_in_fence: dma_fence_put(in_fence); err_ext: put_fence_array(eb.fences, eb.num_fences); return err; } static size_t eb_element_size(void) { return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); } static bool check_buffer_count(size_t count) { const size_t sz = eb_element_size(); /* * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup * array size (see eb_create()). Otherwise, we can accept an array as * large as can be addressed (though use large arrays at your peril)! 
*/ return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); } int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list; const size_t count = args->buffer_count; int err; if (!check_buffer_count(count)) { drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); return -EINVAL; } err = i915_gem_check_execbuffer(i915, args); if (err) return err; /* Allocate extra slots for use by the command parser */ exec2_list = kvmalloc_array(count + 2, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", count); return -ENOMEM; } if (copy_from_user(exec2_list, u64_to_user_ptr(args->buffers_ptr), sizeof(*exec2_list) * count)) { drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count); kvfree(exec2_list); return -EFAULT; } err = i915_gem_do_execbuffer(dev, file, args, exec2_list); /* * Now that we have begun execution of the batchbuffer, we ignore * any new error after this point. Also given that we have already * updated the associated relocations, we try to write out the current * object locations irrespective of any error. */ if (args->flags & __EXEC_HAS_RELOC) { struct drm_i915_gem_exec_object2 __user *user_exec_list = u64_to_user_ptr(args->buffers_ptr); unsigned int i; /* Copy the new buffer offsets back to the user's exec list. */ /* * Note: count * sizeof(*user_exec_list) does not overflow, * because we checked 'count' in check_buffer_count(). * * And this range already got effectively checked earlier * when we did the "copy_from_user()" above. */ if (!user_write_access_begin(user_exec_list, count * sizeof(*user_exec_list))) goto end; for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) continue; exec2_list[i].offset = gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); unsafe_put_user(exec2_list[i].offset, &user_exec_list[i].offset, end_user); } end_user: user_write_access_end(); end:; } args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; kvfree(exec2_list); return err; }
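/*
 * Editor's illustrative sketch (not part of the driver above): a standalone,
 * user-space-compilable model of what relocation_target() and relocate_entry()
 * compute -- the target address is the VMA offset plus the signed reloc delta,
 * sign-extended from bit 47 into a gen8 "canonical" address, and then written
 * into the batch as one or two 32-bit dwords depending on use_64bit_reloc.
 * All example_* names are hypothetical and exist only for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t example_canonical_addr(uint64_t address)
{
	/* Sign-extend bit 47 into bits 48..63, mirroring gen8_canonical_addr(). */
	return (uint64_t)((int64_t)(address << 16) >> 16);
}

static void example_write_reloc(uint32_t *dw, uint64_t target_addr, int use_64bit_reloc)
{
	/* The lower 32 bits always land at the relocation offset... */
	dw[0] = (uint32_t)target_addr;
	/* ...and on platforms with 64-bit relocations the upper half follows
	 * in the next dword; pre-gen8 addresses fit entirely in 32 bits.
	 */
	if (use_64bit_reloc)
		dw[1] = (uint32_t)(target_addr >> 32);
}

int main(void)
{
	uint32_t batch[2] = { 0, 0 };
	uint64_t vma_offset = 0x0000ffff00001000ull;	/* pretend i915_vma_offset() result */
	int32_t delta = 0x40;				/* pretend reloc->delta */

	example_write_reloc(batch, example_canonical_addr(vma_offset + delta), 1);
	printf("reloc dwords: 0x%08x 0x%08x\n", batch[0], batch[1]);
	return 0;
}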
linux-master
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context to invoke a save of the context we actually care about. In
 * fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                          refcount   pincount   active
 * S0: initial state                            0          0        0
 * S1: context created                          1          0        0
 * S2: context is currently running             2          1        X
 * S3: GPU referenced, but not current          2          0        1
 * S4: context is current, but destroyed        1          1        0
 * S5: like S3, but destroyed                   1          0        1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context while it is still active.
* */ #include <linux/highmem.h> #include <linux/log2.h> #include <linux/nospec.h> #include <drm/drm_cache.h> #include <drm/drm_syncobj.h> #include "gt/gen6_ppgtt.h" #include "gt/intel_context.h" #include "gt/intel_context_param.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_user.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "pxp/intel_pxp.h" #include "i915_file_private.h" #include "i915_gem_context.h" #include "i915_trace.h" #include "i915_user_extensions.h" #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 static struct kmem_cache *slab_luts; struct i915_lut_handle *i915_lut_handle_alloc(void) { return kmem_cache_alloc(slab_luts, GFP_KERNEL); } void i915_lut_handle_free(struct i915_lut_handle *lut) { return kmem_cache_free(slab_luts, lut); } static void lut_close(struct i915_gem_context *ctx) { struct radix_tree_iter iter; void __rcu **slot; mutex_lock(&ctx->lut_mutex); rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); struct drm_i915_gem_object *obj = vma->obj; struct i915_lut_handle *lut; if (!kref_get_unless_zero(&obj->base.refcount)) continue; spin_lock(&obj->lut_lock); list_for_each_entry(lut, &obj->lut_list, obj_link) { if (lut->ctx != ctx) continue; if (lut->handle != iter.index) continue; list_del(&lut->obj_link); break; } spin_unlock(&obj->lut_lock); if (&lut->obj_link != &obj->lut_list) { i915_lut_handle_free(lut); radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); i915_vma_close(vma); i915_gem_object_put(obj); } i915_gem_object_put(obj); } rcu_read_unlock(); mutex_unlock(&ctx->lut_mutex); } static struct intel_context * lookup_user_engine(struct i915_gem_context *ctx, unsigned long flags, const struct i915_engine_class_instance *ci) #define LOOKUP_USER_INDEX BIT(0) { int idx; if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) return ERR_PTR(-EINVAL); if (!i915_gem_context_user_engines(ctx)) { struct intel_engine_cs *engine; engine = intel_engine_lookup_user(ctx->i915, ci->engine_class, ci->engine_instance); if (!engine) return ERR_PTR(-EINVAL); idx = engine->legacy_idx; } else { idx = ci->engine_instance; } return i915_gem_context_get_engine(ctx, idx); } static int validate_priority(struct drm_i915_private *i915, const struct drm_i915_gem_context_param *args) { s64 priority = args->value; if (args->size) return -EINVAL; if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) return -ENODEV; if (priority > I915_CONTEXT_MAX_USER_PRIORITY || priority < I915_CONTEXT_MIN_USER_PRIORITY) return -EINVAL; if (priority > I915_CONTEXT_DEFAULT_PRIORITY && !capable(CAP_SYS_NICE)) return -EPERM; return 0; } static void proto_context_close(struct drm_i915_private *i915, struct i915_gem_proto_context *pc) { int i; if (pc->pxp_wakeref) intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); if (pc->vm) i915_vm_put(pc->vm); if (pc->user_engines) { for (i = 0; i < pc->num_user_engines; i++) kfree(pc->user_engines[i].siblings); kfree(pc->user_engines); } kfree(pc); } static int proto_context_set_persistence(struct drm_i915_private *i915, struct i915_gem_proto_context *pc, bool persist) { if (persist) { /* * Only contexts that are short-lived [that will expire or be * reset] are allowed to survive past termination. We require * hangcheck to ensure that the persistent requests are healthy. 
*/ if (!i915->params.enable_hangcheck) return -EINVAL; pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); } else { /* To cancel a context we use "preempt-to-idle" */ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) return -ENODEV; /* * If the cancel fails, we then need to reset, cleanly! * * If the per-engine reset fails, all hope is lost! We resort * to a full GPU reset in that unlikely case, but realistically * if the engine could not reset, the full reset does not fare * much better. The damage has been done. * * However, if we cannot reset an engine by itself, we cannot * cleanup a hanging persistent context without causing * colateral damage, and we should not pretend we can by * exposing the interface. */ if (!intel_has_reset_engine(to_gt(i915))) return -ENODEV; pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); } return 0; } static int proto_context_set_protected(struct drm_i915_private *i915, struct i915_gem_proto_context *pc, bool protected) { int ret = 0; if (!protected) { pc->uses_protected_content = false; } else if (!intel_pxp_is_enabled(i915->pxp)) { ret = -ENODEV; } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { ret = -EPERM; } else { pc->uses_protected_content = true; /* * protected context usage requires the PXP session to be up, * which in turn requires the device to be active. */ pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (!intel_pxp_is_active(i915->pxp)) ret = intel_pxp_start(i915->pxp); } return ret; } static struct i915_gem_proto_context * proto_context_create(struct drm_i915_private *i915, unsigned int flags) { struct i915_gem_proto_context *pc, *err; pc = kzalloc(sizeof(*pc), GFP_KERNEL); if (!pc) return ERR_PTR(-ENOMEM); pc->num_user_engines = -1; pc->user_engines = NULL; pc->user_flags = BIT(UCONTEXT_BANNABLE) | BIT(UCONTEXT_RECOVERABLE); if (i915->params.enable_hangcheck) pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); pc->sched.priority = I915_PRIORITY_NORMAL; if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { if (!HAS_EXECLISTS(i915)) { err = ERR_PTR(-EINVAL); goto proto_close; } pc->single_timeline = true; } return pc; proto_close: proto_context_close(i915, pc); return err; } static int proto_context_register_locked(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, u32 *id) { int ret; void *old; lockdep_assert_held(&fpriv->proto_context_lock); ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL); if (ret) return ret; old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL); if (xa_is_err(old)) { xa_erase(&fpriv->context_xa, *id); return xa_err(old); } WARN_ON(old); return 0; } static int proto_context_register(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, u32 *id) { int ret; mutex_lock(&fpriv->proto_context_lock); ret = proto_context_register_locked(fpriv, pc, id); mutex_unlock(&fpriv->proto_context_lock); return ret; } static struct i915_address_space * i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_address_space *vm; xa_lock(&file_priv->vm_xa); vm = xa_load(&file_priv->vm_xa, id); if (vm) kref_get(&vm->ref); xa_unlock(&file_priv->vm_xa); return vm; } static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, const struct drm_i915_gem_context_param *args) { struct drm_i915_private *i915 = fpriv->i915; struct i915_address_space *vm; if (args->size) return -EINVAL; if (!HAS_FULL_PPGTT(i915)) return -ENODEV; if (upper_32_bits(args->value)) 
return -ENOENT; vm = i915_gem_vm_lookup(fpriv, args->value); if (!vm) return -ENOENT; if (pc->vm) i915_vm_put(pc->vm); pc->vm = vm; return 0; } struct set_proto_ctx_engines { struct drm_i915_private *i915; unsigned num_engines; struct i915_gem_proto_engine *engines; }; static int set_proto_ctx_engines_balance(struct i915_user_extension __user *base, void *data) { struct i915_context_engines_load_balance __user *ext = container_of_user(base, typeof(*ext), base); const struct set_proto_ctx_engines *set = data; struct drm_i915_private *i915 = set->i915; struct intel_engine_cs **siblings; u16 num_siblings, idx; unsigned int n; int err; if (!HAS_EXECLISTS(i915)) return -ENODEV; if (get_user(idx, &ext->engine_index)) return -EFAULT; if (idx >= set->num_engines) { drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", idx, set->num_engines); return -EINVAL; } idx = array_index_nospec(idx, set->num_engines); if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) { drm_dbg(&i915->drm, "Invalid placement[%d], already occupied\n", idx); return -EEXIST; } if (get_user(num_siblings, &ext->num_siblings)) return -EFAULT; err = check_user_mbz(&ext->flags); if (err) return err; err = check_user_mbz(&ext->mbz64); if (err) return err; if (num_siblings == 0) return 0; siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); if (!siblings) return -ENOMEM; for (n = 0; n < num_siblings; n++) { struct i915_engine_class_instance ci; if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { err = -EFAULT; goto err_siblings; } siblings[n] = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!siblings[n]) { drm_dbg(&i915->drm, "Invalid sibling[%d]: { class:%d, inst:%d }\n", n, ci.engine_class, ci.engine_instance); err = -EINVAL; goto err_siblings; } } if (num_siblings == 1) { set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; set->engines[idx].engine = siblings[0]; kfree(siblings); } else { set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; set->engines[idx].num_siblings = num_siblings; set->engines[idx].siblings = siblings; } return 0; err_siblings: kfree(siblings); return err; } static int set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) { struct i915_context_engines_bond __user *ext = container_of_user(base, typeof(*ext), base); const struct set_proto_ctx_engines *set = data; struct drm_i915_private *i915 = set->i915; struct i915_engine_class_instance ci; struct intel_engine_cs *master; u16 idx, num_bonds; int err, n; if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { drm_dbg(&i915->drm, "Bonding not supported on this platform\n"); return -ENODEV; } if (get_user(idx, &ext->virtual_index)) return -EFAULT; if (idx >= set->num_engines) { drm_dbg(&i915->drm, "Invalid index for virtual engine: %d >= %d\n", idx, set->num_engines); return -EINVAL; } idx = array_index_nospec(idx, set->num_engines); if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) { drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); return -EINVAL; } if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) { drm_dbg(&i915->drm, "Bonding with virtual engines not allowed\n"); return -EINVAL; } err = check_user_mbz(&ext->flags); if (err) return err; for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { err = check_user_mbz(&ext->mbz64[n]); if (err) return err; } if (copy_from_user(&ci, &ext->master, sizeof(ci))) return -EFAULT; master = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if 
(!master) { drm_dbg(&i915->drm, "Unrecognised master engine: { class:%u, instance:%u }\n", ci.engine_class, ci.engine_instance); return -EINVAL; } if (intel_engine_uses_guc(master)) { drm_dbg(&i915->drm, "bonding extension not supported with GuC submission"); return -ENODEV; } if (get_user(num_bonds, &ext->num_bonds)) return -EFAULT; for (n = 0; n < num_bonds; n++) { struct intel_engine_cs *bond; if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) return -EFAULT; bond = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!bond) { drm_dbg(&i915->drm, "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", n, ci.engine_class, ci.engine_instance); return -EINVAL; } } return 0; } static int set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, void *data) { struct i915_context_engines_parallel_submit __user *ext = container_of_user(base, typeof(*ext), base); const struct set_proto_ctx_engines *set = data; struct drm_i915_private *i915 = set->i915; struct i915_engine_class_instance prev_engine; u64 flags; int err = 0, n, i, j; u16 slot, width, num_siblings; struct intel_engine_cs **siblings = NULL; intel_engine_mask_t prev_mask; if (get_user(slot, &ext->engine_index)) return -EFAULT; if (get_user(width, &ext->width)) return -EFAULT; if (get_user(num_siblings, &ext->num_siblings)) return -EFAULT; if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) && num_siblings != 1) { drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n", num_siblings); return -EINVAL; } if (slot >= set->num_engines) { drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", slot, set->num_engines); return -EINVAL; } if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { drm_dbg(&i915->drm, "Invalid placement[%d], already occupied\n", slot); return -EINVAL; } if (get_user(flags, &ext->flags)) return -EFAULT; if (flags) { drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); return -EINVAL; } for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { err = check_user_mbz(&ext->mbz64[n]); if (err) return err; } if (width < 2) { drm_dbg(&i915->drm, "Width (%d) < 2\n", width); return -EINVAL; } if (num_siblings < 1) { drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", num_siblings); return -EINVAL; } siblings = kmalloc_array(num_siblings * width, sizeof(*siblings), GFP_KERNEL); if (!siblings) return -ENOMEM; /* Create contexts / engines */ for (i = 0; i < width; ++i) { intel_engine_mask_t current_mask = 0; for (j = 0; j < num_siblings; ++j) { struct i915_engine_class_instance ci; n = i * num_siblings + j; if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { err = -EFAULT; goto out_err; } siblings[n] = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!siblings[n]) { drm_dbg(&i915->drm, "Invalid sibling[%d]: { class:%d, inst:%d }\n", n, ci.engine_class, ci.engine_instance); err = -EINVAL; goto out_err; } /* * We don't support breadcrumb handshake on these * classes */ if (siblings[n]->class == RENDER_CLASS || siblings[n]->class == COMPUTE_CLASS) { err = -EINVAL; goto out_err; } if (n) { if (prev_engine.engine_class != ci.engine_class) { drm_dbg(&i915->drm, "Mismatched class %d, %d\n", prev_engine.engine_class, ci.engine_class); err = -EINVAL; goto out_err; } } prev_engine = ci; current_mask |= siblings[n]->logical_mask; } if (i > 0) { if (current_mask != prev_mask << 1) { drm_dbg(&i915->drm, "Non contiguous logical mask 0x%x, 0x%x\n", prev_mask, current_mask); err = -EINVAL; goto out_err; } } prev_mask = current_mask; } 
set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; set->engines[slot].num_siblings = num_siblings; set->engines[slot].width = width; set->engines[slot].siblings = siblings; return 0; out_err: kfree(siblings); return err; } static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = set_proto_ctx_engines_parallel_submit, }; static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, const struct drm_i915_gem_context_param *args) { struct drm_i915_private *i915 = fpriv->i915; struct set_proto_ctx_engines set = { .i915 = i915 }; struct i915_context_param_engines __user *user = u64_to_user_ptr(args->value); unsigned int n; u64 extensions; int err; if (pc->num_user_engines >= 0) { drm_dbg(&i915->drm, "Cannot set engines twice"); return -EINVAL; } if (args->size < sizeof(*user) || !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", args->size); return -EINVAL; } set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); /* RING_MASK has no shift so we can use it directly here */ if (set.num_engines > I915_EXEC_RING_MASK + 1) return -EINVAL; set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); if (!set.engines) return -ENOMEM; for (n = 0; n < set.num_engines; n++) { struct i915_engine_class_instance ci; struct intel_engine_cs *engine; if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { kfree(set.engines); return -EFAULT; } memset(&set.engines[n], 0, sizeof(set.engines[n])); if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) continue; engine = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!engine) { drm_dbg(&i915->drm, "Invalid engine[%d]: { class:%d, instance:%d }\n", n, ci.engine_class, ci.engine_instance); kfree(set.engines); return -ENOENT; } set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; set.engines[n].engine = engine; } err = -EFAULT; if (!get_user(extensions, &user->extensions)) err = i915_user_extensions(u64_to_user_ptr(extensions), set_proto_ctx_engines_extensions, ARRAY_SIZE(set_proto_ctx_engines_extensions), &set); if (err) { kfree(set.engines); return err; } pc->num_user_engines = set.num_engines; pc->user_engines = set.engines; return 0; } static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, struct drm_i915_gem_context_param *args) { struct drm_i915_private *i915 = fpriv->i915; struct drm_i915_gem_context_param_sseu user_sseu; struct intel_sseu *sseu; int ret; if (args->size < sizeof(user_sseu)) return -EINVAL; if (GRAPHICS_VER(i915) != 11) return -ENODEV; if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), sizeof(user_sseu))) return -EFAULT; if (user_sseu.rsvd) return -EINVAL; if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) return -EINVAL; if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) return -EINVAL; if (pc->num_user_engines >= 0) { int idx = user_sseu.engine.engine_instance; struct i915_gem_proto_engine *pe; if (idx >= pc->num_user_engines) return -EINVAL; pe = &pc->user_engines[idx]; /* Only render engine supports RPCS configuration. 
*/ if (pe->engine->class != RENDER_CLASS) return -EINVAL; sseu = &pe->sseu; } else { /* Only render engine supports RPCS configuration. */ if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) return -EINVAL; /* There is only one render engine */ if (user_sseu.engine.engine_instance != 0) return -EINVAL; sseu = &pc->legacy_rcs_sseu; } ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu); if (ret) return ret; args->size = sizeof(user_sseu); return 0; } static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, struct drm_i915_gem_context_param *args) { int ret = 0; switch (args->param) { case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size) ret = -EINVAL; else if (args->value) pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); else pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); break; case I915_CONTEXT_PARAM_BANNABLE: if (args->size) ret = -EINVAL; else if (!capable(CAP_SYS_ADMIN) && !args->value) ret = -EPERM; else if (args->value) pc->user_flags |= BIT(UCONTEXT_BANNABLE); else if (pc->uses_protected_content) ret = -EPERM; else pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); break; case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; else if (!args->value) pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); else if (pc->uses_protected_content) ret = -EPERM; else pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); break; case I915_CONTEXT_PARAM_PRIORITY: ret = validate_priority(fpriv->i915, args); if (!ret) pc->sched.priority = args->value; break; case I915_CONTEXT_PARAM_SSEU: ret = set_proto_ctx_sseu(fpriv, pc, args); break; case I915_CONTEXT_PARAM_VM: ret = set_proto_ctx_vm(fpriv, pc, args); break; case I915_CONTEXT_PARAM_ENGINES: ret = set_proto_ctx_engines(fpriv, pc, args); break; case I915_CONTEXT_PARAM_PERSISTENCE: if (args->size) ret = -EINVAL; else ret = proto_context_set_persistence(fpriv->i915, pc, args->value); break; case I915_CONTEXT_PARAM_PROTECTED_CONTENT: ret = proto_context_set_protected(fpriv->i915, pc, args->value); break; case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: default: ret = -EINVAL; break; } return ret; } static int intel_context_set_gem(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_sseu sseu) { int ret = 0; GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); RCU_INIT_POINTER(ce->gem_context, ctx); GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->engine->class == COMPUTE_CLASS) ce->ring_size = SZ_512K; else ce->ring_size = SZ_16K; i915_vm_put(ce->vm); ce->vm = i915_gem_context_get_eb_vm(ctx); if (ctx->sched.priority >= I915_PRIORITY_NORMAL && intel_engine_has_timeslices(ce->engine) && intel_engine_has_semaphores(ce->engine)) __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); if (CONFIG_DRM_I915_REQUEST_TIMEOUT && ctx->i915->params.request_timeout_ms) { unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000); } /* A valid SSEU has no zero fields */ if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) ret = intel_context_reconfigure_sseu(ce, sseu); return ret; } static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { struct intel_context *ce = e->engines[count], *child; if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) continue; for_each_child(ce, child) intel_context_unpin(child); intel_context_unpin(ce); } } static void unpin_engines(struct i915_gem_engines *e) { __unpin_engines(e, 
e->num_engines); } static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { if (!e->engines[count]) continue; intel_context_put(e->engines[count]); } kfree(e); } static void free_engines(struct i915_gem_engines *e) { __free_engines(e, e->num_engines); } static void free_engines_rcu(struct rcu_head *rcu) { struct i915_gem_engines *engines = container_of(rcu, struct i915_gem_engines, rcu); i915_sw_fence_fini(&engines->fence); free_engines(engines); } static void accumulate_runtime(struct i915_drm_client *client, struct i915_gem_engines *engines) { struct i915_gem_engines_iter it; struct intel_context *ce; if (!client) return; /* Transfer accumulated runtime to the parent GEM context. */ for_each_gem_engine(ce, engines, it) { unsigned int class = ce->engine->uabi_class; GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime)); atomic64_add(intel_context_get_total_runtime_ns(ce), &client->past_runtime[class]); } } static int engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_gem_engines *engines = container_of(fence, typeof(*engines), fence); struct i915_gem_context *ctx = engines->ctx; switch (state) { case FENCE_COMPLETE: if (!list_empty(&engines->link)) { unsigned long flags; spin_lock_irqsave(&ctx->stale.lock, flags); list_del(&engines->link); spin_unlock_irqrestore(&ctx->stale.lock, flags); } accumulate_runtime(ctx->client, engines); i915_gem_context_put(ctx); break; case FENCE_FREE: init_rcu_head(&engines->rcu); call_rcu(&engines->rcu, free_engines_rcu); break; } return NOTIFY_DONE; } static struct i915_gem_engines *alloc_engines(unsigned int count) { struct i915_gem_engines *e; e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); if (!e) return NULL; i915_sw_fence_init(&e->fence, engines_notify); return e; } static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx, struct intel_sseu rcs_sseu) { const unsigned int max = I915_NUM_ENGINES; struct intel_engine_cs *engine; struct i915_gem_engines *e, *err; e = alloc_engines(max); if (!e) return ERR_PTR(-ENOMEM); for_each_uabi_engine(engine, ctx->i915) { struct intel_context *ce; struct intel_sseu sseu = {}; int ret; if (engine->legacy_idx == INVALID_ENGINE) continue; GEM_BUG_ON(engine->legacy_idx >= max); GEM_BUG_ON(e->engines[engine->legacy_idx]); ce = intel_context_create(engine); if (IS_ERR(ce)) { err = ERR_CAST(ce); goto free_engines; } e->engines[engine->legacy_idx] = ce; e->num_engines = max(e->num_engines, engine->legacy_idx + 1); if (engine->class == RENDER_CLASS) sseu = rcs_sseu; ret = intel_context_set_gem(ce, ctx, sseu); if (ret) { err = ERR_PTR(ret); goto free_engines; } } return e; free_engines: free_engines(e); return err; } static int perma_pin_contexts(struct intel_context *ce) { struct intel_context *child; int i = 0, j = 0, ret; GEM_BUG_ON(!intel_context_is_parent(ce)); ret = intel_context_pin(ce); if (unlikely(ret)) return ret; for_each_child(ce, child) { ret = intel_context_pin(child); if (unlikely(ret)) goto unwind; ++i; } set_bit(CONTEXT_PERMA_PIN, &ce->flags); return 0; unwind: intel_context_unpin(ce); for_each_child(ce, child) { if (j++ < i) intel_context_unpin(child); else break; } return ret; } static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, unsigned int num_engines, struct i915_gem_proto_engine *pe) { struct i915_gem_engines *e, *err; unsigned int n; e = alloc_engines(num_engines); if (!e) return ERR_PTR(-ENOMEM); e->num_engines = num_engines; for (n = 0; n < num_engines; n++) { struct 
intel_context *ce, *child; int ret; switch (pe[n].type) { case I915_GEM_ENGINE_TYPE_PHYSICAL: ce = intel_context_create(pe[n].engine); break; case I915_GEM_ENGINE_TYPE_BALANCED: ce = intel_engine_create_virtual(pe[n].siblings, pe[n].num_siblings, 0); break; case I915_GEM_ENGINE_TYPE_PARALLEL: ce = intel_engine_create_parallel(pe[n].siblings, pe[n].num_siblings, pe[n].width); break; case I915_GEM_ENGINE_TYPE_INVALID: default: GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID); continue; } if (IS_ERR(ce)) { err = ERR_CAST(ce); goto free_engines; } e->engines[n] = ce; ret = intel_context_set_gem(ce, ctx, pe->sseu); if (ret) { err = ERR_PTR(ret); goto free_engines; } for_each_child(ce, child) { ret = intel_context_set_gem(child, ctx, pe->sseu); if (ret) { err = ERR_PTR(ret); goto free_engines; } } /* * XXX: Must be done after calling intel_context_set_gem as that * function changes the ring size. The ring is allocated when * the context is pinned. If the ring size is changed after * allocation we have a mismatch of the ring size and will cause * the context to hang. Presumably with a bit of reordering we * could move the perma-pin step to the backend function * intel_engine_create_parallel. */ if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { ret = perma_pin_contexts(ce); if (ret) { err = ERR_PTR(ret); goto free_engines; } } } return e; free_engines: free_engines(e); return err; } static void i915_gem_context_release_work(struct work_struct *work) { struct i915_gem_context *ctx = container_of(work, typeof(*ctx), release_work); struct i915_address_space *vm; trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); spin_lock(&ctx->i915->gem.contexts.lock); list_del(&ctx->link); spin_unlock(&ctx->i915->gem.contexts.lock); if (ctx->syncobj) drm_syncobj_put(ctx->syncobj); vm = ctx->vm; if (vm) i915_vm_put(vm); if (ctx->pxp_wakeref) intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); if (ctx->client) i915_drm_client_put(ctx->client); mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); put_pid(ctx->pid); mutex_destroy(&ctx->mutex); kfree_rcu(ctx, rcu); } void i915_gem_context_release(struct kref *ref) { struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); queue_work(ctx->i915->wq, &ctx->release_work); } static inline struct i915_gem_engines * __context_engines_static(const struct i915_gem_context *ctx) { return rcu_dereference_protected(ctx->engines, true); } static void __reset_context(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { intel_gt_handle_error(engine->gt, engine->mask, 0, "context closure in %s", ctx->name); } static bool __cancel_engine(struct intel_engine_cs *engine) { /* * Send a "high priority pulse" down the engine to cause the * current request to be momentarily preempted. (If it fails to * be preempted, it will be reset). As we have marked our context * as banned, any incomplete request, including any running, will * be skipped following the preemption. * * If there is no hangchecking (one of the reasons why we try to * cancel the context) and no forced preemption, there may be no * means by which we reset the GPU and evict the persistent hog. * Ergo if we are unable to inject a preemptive pulse that can * kill the banned context, we fallback to doing a local reset * instead. 
*/ return intel_engine_pulse(engine) == 0; } static struct intel_engine_cs *active_engine(struct intel_context *ce) { struct intel_engine_cs *engine = NULL; struct i915_request *rq; if (intel_context_has_inflight(ce)) return intel_context_inflight(ce); if (!ce->timeline) return NULL; /* * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference * to the request to prevent it being transferred to a new timeline * (and onto a new timeline->requests list). */ rcu_read_lock(); list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { bool found; /* timeline is already completed upto this point? */ if (!i915_request_get_rcu(rq)) break; /* Check with the backend if the request is inflight */ found = true; if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) found = i915_request_active_engine(rq, &engine); i915_request_put(rq); if (found) break; } rcu_read_unlock(); return engine; } static void kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent) { struct i915_gem_engines_iter it; struct intel_context *ce; /* * Map the user's engine back to the actual engines; one virtual * engine will be mapped to multiple engines, and using ctx->engine[] * the same engine may be have multiple instances in the user's map. * However, we only care about pending requests, so only include * engines on which there are incomplete requests. */ for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine; if ((exit || !persistent) && intel_context_revoke(ce)) continue; /* Already marked. */ /* * Check the current active state of this context; if we * are currently executing on the GPU we need to evict * ourselves. On the other hand, if we haven't yet been * submitted to the GPU or if everything is complete, * we have nothing to do. */ engine = active_engine(ce); /* First attempt to gracefully cancel the context */ if (engine && !__cancel_engine(engine) && (exit || !persistent)) /* * If we are unable to send a preemptive pulse to bump * the context from the GPU, we have to resort to a full * reset. We hope the collateral damage is worth it. 
*/ __reset_context(engines->ctx, engine); } } static void kill_context(struct i915_gem_context *ctx) { struct i915_gem_engines *pos, *next; spin_lock_irq(&ctx->stale.lock); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { if (!i915_sw_fence_await(&pos->fence)) { list_del_init(&pos->link); continue; } spin_unlock_irq(&ctx->stale.lock); kill_engines(pos, !ctx->i915->params.enable_hangcheck, i915_gem_context_is_persistent(ctx)); spin_lock_irq(&ctx->stale.lock); GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); list_safe_reset_next(pos, next, link); list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ i915_sw_fence_complete(&pos->fence); } spin_unlock_irq(&ctx->stale.lock); } static void engines_idle_release(struct i915_gem_context *ctx, struct i915_gem_engines *engines) { struct i915_gem_engines_iter it; struct intel_context *ce; INIT_LIST_HEAD(&engines->link); engines->ctx = i915_gem_context_get(ctx); for_each_gem_engine(ce, engines, it) { int err; /* serialises with execbuf */ intel_context_close(ce); if (!intel_context_pin_if_active(ce)) continue; /* Wait until context is finally scheduled out and retired */ err = i915_sw_fence_await_active(&engines->fence, &ce->active, I915_ACTIVE_AWAIT_BARRIER); intel_context_unpin(ce); if (err) goto kill; } spin_lock_irq(&ctx->stale.lock); if (!i915_gem_context_is_closed(ctx)) list_add_tail(&engines->link, &ctx->stale.engines); spin_unlock_irq(&ctx->stale.lock); kill: if (list_empty(&engines->link)) /* raced, already closed */ kill_engines(engines, true, i915_gem_context_is_persistent(ctx)); i915_sw_fence_commit(&engines->fence); } static void set_closed_name(struct i915_gem_context *ctx) { char *s; /* Replace '[]' with '<>' to indicate closed in debug prints */ s = strrchr(ctx->name, '['); if (!s) return; *s = '<'; s = strchr(s + 1, ']'); if (s) *s = '>'; } static void context_close(struct i915_gem_context *ctx) { struct i915_drm_client *client; /* Flush any concurrent set_engines() */ mutex_lock(&ctx->engines_mutex); unpin_engines(__context_engines_static(ctx)); engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); mutex_unlock(&ctx->engines_mutex); mutex_lock(&ctx->mutex); set_closed_name(ctx); /* * The LUT uses the VMA as a backpointer to unref the object, * so we need to clear the LUT before we close all the VMA (inside * the ppgtt). */ lut_close(ctx); ctx->file_priv = ERR_PTR(-EBADF); client = ctx->client; if (client) { spin_lock(&client->ctx_lock); list_del_rcu(&ctx->client_link); spin_unlock(&client->ctx_lock); } mutex_unlock(&ctx->mutex); /* * If the user has disabled hangchecking, we can not be sure that * the batches will ever complete after the context is closed, * keeping the context and all resources pinned forever. So in this * case we opt to forcibly kill off all remaining requests on * context close. */ kill_context(ctx); i915_gem_context_put(ctx); } static int __context_set_persistence(struct i915_gem_context *ctx, bool state) { if (i915_gem_context_is_persistent(ctx) == state) return 0; if (state) { /* * Only contexts that are short-lived [that will expire or be * reset] are allowed to survive past termination. We require * hangcheck to ensure that the persistent requests are healthy. 
*/ if (!ctx->i915->params.enable_hangcheck) return -EINVAL; i915_gem_context_set_persistence(ctx); } else { /* To cancel a context we use "preempt-to-idle" */ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) return -ENODEV; /* * If the cancel fails, we then need to reset, cleanly! * * If the per-engine reset fails, all hope is lost! We resort * to a full GPU reset in that unlikely case, but realistically * if the engine could not reset, the full reset does not fare * much better. The damage has been done. * * However, if we cannot reset an engine by itself, we cannot * cleanup a hanging persistent context without causing * colateral damage, and we should not pretend we can by * exposing the interface. */ if (!intel_has_reset_engine(to_gt(ctx->i915))) return -ENODEV; i915_gem_context_clear_persistence(ctx); } return 0; } static struct i915_gem_context * i915_gem_create_context(struct drm_i915_private *i915, const struct i915_gem_proto_context *pc) { struct i915_gem_context *ctx; struct i915_address_space *vm = NULL; struct i915_gem_engines *e; int err; int i; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); kref_init(&ctx->ref); ctx->i915 = i915; ctx->sched = pc->sched; mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->link); INIT_WORK(&ctx->release_work, i915_gem_context_release_work); spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); if (pc->vm) { vm = i915_vm_get(pc->vm); } else if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) { drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); err = PTR_ERR(ppgtt); goto err_ctx; } vm = &ppgtt->vm; } if (vm) ctx->vm = vm; mutex_init(&ctx->engines_mutex); if (pc->num_user_engines >= 0) { i915_gem_context_set_user_engines(ctx); e = user_engines(ctx, pc->num_user_engines, pc->user_engines); } else { i915_gem_context_clear_user_engines(ctx); e = default_engines(ctx, pc->legacy_rcs_sseu); } if (IS_ERR(e)) { err = PTR_ERR(e); goto err_vm; } RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); mutex_init(&ctx->lut_mutex); /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. */ ctx->remap_slice = ALL_L3_SLICES(i915); ctx->user_flags = pc->user_flags; for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; if (pc->single_timeline) { err = drm_syncobj_create(&ctx->syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL); if (err) goto err_engines; } if (pc->uses_protected_content) { ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx->uses_protected_content = true; } trace_i915_context_create(ctx); return ctx; err_engines: free_engines(e); err_vm: if (ctx->vm) i915_vm_put(ctx->vm); err_ctx: kfree(ctx); return ERR_PTR(err); } static void init_contexts(struct i915_gem_contexts *gc) { spin_lock_init(&gc->lock); INIT_LIST_HEAD(&gc->list); } void i915_gem_init__contexts(struct drm_i915_private *i915) { init_contexts(&i915->gem.contexts); } /* * Note that this implicitly consumes the ctx reference, by placing * the ctx in the context_xa. 
*/ static void gem_context_register(struct i915_gem_context *ctx, struct drm_i915_file_private *fpriv, u32 id) { struct drm_i915_private *i915 = ctx->i915; void *old; ctx->file_priv = fpriv; ctx->pid = get_task_pid(current, PIDTYPE_PID); ctx->client = i915_drm_client_get(fpriv->client); snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", current->comm, pid_nr(ctx->pid)); spin_lock(&ctx->client->ctx_lock); list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list); spin_unlock(&ctx->client->ctx_lock); spin_lock(&i915->gem.contexts.lock); list_add_tail(&ctx->link, &i915->gem.contexts.list); spin_unlock(&i915->gem.contexts.lock); /* And finally expose ourselves to userspace via the idr */ old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); WARN_ON(old); } int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; int err; mutex_init(&file_priv->proto_context_lock); xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); /* 0 reserved for the default context */ xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); /* 0 reserved for invalid/unassigned ppgtt */ xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); pc = proto_context_create(i915, 0); if (IS_ERR(pc)) { err = PTR_ERR(pc); goto err; } ctx = i915_gem_create_context(i915, pc); proto_context_close(i915, pc); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err; } gem_context_register(ctx, file_priv, 0); return 0; err: xa_destroy(&file_priv->vm_xa); xa_destroy(&file_priv->context_xa); xa_destroy(&file_priv->proto_context_xa); mutex_destroy(&file_priv->proto_context_lock); return err; } void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_proto_context *pc; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; xa_for_each(&file_priv->proto_context_xa, idx, pc) proto_context_close(file_priv->i915, pc); xa_destroy(&file_priv->proto_context_xa); mutex_destroy(&file_priv->proto_context_lock); xa_for_each(&file_priv->context_xa, idx, ctx) context_close(ctx); xa_destroy(&file_priv->context_xa); xa_for_each(&file_priv->vm_xa, idx, vm) i915_vm_put(vm); xa_destroy(&file_priv->vm_xa); } int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_vm_control *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_ppgtt *ppgtt; u32 id; int err; if (!HAS_FULL_PPGTT(i915)) return -ENODEV; if (args->flags) return -EINVAL; ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); if (args->extensions) { err = i915_user_extensions(u64_to_user_ptr(args->extensions), NULL, 0, ppgtt); if (err) goto err_put; } err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, xa_limit_32b, GFP_KERNEL); if (err) goto err_put; GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ args->vm_id = id; return 0; err_put: i915_vm_put(&ppgtt->vm); return err; } int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_vm_control *args = data; struct i915_address_space *vm; if (args->flags) return -EINVAL; if (args->extensions) return -EINVAL; vm = xa_erase(&file_priv->vm_xa, args->vm_id); if (!vm) return -ENOENT; i915_vm_put(vm); return 0; } static int 
get_ppgtt(struct drm_i915_file_private *file_priv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { struct i915_address_space *vm; int err; u32 id; if (!i915_gem_context_has_full_ppgtt(ctx)) return -ENODEV; vm = ctx->vm; GEM_BUG_ON(!vm); /* * Get a reference for the allocated handle. Once the handle is * visible in the vm_xa table, userspace could try to close it * from under our feet, so we need to hold the extra reference * first. */ i915_vm_get(vm); err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); if (err) { i915_vm_put(vm); return err; } GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ args->value = id; args->size = 0; return err; } int i915_gem_user_to_context_sseu(struct intel_gt *gt, const struct drm_i915_gem_context_param_sseu *user, struct intel_sseu *context) { const struct sseu_dev_info *device = &gt->info.sseu; struct drm_i915_private *i915 = gt->i915; unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0); /* No zeros in any field. */ if (!user->slice_mask || !user->subslice_mask || !user->min_eus_per_subslice || !user->max_eus_per_subslice) return -EINVAL; /* Max > min. */ if (user->max_eus_per_subslice < user->min_eus_per_subslice) return -EINVAL; /* * Some future proofing on the types since the uAPI is wider than the * current internal implementation. */ if (overflows_type(user->slice_mask, context->slice_mask) || overflows_type(user->subslice_mask, context->subslice_mask) || overflows_type(user->min_eus_per_subslice, context->min_eus_per_subslice) || overflows_type(user->max_eus_per_subslice, context->max_eus_per_subslice)) return -EINVAL; /* Check validity against hardware. */ if (user->slice_mask & ~device->slice_mask) return -EINVAL; if (user->subslice_mask & ~dev_subslice_mask) return -EINVAL; if (user->max_eus_per_subslice > device->max_eus_per_subslice) return -EINVAL; context->slice_mask = user->slice_mask; context->subslice_mask = user->subslice_mask; context->min_eus_per_subslice = user->min_eus_per_subslice; context->max_eus_per_subslice = user->max_eus_per_subslice; /* Part specific restrictions. */ if (GRAPHICS_VER(i915) == 11) { unsigned int hw_s = hweight8(device->slice_mask); unsigned int hw_ss_per_s = hweight8(dev_subslice_mask); unsigned int req_s = hweight8(context->slice_mask); unsigned int req_ss = hweight8(context->subslice_mask); /* * Only full subslice enablement is possible if more than one * slice is turned on. */ if (req_s > 1 && req_ss != hw_ss_per_s) return -EINVAL; /* * If more than four (SScount bitfield limit) subslices are * requested then the number has to be even. */ if (req_ss > 4 && (req_ss & 1)) return -EINVAL; /* * If only one slice is enabled and subslice count is below the * device full enablement, it must be at most half of the all * available subslices. */ if (req_s == 1 && req_ss < hw_ss_per_s && req_ss > (hw_ss_per_s / 2)) return -EINVAL; /* ABI restriction - VME use case only. */ /* All slices or one slice only. */ if (req_s != 1 && req_s != hw_s) return -EINVAL; /* * Half subslices or full enablement only when one slice is * enabled. */ if (req_s == 1 && (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) return -EINVAL; /* No EU configuration changes. 
*/ if ((user->min_eus_per_subslice != device->max_eus_per_subslice) || (user->max_eus_per_subslice != device->max_eus_per_subslice)) return -EINVAL; } return 0; } static int set_sseu(struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_context_param_sseu user_sseu; struct intel_context *ce; struct intel_sseu sseu; unsigned long lookup; int ret; if (args->size < sizeof(user_sseu)) return -EINVAL; if (GRAPHICS_VER(i915) != 11) return -ENODEV; if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), sizeof(user_sseu))) return -EFAULT; if (user_sseu.rsvd) return -EINVAL; if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) return -EINVAL; lookup = 0; if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) lookup |= LOOKUP_USER_INDEX; ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); if (IS_ERR(ce)) return PTR_ERR(ce); /* Only render engine supports RPCS configuration. */ if (ce->engine->class != RENDER_CLASS) { ret = -ENODEV; goto out_ce; } ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); if (ret) goto out_ce; ret = intel_context_reconfigure_sseu(ce, sseu); if (ret) goto out_ce; args->size = sizeof(user_sseu); out_ce: intel_context_put(ce); return ret; } static int set_persistence(struct i915_gem_context *ctx, const struct drm_i915_gem_context_param *args) { if (args->size) return -EINVAL; return __context_set_persistence(ctx, args->value); } static int set_priority(struct i915_gem_context *ctx, const struct drm_i915_gem_context_param *args) { struct i915_gem_engines_iter it; struct intel_context *ce; int err; err = validate_priority(ctx->i915, args); if (err) return err; ctx->sched.priority = args->value; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (!intel_engine_has_timeslices(ce->engine)) continue; if (ctx->sched.priority >= I915_PRIORITY_NORMAL && intel_engine_has_semaphores(ce->engine)) intel_context_set_use_semaphores(ce); else intel_context_clear_use_semaphores(ce); } i915_gem_context_unlock_engines(ctx); return 0; } static int get_protected(struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { args->size = 0; args->value = i915_gem_context_uses_protected_content(ctx); return 0; } static int ctx_setparam(struct drm_i915_file_private *fpriv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { int ret = 0; switch (args->param) { case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size) ret = -EINVAL; else if (args->value) i915_gem_context_set_no_error_capture(ctx); else i915_gem_context_clear_no_error_capture(ctx); break; case I915_CONTEXT_PARAM_BANNABLE: if (args->size) ret = -EINVAL; else if (!capable(CAP_SYS_ADMIN) && !args->value) ret = -EPERM; else if (args->value) i915_gem_context_set_bannable(ctx); else if (i915_gem_context_uses_protected_content(ctx)) ret = -EPERM; /* can't clear this for protected contexts */ else i915_gem_context_clear_bannable(ctx); break; case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; else if (!args->value) i915_gem_context_clear_recoverable(ctx); else if (i915_gem_context_uses_protected_content(ctx)) ret = -EPERM; /* can't set this for protected contexts */ else i915_gem_context_set_recoverable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: ret = set_priority(ctx, args); break; case I915_CONTEXT_PARAM_SSEU: ret = set_sseu(ctx, args); break; case I915_CONTEXT_PARAM_PERSISTENCE: ret = set_persistence(ctx, args); break; case 
I915_CONTEXT_PARAM_PROTECTED_CONTENT: case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: case I915_CONTEXT_PARAM_VM: case I915_CONTEXT_PARAM_ENGINES: default: ret = -EINVAL; break; } return ret; } struct create_ext { struct i915_gem_proto_context *pc; struct drm_i915_file_private *fpriv; }; static int create_setparam(struct i915_user_extension __user *ext, void *data) { struct drm_i915_gem_context_create_ext_setparam local; const struct create_ext *arg = data; if (copy_from_user(&local, ext, sizeof(local))) return -EFAULT; if (local.param.ctx_id) return -EINVAL; return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); } static int invalid_ext(struct i915_user_extension __user *ext, void *data) { return -EINVAL; } static const i915_user_extension_fn create_extensions[] = { [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext, }; static bool client_is_banned(struct drm_i915_file_private *file_priv) { return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; } static inline struct i915_gem_context * __context_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_gem_context *ctx; rcu_read_lock(); ctx = xa_load(&file_priv->context_xa, id); if (ctx && !kref_get_unless_zero(&ctx->ref)) ctx = NULL; rcu_read_unlock(); return ctx; } static struct i915_gem_context * finalize_create_context_locked(struct drm_i915_file_private *file_priv, struct i915_gem_proto_context *pc, u32 id) { struct i915_gem_context *ctx; void *old; lockdep_assert_held(&file_priv->proto_context_lock); ctx = i915_gem_create_context(file_priv->i915, pc); if (IS_ERR(ctx)) return ctx; /* * One for the xarray and one for the caller. We need to grab * the reference *prior* to making the ctx visble to userspace * in gem_context_register(), as at any point after that * userspace can try to race us with another thread destroying * the context under our feet. 
*/ i915_gem_context_get(ctx); gem_context_register(ctx, file_priv, id); old = xa_erase(&file_priv->proto_context_xa, id); GEM_BUG_ON(old != pc); proto_context_close(file_priv->i915, pc); return ctx; } struct i915_gem_context * i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; ctx = __context_lookup(file_priv, id); if (ctx) return ctx; mutex_lock(&file_priv->proto_context_lock); /* Try one more time under the lock */ ctx = __context_lookup(file_priv, id); if (!ctx) { pc = xa_load(&file_priv->proto_context_xa, id); if (!pc) ctx = ERR_PTR(-ENOENT); else ctx = finalize_create_context_locked(file_priv, pc, id); } mutex_unlock(&file_priv->proto_context_lock); return ctx; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_context_create_ext *args = data; struct create_ext ext_data; int ret; u32 id; if (!DRIVER_CAPS(i915)->has_logical_contexts) return -ENODEV; if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; ret = intel_gt_terminally_wedged(to_gt(i915)); if (ret) return ret; ext_data.fpriv = file->driver_priv; if (client_is_banned(ext_data.fpriv)) { drm_dbg(&i915->drm, "client %s[%d] banned from creating ctx\n", current->comm, task_pid_nr(current)); return -EIO; } ext_data.pc = proto_context_create(i915, args->flags); if (IS_ERR(ext_data.pc)) return PTR_ERR(ext_data.pc); if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { ret = i915_user_extensions(u64_to_user_ptr(args->extensions), create_extensions, ARRAY_SIZE(create_extensions), &ext_data); if (ret) goto err_pc; } if (GRAPHICS_VER(i915) > 12) { struct i915_gem_context *ctx; /* Get ourselves a context ID */ ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, xa_limit_32b, GFP_KERNEL); if (ret) goto err_pc; ctx = i915_gem_create_context(i915, ext_data.pc); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto err_pc; } proto_context_close(i915, ext_data.pc); gem_context_register(ctx, ext_data.fpriv, id); } else { ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); if (ret < 0) goto err_pc; } args->ctx_id = id; return 0; err_pc: proto_context_close(i915, ext_data.pc); return ret; } int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; if (args->pad != 0) return -EINVAL; if (!args->ctx_id) return -ENOENT; /* We need to hold the proto-context lock here to prevent races * with finalize_create_context_locked(). 
*/ mutex_lock(&file_priv->proto_context_lock); ctx = xa_erase(&file_priv->context_xa, args->ctx_id); pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); mutex_unlock(&file_priv->proto_context_lock); if (!ctx && !pc) return -ENOENT; GEM_WARN_ON(ctx && pc); if (pc) proto_context_close(file_priv->i915, pc); if (ctx) context_close(ctx); return 0; } static int get_sseu(struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { struct drm_i915_gem_context_param_sseu user_sseu; struct intel_context *ce; unsigned long lookup; int err; if (args->size == 0) goto out; else if (args->size < sizeof(user_sseu)) return -EINVAL; if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), sizeof(user_sseu))) return -EFAULT; if (user_sseu.rsvd) return -EINVAL; if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) return -EINVAL; lookup = 0; if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) lookup |= LOOKUP_USER_INDEX; ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); if (IS_ERR(ce)) return PTR_ERR(ce); err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ if (err) { intel_context_put(ce); return err; } user_sseu.slice_mask = ce->sseu.slice_mask; user_sseu.subslice_mask = ce->sseu.subslice_mask; user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; intel_context_unlock_pinned(ce); intel_context_put(ce); if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, sizeof(user_sseu))) return -EFAULT; out: args->size = sizeof(user_sseu); return 0; } int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_context *ctx; struct i915_address_space *vm; int ret = 0; ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) return PTR_ERR(ctx); switch (args->param) { case I915_CONTEXT_PARAM_GTT_SIZE: args->size = 0; vm = i915_gem_context_get_eb_vm(ctx); args->value = vm->total; i915_vm_put(vm); break; case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: args->size = 0; args->value = i915_gem_context_no_error_capture(ctx); break; case I915_CONTEXT_PARAM_BANNABLE: args->size = 0; args->value = i915_gem_context_is_bannable(ctx); break; case I915_CONTEXT_PARAM_RECOVERABLE: args->size = 0; args->value = i915_gem_context_is_recoverable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: args->size = 0; args->value = ctx->sched.priority; break; case I915_CONTEXT_PARAM_SSEU: ret = get_sseu(ctx, args); break; case I915_CONTEXT_PARAM_VM: ret = get_ppgtt(file_priv, ctx, args); break; case I915_CONTEXT_PARAM_PERSISTENCE: args->size = 0; args->value = i915_gem_context_is_persistent(ctx); break; case I915_CONTEXT_PARAM_PROTECTED_CONTENT: ret = get_protected(ctx, args); break; case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_ENGINES: case I915_CONTEXT_PARAM_RINGSIZE: default: ret = -EINVAL; break; } i915_gem_context_put(ctx); return ret; } int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; int ret = 0; mutex_lock(&file_priv->proto_context_lock); ctx = __context_lookup(file_priv, args->ctx_id); if (!ctx) { pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); if (pc) { /* 
Contexts should be finalized inside * GEM_CONTEXT_CREATE starting with graphics * version 13. */ WARN_ON(GRAPHICS_VER(file_priv->i915) > 12); ret = set_proto_ctx_param(file_priv, pc, args); } else { ret = -ENOENT; } } mutex_unlock(&file_priv->proto_context_lock); if (ctx) { ret = ctx_setparam(file_priv, ctx, args); i915_gem_context_put(ctx); } return ret; } int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_reset_stats *args = data; struct i915_gem_context *ctx; if (args->flags || args->pad) return -EINVAL; ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); if (IS_ERR(ctx)) return PTR_ERR(ctx); /* * We opt for unserialised reads here. This may result in tearing * in the extremely unlikely event of a GPU hang on this context * as we are querying them. If we need that extra layer of protection, * we should wrap the hangstats with a seqlock. */ if (capable(CAP_SYS_ADMIN)) args->reset_count = i915_reset_count(&i915->gpu_error); else args->reset_count = 0; args->batch_active = atomic_read(&ctx->guilty_count); args->batch_pending = atomic_read(&ctx->active_count); i915_gem_context_put(ctx); return 0; } /* GEM context-engines iterator: for_each_gem_engine() */ struct intel_context * i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) { const struct i915_gem_engines *e = it->engines; struct intel_context *ctx; if (unlikely(!e)) return NULL; do { if (it->idx >= e->num_engines) return NULL; ctx = e->engines[it->idx++]; } while (!ctx); return ctx; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_context.c" #include "selftests/i915_gem_context.c" #endif void i915_gem_context_module_exit(void) { kmem_cache_destroy(slab_luts); } int __init i915_gem_context_module_init(void) { slab_luts = KMEM_CACHE(i915_lut_handle, 0); if (!slab_luts) return -ENOMEM; return 0; }
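/*
 * Illustrative userspace sketch (an addition for clarity, not part of the
 * kernel file above): the create/setparam/destroy flow described in the
 * header comment, driven from a GPU client through the i915 uAPI ioctls
 * handled by i915_gem_context_create_ioctl(), i915_gem_context_setparam_ioctl()
 * and i915_gem_context_destroy_ioctl(). It assumes the kernel uAPI header
 * <drm/i915_drm.h> is installed; the render-node path and the minimal error
 * handling are placeholders, not part of the driver's interface.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	/* Open a render node; the exact path is system-dependent. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* S0 -> S1: create a context. */
	struct drm_i915_gem_context_create_ext create;
	memset(&create, 0, sizeof(create));
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create)) {
		perror("context create");
		close(fd);
		return 1;
	}
	printf("created context id %u\n", create.ctx_id);

	/* Tune the context, e.g. set its scheduling priority (size must be 0). */
	struct drm_i915_gem_context_param param;
	memset(&param, 0, sizeof(param));
	param.ctx_id = create.ctx_id;
	param.param = I915_CONTEXT_PARAM_PRIORITY;
	param.value = I915_CONTEXT_DEFAULT_PRIORITY;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param))
		perror("context setparam");

	/*
	 * S1 -> S2/S3 would happen via DRM_IOCTL_I915_GEM_EXECBUFFER2 with
	 * the context id placed in execbuf.rsvd1 (omitted here).
	 *
	 * Destroy drops the userspace reference; the kernel keeps the context
	 * alive until the GPU is done with it (states S4/S5 above).
	 */
	struct drm_i915_gem_context_destroy destroy;
	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = create.ctx_id;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy))
		perror("context destroy");

	close(fd);
	return 0;
}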
linux-master
drivers/gpu/drm/i915/gem/i915_gem_context.c
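/*
 * Second illustrative sketch for i915_gem_context.c above (again an addition,
 * not part of the driver): creating a PPGTT address space with the VM ioctls
 * handled by i915_gem_vm_create_ioctl() / i915_gem_vm_destroy_ioctl(), and
 * attaching it to a new context through the I915_CONTEXT_CREATE_EXT_SETPARAM
 * extension, which lands in create_setparam() and set_proto_ctx_vm() above.
 * Same assumptions as the previous sketch: kernel uAPI headers, a hypothetical
 * device-node path, minimal error handling.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* VM_CREATE fails with ENODEV on hardware without full PPGTT. */
	struct drm_i915_gem_vm_control vm;
	memset(&vm, 0, sizeof(vm));
	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm)) {
		perror("vm create");
		close(fd);
		return 1;
	}

	/* Chain a SETPARAM extension so the VM is set before finalization. */
	struct drm_i915_gem_context_create_ext_setparam ext;
	memset(&ext, 0, sizeof(ext));
	ext.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM;
	ext.param.param = I915_CONTEXT_PARAM_VM;
	ext.param.value = vm.vm_id;

	struct drm_i915_gem_context_create_ext create;
	memset(&create, 0, sizeof(create));
	create.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS;
	create.extensions = (__u64)(uintptr_t)&ext;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create) == 0) {
		struct drm_i915_gem_context_destroy destroy;

		memset(&destroy, 0, sizeof(destroy));
		destroy.ctx_id = create.ctx_id;
		ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
	} else {
		perror("context create with VM");
	}

	/*
	 * Drop our handle on the VM; the address space itself is reference
	 * counted and stays alive as long as a context still uses it.
	 */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm))
		perror("vm destroy");

	close(fd);
	return 0;
}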
/* * SPDX-License-Identifier: MIT * * Copyright © 2008 Intel Corporation */ #include <linux/string.h> #include <linux/bitops.h> #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_ioctls.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_gem_tiling.h" #include "i915_reg.h" /** * DOC: buffer object tiling * * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() is the userspace * interface to declare fence register requirements. * * In principle GEM doesn't care at all about the internal data layout of an * object, and hence it also doesn't care about tiling or swizzling. There's two * exceptions: * * - For X and Y tiling the hardware provides detilers for CPU access, so called * fences. Since there's only a limited amount of them the kernel must manage * these, and therefore userspace must tell the kernel the object tiling if it * wants to use fences for detiling. * - On gen3 and gen4 platforms have a swizzling pattern for tiled objects which * depends upon the physical page frame number. When swapping such objects the * page frame number might change and the kernel must be able to fix this up * and hence now the tiling. Note that on a subset of platforms with * asymmetric memory channel population the swizzling pattern changes in an * unknown way, and for those the kernel simply forbids swapping completely. * * Since neither of this applies for new tiling layouts on modern platforms like * W, Ys and Yf tiling GEM only allows object tiling to be set to X or Y tiled. * Anything else can be handled in userspace entirely without the kernel's * invovlement. */ /** * i915_gem_fence_size - required global GTT size for a fence * @i915: i915 device * @size: object size * @tiling: tiling mode * @stride: tiling stride * * Return the required global GTT size for a fence (view of a tiled object), * taking into account potential fence register mapping. */ u32 i915_gem_fence_size(struct drm_i915_private *i915, u32 size, unsigned int tiling, unsigned int stride) { u32 ggtt_size; GEM_BUG_ON(!size); if (tiling == I915_TILING_NONE) return size; GEM_BUG_ON(!stride); if (GRAPHICS_VER(i915) >= 4) { stride *= i915_gem_tile_height(tiling); GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE)); return roundup(size, stride); } /* Previous chips need a power-of-two fence region when tiling */ if (GRAPHICS_VER(i915) == 3) ggtt_size = 1024*1024; else ggtt_size = 512*1024; while (ggtt_size < size) ggtt_size <<= 1; return ggtt_size; } /** * i915_gem_fence_alignment - required global GTT alignment for a fence * @i915: i915 device * @size: object size * @tiling: tiling mode * @stride: tiling stride * * Return the required global GTT alignment for a fence (a view of a tiled * object), taking into account potential fence register mapping. */ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, unsigned int tiling, unsigned int stride) { GEM_BUG_ON(!size); /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ if (tiling == I915_TILING_NONE) return I915_GTT_MIN_ALIGNMENT; if (GRAPHICS_VER(i915) >= 4) return I965_FENCE_PAGE; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. 
*/ return i915_gem_fence_size(i915, size, tiling, stride); } /* Check pitch constraints for all chips & tiling formats */ static bool i915_tiling_ok(struct drm_i915_gem_object *obj, unsigned int tiling, unsigned int stride) { struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned int tile_width; /* Linear is always fine */ if (tiling == I915_TILING_NONE) return true; if (tiling > I915_TILING_LAST) return false; /* check maximum stride & object size */ /* i965+ stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ if (GRAPHICS_VER(i915) >= 7) { if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) return false; } else if (GRAPHICS_VER(i915) >= 4) { if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; } else { if (stride > 8192) return false; if (!is_power_of_2(stride)) return false; } if (GRAPHICS_VER(i915) == 2 || (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) tile_width = 128; else tile_width = 512; if (!stride || !IS_ALIGNED(stride, tile_width)) return false; return true; } static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode, unsigned int stride) { struct drm_i915_private *i915 = vma->vm->i915; u32 size, alignment; if (!i915_vma_is_map_and_fenceable(vma)) return true; size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride); if (i915_vma_size(vma) < size) return false; alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride); if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment)) return false; return true; } /* Make the current GTT allocation valid for the change in tiling. */ static int i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode, unsigned int stride) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = to_gt(i915)->ggtt; struct i915_vma *vma, *vn; LIST_HEAD(unbind); int ret = 0; if (tiling_mode == I915_TILING_NONE) return 0; mutex_lock(&ggtt->vm.mutex); spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { GEM_BUG_ON(vma->vm != &ggtt->vm); if (i915_vma_fence_prepare(vma, tiling_mode, stride)) continue; list_move(&vma->vm_link, &unbind); } spin_unlock(&obj->vma.lock); list_for_each_entry_safe(vma, vn, &unbind, vm_link) { ret = __i915_vma_unbind(vma); if (ret) { /* Restore the remaining vma on an error */ list_splice(&unbind, &ggtt->vm.bound_list); break; } } mutex_unlock(&ggtt->vm.mutex); return ret; } bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && i915_gem_object_is_tiled(obj); } int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, unsigned int tiling, unsigned int stride) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; int err; /* Make sure we don't cross-contaminate obj->tiling_and_stride */ BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK); GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride)); GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE)); if ((tiling | stride) == obj->tiling_and_stride) return 0; if (i915_gem_object_is_framebuffer(obj)) return -EBUSY; /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. Otherwise we can just leave it alone, but * need to ensure that any fence register is updated before * the next fenced (either through the GTT or by the BLT unit * on older GPUs) access. 
* * After updating the tiling parameters, we then flag whether * we need to update an associated fence register. Note this * has to also include the unfenced register the GPU uses * whilst executing a fenced command for an untiled object. */ i915_gem_object_lock(obj, NULL); if (i915_gem_object_is_framebuffer(obj)) { i915_gem_object_unlock(obj); return -EBUSY; } err = i915_gem_object_fence_prepare(obj, tiling, stride); if (err) { i915_gem_object_unlock(obj); return err; } /* If the memory has unknown (i.e. varying) swizzling, we pin the * pages to prevent them being swapped out and causing corruption * due to the change in swizzling. */ if (i915_gem_object_has_pages(obj) && obj->mm.madv == I915_MADV_WILLNEED && i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { if (tiling == I915_TILING_NONE) { GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_clear_tiling_quirk(obj); i915_gem_object_make_shrinkable(obj); } if (!i915_gem_object_is_tiled(obj)) { GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_make_unshrinkable(obj); i915_gem_object_set_tiling_quirk(obj); } } spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { vma->fence_size = i915_gem_fence_size(i915, vma->size, tiling, stride); vma->fence_alignment = i915_gem_fence_alignment(i915, vma->size, tiling, stride); if (vma->fence) vma->fence->dirty = true; } spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { if (!obj->bit_17) { obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT, GFP_KERNEL); } } else { bitmap_free(obj->bit_17); obj->bit_17 = NULL; } i915_gem_object_unlock(obj); /* Force the fence to be reacquired for GTT access */ i915_gem_object_release_mmap_gtt(obj); return 0; } /** * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode * @dev: DRM device * @data: data pointer for the ioctl * @file: DRM file for the ioctl call * * Sets the tiling mode of an object, returning the required swizzling of * bit 6 of addresses in the object. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_object *obj; int err; if (!to_gt(dev_priv)->ggtt->num_fences) return -EOPNOTSUPP; obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; /* * The tiling mode of proxy objects is handled by its generator, and * not allowed to be changed by userspace. */ if (i915_gem_object_is_proxy(obj)) { err = -ENXIO; goto err; } if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) { err = -EINVAL; goto err; } if (args->tiling_mode == I915_TILING_NONE) { args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->stride = 0; } else { if (args->tiling_mode == I915_TILING_X) args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; else args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; /* Hide bit 17 swizzling from the user. This prevents old Mesa * from aborting the application on sw fallbacks to bit 17, * and we use the pread/pwrite bit17 paths to swizzle for it. * If there was a user that was relying on the swizzle * information for drm_intel_bo_map()ed reads/writes this would * break it, but we don't have any of those. 
*/ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; /* If we can't handle the swizzling, make it untiled. */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { args->tiling_mode = I915_TILING_NONE; args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; args->stride = 0; } } err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride); /* We have to maintain this existing ABI... */ args->stride = i915_gem_object_get_stride(obj); args->tiling_mode = i915_gem_object_get_tiling(obj); err: i915_gem_object_put(obj); return err; } /** * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode * @dev: DRM device * @data: data pointer for the ioctl * @file: DRM file for the ioctl call * * Returns the current tiling mode and required bit 6 swizzling for the object. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; int err = -ENOENT; if (!to_gt(dev_priv)->ggtt->num_fences) return -EOPNOTSUPP; rcu_read_lock(); obj = i915_gem_object_lookup_rcu(file, args->handle); if (obj) { args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK; err = 0; } rcu_read_unlock(); if (unlikely(err)) return err; switch (args->tiling_mode) { case I915_TILING_X: args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; break; case I915_TILING_Y: args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; break; default: case I915_TILING_NONE: args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; break; } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; else args->phys_swizzle_mode = args->swizzle_mode; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; return 0; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
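The set/get tiling ioctls above only apply on hardware that still exposes fence registers; both return -EOPNOTSUPP when ggtt->num_fences is zero, and only X or Y tiling can be requested. Below is a minimal sketch of the userspace side; the buffer size, the 4096-byte stride and the device path are illustrative assumptions rather than values taken from the file above.

/* Hypothetical sketch: declare X tiling for a GEM buffer and read back
 * the bit-6 swizzle the kernel selected.  Only meaningful on older
 * hardware with fence registers; newer parts fail both ioctls with
 * -EOPNOTSUPP.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_set_tiling set;
	struct drm_i915_gem_get_tiling get;
	int fd;

	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&create, 0, sizeof(create));
	create.size = 4096 * 256;	/* 1 MiB: stride * 256 rows */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0) {
		perror("GEM_CREATE");
		close(fd);
		return 1;
	}

	memset(&set, 0, sizeof(set));
	set.handle = create.handle;
	set.tiling_mode = I915_TILING_X;
	set.stride = 4096;	/* must be a multiple of the tile width */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) < 0)
		perror("GEM_SET_TILING");	/* e.g. no fence registers */
	else
		printf("swizzle after set: %u\n", set.swizzle_mode);

	memset(&get, 0, sizeof(get));
	get.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get) == 0)
		printf("tiling=%u swizzle=%u phys_swizzle=%u\n",
		       get.tiling_mode, get.swizzle_mode,
		       get.phys_swizzle_mode);

	close(fd);
	return 0;
}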
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include "display/intel_display.h" #include "display/intel_frontbuffer.h" #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_domain.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_lmem.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_vma.h" #define VTD_GUARD (168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); if (IS_DGFX(i915)) return false; /* * For objects created by userspace through GEM_CREATE with pat_index * set by set_pat extension, i915_gem_object_has_cache_level() will * always return true, because the coherency of such object is managed * by userspace. Othereise the call here would fall back to checking * whether the object is un-cached or write-through. */ return !(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) || i915_gem_object_has_cache_level(obj, I915_CACHE_WT)); } bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); if (obj->cache_dirty) return false; if (IS_DGFX(i915)) return false; if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) return true; /* Currently in use by HW (display engine)? Keep flushed. */ return i915_gem_object_is_framebuffer(obj); } static void flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { struct i915_vma *vma; assert_object_held(obj); if (!(obj->write_domain & flush_domains)) return; switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) i915_vma_flush_writes(vma); spin_unlock(&obj->vma.lock); i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); break; case I915_GEM_DOMAIN_WC: wmb(); break; case I915_GEM_DOMAIN_CPU: i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); break; case I915_GEM_DOMAIN_RENDER: if (gpu_write_needs_clflush(obj)) obj->cache_dirty = true; break; } obj->write_domain = 0; } static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) { /* * We manually flush the CPU domain so that we can override and * force the flush for the display, and perform it asyncrhonously. */ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); if (obj->cache_dirty) i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); obj->write_domain = 0; } void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) { if (!i915_gem_object_is_framebuffer(obj)) return; i915_gem_object_lock(obj, NULL); __i915_gem_object_flush_for_display(obj); i915_gem_object_unlock(obj); } void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj) { if (i915_gem_object_is_framebuffer(obj)) __i915_gem_object_flush_for_display(obj); } /** * i915_gem_object_set_to_wc_domain - Moves a single object to the WC read, and * possibly write domain. * @obj: object to act on * @write: ask for write access or read only * * This function returns when the move is complete, including waiting on * flushes to occur. */ int i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) { int ret; assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | (write ? 
I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) return ret; if (obj->write_domain == I915_GEM_DOMAIN_WC) return 0; /* Flush and acquire obj->pages so that we are coherent through * direct access in memory with previous cached writes through * shmemfs and that our cache domain tracking remains valid. * For example, if the obj->filp was moved to swap without us * being notified and releasing the pages, we would mistakenly * continue to assume that the obj remained out of the CPU cached * domain. */ ret = i915_gem_object_pin_pages(obj); if (ret) return ret; flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the * WC domain upon first access. */ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0) mb(); /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0); obj->read_domains |= I915_GEM_DOMAIN_WC; if (write) { obj->read_domains = I915_GEM_DOMAIN_WC; obj->write_domain = I915_GEM_DOMAIN_WC; obj->mm.dirty = true; } i915_gem_object_unpin_pages(obj); return 0; } /** * i915_gem_object_set_to_gtt_domain - Moves a single object to the GTT read, * and possibly write domain. * @obj: object to act on * @write: ask for write access or read only * * This function returns when the move is complete, including waiting on * flushes to occur. */ int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { int ret; assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | (write ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) return ret; if (obj->write_domain == I915_GEM_DOMAIN_GTT) return 0; /* Flush and acquire obj->pages so that we are coherent through * direct access in memory with previous cached writes through * shmemfs and that our cache domain tracking remains valid. * For example, if the obj->filp was moved to swap without us * being notified and releasing the pages, we would mistakenly * continue to assume that the obj remained out of the CPU cached * domain. */ ret = i915_gem_object_pin_pages(obj); if (ret) return ret; flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the * GTT domain upon first access. */ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0) mb(); /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->read_domains |= I915_GEM_DOMAIN_GTT; if (write) { struct i915_vma *vma; obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = I915_GEM_DOMAIN_GTT; obj->mm.dirty = true; spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) i915_vma_set_ggtt_write(vma); spin_unlock(&obj->vma.lock); } i915_gem_object_unpin_pages(obj); return 0; } /** * i915_gem_object_set_cache_level - Changes the cache-level of an object across all VMA. * @obj: object to act on * @cache_level: new cache level to set for the object * * After this function returns, the object will be in the new cache-level * across all GTT and the contents of the backing storage will be coherent, * with respect to the new cache-level. 
In order to keep the backing storage * coherent for all users, we only allow a single cache level to be set * globally on the object and prevent it from being changed whilst the * hardware is reading from the object. That is if the object is currently * on the scanout it will be set to uncached (or equivalent display * cache coherency) and all non-MOCS GPU access will also be uncached so * that all direct access to the scanout remains coherent. */ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { int ret; /* * For objects created by userspace through GEM_CREATE with pat_index * set by set_pat extension, simply return 0 here without touching * the cache setting, because such objects should have an immutable * cache setting by desgin and always managed by userspace. */ if (i915_gem_object_has_cache_level(obj, cache_level)) return 0; ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (ret) return ret; /* Always invalidate stale cachelines */ i915_gem_object_set_cache_coherency(obj, cache_level); obj->cache_dirty = true; /* The cache-level will be applied when each vma is rebound. */ return i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE | I915_GEM_OBJECT_UNBIND_BARRIER); } int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; int err = 0; if (IS_DGFX(to_i915(dev))) return -ENODEV; rcu_read_lock(); obj = i915_gem_object_lookup_rcu(file, args->handle); if (!obj) { err = -ENOENT; goto out; } /* * This ioctl should be disabled for the objects with pat_index * set by user space. */ if (obj->pat_set_by_user) { err = -EOPNOTSUPP; goto out; } if (i915_gem_object_has_cache_level(obj, I915_CACHE_LLC) || i915_gem_object_has_cache_level(obj, I915_CACHE_L3_LLC)) args->caching = I915_CACHING_CACHED; else if (i915_gem_object_has_cache_level(obj, I915_CACHE_WT)) args->caching = I915_CACHING_DISPLAY; else args->caching = I915_CACHING_NONE; out: rcu_read_unlock(); return err; } int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; enum i915_cache_level level; int ret = 0; if (IS_DGFX(i915)) return -ENODEV; if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) return -EOPNOTSUPP; switch (args->caching) { case I915_CACHING_NONE: level = I915_CACHE_NONE; break; case I915_CACHING_CACHED: /* * Due to a HW issue on BXT A stepping, GPU stores via a * snooped mapping may leave stale data in a corresponding CPU * cacheline, whereas normally such cachelines would get * invalidated. */ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) return -ENODEV; level = I915_CACHE_LLC; break; case I915_CACHING_DISPLAY: level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; break; default: return -EINVAL; } obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; /* * This ioctl should be disabled for the objects with pat_index * set by user space. */ if (obj->pat_set_by_user) { ret = -EOPNOTSUPP; goto out; } /* * The caching mode of proxy object is handled by its generator, and * not allowed to be changed by userspace. 
*/ if (i915_gem_object_is_proxy(obj)) { /* * Silently allow cached for userptr; the vulkan driver * sets all objects to cached */ if (!i915_gem_object_is_userptr(obj) || args->caching != I915_CACHING_CACHED) ret = -ENXIO; goto out; } ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) goto out; ret = i915_gem_object_set_cache_level(obj, level); i915_gem_object_unlock(obj); out: i915_gem_object_put(obj); return ret; } /* * Prepare buffer for display plane (scanout, cursors, etc). Can be called from * an uninterruptible phase (modesetting) and allows any flushes to be pipelined * (for pageflips). We only flush the caches while preparing the buffer for * display, the callers are responsible for frontbuffer flush. */ struct i915_vma * i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, struct i915_gem_ww_ctx *ww, u32 alignment, const struct i915_gtt_view *view, unsigned int flags) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; int ret; /* Frame buffer must be in LMEM */ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) return ERR_PTR(-EINVAL); /* * The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is * done with uncached PTEs. This is lowest common denominator for all * chipsets. * * However for gen6+, we could do better by using the GFDT bit instead * of uncaching, which would allow us to flush all the LLC-cached data * with that bit in the PTE to main memory with just one PIPE_CONTROL. */ ret = i915_gem_object_set_cache_level(obj, HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE); if (ret) return ERR_PTR(ret); /* VT-d may overfetch before/after the vma, so pad with scratch */ if (intel_scanout_needs_vtd_wa(i915)) { unsigned int guard = VTD_GUARD; if (i915_gem_object_is_tiled(obj)) guard = max(guard, i915_gem_object_get_tile_row_size(obj)); flags |= PIN_OFFSET_GUARD | guard; } /* * As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. However, * it may simply be too big to fit into mappable, in which case * put it anyway and hope that userspace can cope (but always first * try to preserve the existing ABI). */ vma = ERR_PTR(-ENOSPC); if ((flags & PIN_MAPPABLE) == 0 && (!view || view->type == I915_GTT_VIEW_NORMAL)) vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment, flags | PIN_MAPPABLE | PIN_NONBLOCK); if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment, flags); if (IS_ERR(vma)) return vma; vma->display_alignment = max(vma->display_alignment, alignment); i915_vma_mark_scanout(vma); i915_gem_object_flush_if_display_locked(obj); return vma; } /** * i915_gem_object_set_to_cpu_domain - Moves a single object to the CPU read, * and possibly write domain. * @obj: object to act on * @write: requesting write or read-only access * * This function returns when the move is complete, including waiting on * flushes to occur. */ int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { int ret; assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | (write ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) return ret; flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* Flush the CPU cache if it's still invalid. 
*/ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); obj->read_domains |= I915_GEM_DOMAIN_CPU; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU); /* If we're writing through the CPU, then the GPU read domains will * need to be invalidated at next use. */ if (write) __start_cpu_write(obj); return 0; } /** * i915_gem_set_domain_ioctl - Called when user space prepares to use an * object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. * @dev: drm device * @data: ioctl data blob * @file: drm file */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_set_domain *args = data; struct drm_i915_gem_object *obj; u32 read_domains = args->read_domains; u32 write_domain = args->write_domain; int err; if (IS_DGFX(to_i915(dev))) return -ENODEV; /* Only handle setting domains to types used by the CPU. */ if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) return -EINVAL; /* * Having something in the write domain implies it's in the read * domain, and only that read domain. Enforce that in the request. */ if (write_domain && read_domains != write_domain) return -EINVAL; if (!read_domains) return 0; obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; /* * Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. */ err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | (write_domain ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (err) goto out; if (i915_gem_object_is_userptr(obj)) { /* * Try to grab userptr pages, iris uses set_domain to check * userptr validity */ err = i915_gem_object_userptr_validate(obj); if (!err) err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | (write_domain ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); goto out; } /* * Proxy objects do not control access to the backing storage, ergo * they cannot be used as a means to manipulate the cache domain * tracking for that backing storage. The proxy object is always * considered to be outside of any cache domain. */ if (i915_gem_object_is_proxy(obj)) { err = -ENXIO; goto out; } err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out; /* * Flush and acquire obj->pages so that we are coherent through * direct access in memory with previous cached writes through * shmemfs and that our cache domain tracking remains valid. * For example, if the obj->filp was moved to swap without us * being notified and releasing the pages, we would mistakenly * continue to assume that the obj remained out of the CPU cached * domain. */ err = i915_gem_object_pin_pages(obj); if (err) goto out_unlock; /* * Already in the desired write domain? Nothing for us to do! * * We apply a little bit of cunning here to catch a broader set of * no-ops. If obj->write_domain is set, we must be in the same * obj->read_domains, and only that domain. Therefore, if that * obj->write_domain matches the request read_domains, we are * already in the same read/write domain and can skip the operation, * without having to further check the requested write_domain. 
*/ if (READ_ONCE(obj->write_domain) == read_domains) goto out_unpin; if (read_domains & I915_GEM_DOMAIN_WC) err = i915_gem_object_set_to_wc_domain(obj, write_domain); else if (read_domains & I915_GEM_DOMAIN_GTT) err = i915_gem_object_set_to_gtt_domain(obj, write_domain); else err = i915_gem_object_set_to_cpu_domain(obj, write_domain); out_unpin: i915_gem_object_unpin_pages(obj); out_unlock: i915_gem_object_unlock(obj); if (!err && write_domain) i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); out: i915_gem_object_put(obj); return err; } /* * Pins the specified object's pages and synchronizes the object with * GPU accesses. Sets needs_clflush to non-zero if the caller should * flush the object from the CPU cache. */ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, unsigned int *needs_clflush) { int ret; *needs_clflush = 0; if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); if (ret) return ret; ret = i915_gem_object_pin_pages(obj); if (ret) return ret; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || !static_cpu_has(X86_FEATURE_CLFLUSH)) { ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) goto err_unpin; else goto out; } flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu read domain, set ourself into the gtt * read domain and manually flush cachelines (if required). This * optimizes for the case when the gpu will dirty the data * anyway again before the next pread happens. */ if (!obj->cache_dirty && !(obj->read_domains & I915_GEM_DOMAIN_CPU)) *needs_clflush = CLFLUSH_BEFORE; out: /* return with the pages pinned */ return 0; err_unpin: i915_gem_object_unpin_pages(obj); return ret; } int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, unsigned int *needs_clflush) { int ret; *needs_clflush = 0; if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (ret) return ret; ret = i915_gem_object_pin_pages(obj); if (ret) return ret; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || !static_cpu_has(X86_FEATURE_CLFLUSH)) { ret = i915_gem_object_set_to_cpu_domain(obj, true); if (ret) goto err_unpin; else goto out; } flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu write domain, set ourself into the * gtt write domain and manually flush cachelines (as required). * This optimizes for the case when the gpu will use the data * right away and we therefore have to clflush anyway. */ if (!obj->cache_dirty) { *needs_clflush |= CLFLUSH_AFTER; /* * Same trick applies to invalidate partially written * cachelines read before writing. */ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU)) *needs_clflush |= CLFLUSH_BEFORE; } out: i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); obj->mm.dirty = true; /* return with the pages pinned */ return 0; err_unpin: i915_gem_object_unpin_pages(obj); return ret; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_domain.c
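i915_gem_set_domain_ioctl() above accepts only CPU-visible domains, requires that any write domain be mirrored exactly in read_domains, and refuses the call outright with -ENODEV on discrete GPUs. A minimal sketch of the usual "move to CPU domain before CPU access" call follows; the device path and buffer size are placeholders, and error handling is trimmed to the essentials.

/* Hypothetical sketch: move a freshly created buffer into the CPU write
 * domain, as userspace typically does before touching it through a CPU
 * mapping.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h>

static int set_cpu_domain(int fd, __u32 handle, int for_write)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	/* a write domain must match the (single) read domain exactly */
	sd.write_domain = for_write ? I915_GEM_DOMAIN_CPU : 0;

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

int main(void)
{
	struct drm_i915_gem_create create;
	int fd;

	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&create, 0, sizeof(create));
	create.size = 4096;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0) {
		perror("GEM_CREATE");
		close(fd);
		return 1;
	}

	if (set_cpu_domain(fd, create.handle, 1))
		perror("GEM_SET_DOMAIN");	/* -ENODEV on discrete GPUs */
	else
		printf("handle %u is now CPU-writable\n", create.handle);

	close(fd);
	return 0;
}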
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/dma-fence-array.h> #include "gt/intel_engine.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" static __always_inline u32 __busy_read_flag(u16 id) { if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffff0000u; GEM_BUG_ON(id >= 16); return 0x10000u << id; } static __always_inline u32 __busy_write_id(u16 id) { /* * The uABI guarantees an active writer is also amongst the read * engines. This would be true if we accessed the activity tracking * under the lock, but as we perform the lookup of the object and * its activity locklessly we can not guarantee that the last_write * being active implies that we have set the same engine flag from * last_read - hence we always set both read and write busy for * last_write. */ if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffffffffu; return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int __busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; /* * We have to check the current hw status of the fence as the uABI * guarantees forward progress. We could rely on the idle worker * to eventually flush us, but to minimise latency just ask the * hardware. * * Note we only report on the status of native fences and we currently * have two native fences: * * 1. A composite fence (dma_fence_array) constructed of i915 requests * created during a parallel submission. In this case we deconstruct the * composite fence into individual i915 requests and check the status of * each request. * * 2. A single i915 request. */ if (dma_fence_is_array(fence)) { struct dma_fence_array *array = to_dma_fence_array(fence); struct dma_fence **child = array->fences; unsigned int nchild = array->num_fences; do { struct dma_fence *current_fence = *child++; /* Not an i915 fence, can't be busy per above */ if (!dma_fence_is_i915(current_fence) || !test_bit(I915_FENCE_FLAG_COMPOSITE, &current_fence->flags)) { return 0; } rq = to_request(current_fence); if (!i915_request_completed(rq)) return flag(rq->engine->uabi_class); } while (--nchild); /* All requests in array complete, not busy */ return 0; } else { if (!dma_fence_is_i915(fence)) return 0; rq = to_request(fence); if (i915_request_completed(rq)) return 0; /* Beware type-expansion follies! */ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); return flag(rq->engine->uabi_class); } } static __always_inline unsigned int busy_check_reader(struct dma_fence *fence) { return __busy_set_if_active(fence, __busy_read_flag); } static __always_inline unsigned int busy_check_writer(struct dma_fence *fence) { if (!fence) return 0; return __busy_set_if_active(fence, __busy_write_id); } int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_busy *args = data; struct drm_i915_gem_object *obj; struct dma_resv_iter cursor; struct dma_fence *fence; int err; err = -ENOENT; rcu_read_lock(); obj = i915_gem_object_lookup_rcu(file, args->handle); if (!obj) goto out; /* * A discrepancy here is that we do not report the status of * non-i915 fences, i.e. even though we may report the object as idle, * a call to set-domain may still stall waiting for foreign rendering. * This also means that wait-ioctl may report an object as busy, * where busy-ioctl considers it idle. * * We trade the ability to warn of foreign fences to report on which * i915 engines are active for the object. 
 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
	args->busy = 0;
	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			args->busy = 0;

		if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
			/* Translate the write fences to the READ *and* WRITE engine */
			args->busy |= busy_check_writer(fence);
		else
			/* Translate read fences to READ set of engines */
			args->busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);

	err = 0;
out:
	rcu_read_unlock();
	return err;
}
linux-master
drivers/gpu/drm/i915/gem/i915_gem_busy.c
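The busy ioctl above packs its answer into a single bitmask: per __busy_read_flag() and __busy_write_id(), bits 16..31 carry one bit per engine class with an active reader, while bits 0..15 hold the writer's engine class plus one (zero meaning no active writer). A small decoding sketch, assuming an fd and GEM handle obtained as in the earlier sketches:

/* Hypothetical sketch: decode the result of DRM_IOCTL_I915_GEM_BUSY.
 * Per the helpers above, bits 16..31 form a per-engine-class reader mask
 * and bits 0..15 hold "writer engine class + 1" (0 means no active writer).
 */
#include <stdio.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>

static void report_busy(int fd, __u32 handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };
	unsigned int cls;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) < 0) {
		perror("GEM_BUSY");
		return;
	}

	if (!busy.busy) {
		printf("handle %u: idle\n", handle);
		return;
	}

	if (busy.busy & 0xffff)
		printf("handle %u: written by engine class %u\n",
		       handle, (busy.busy & 0xffff) - 1);

	for (cls = 0; cls < 16; cls++)
		if (busy.busy & (0x10000u << cls))
			printf("handle %u: read by engine class %u\n",
			       handle, cls);
}

report_busy() is meant to be compiled alongside code like the earlier sketches and called with the same render-node fd and GEM handle; note the caveat in the source above that foreign (non-i915) fences are not reported, so an "idle" result does not rule out pending foreign rendering.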
/* * SPDX-License-Identifier: MIT * * Copyright © 2008-2012 Intel Corporation */ #include <linux/errno.h> #include <linux/mutex.h> #include <drm/drm_mm.h> #include <drm/i915_drm.h> #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "gt/intel_gt.h" #include "gt/intel_gt_mcr.h" #include "gt/intel_gt_regs.h" #include "gt/intel_region_lmem.h" #include "i915_drv.h" #include "i915_gem_stolen.h" #include "i915_pci.h" #include "i915_reg.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_mchbar_regs.h" #include "intel_pci_config.h" /* * The BIOS typically reserves some of the system's memory for the exclusive * use of the integrated graphics. This memory is no longer available for * use by the OS and so the user finds that his system has less memory * available than he put in. We refer to this memory as stolen. * * The BIOS will allocate its framebuffer from the stolen memory. Our * goal is try to reuse that object for our own fbcon which must always * be available for panics. Anything else we can reuse the stolen memory * for is a boon. */ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment, u64 start, u64 end) { int ret; if (!drm_mm_initialized(&i915->mm.stolen)) return -ENODEV; /* WaSkipStolenMemoryFirstPage:bdw+ */ if (GRAPHICS_VER(i915) >= 8 && start < 4096) start = 4096; mutex_lock(&i915->mm.stolen_lock); ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, size, alignment, 0, start, end, DRM_MM_INSERT_BEST); mutex_unlock(&i915->mm.stolen_lock); return ret; } int i915_gem_stolen_insert_node(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment) { return i915_gem_stolen_insert_node_in_range(i915, node, size, alignment, I915_GEM_STOLEN_BIAS, U64_MAX); } void i915_gem_stolen_remove_node(struct drm_i915_private *i915, struct drm_mm_node *node) { mutex_lock(&i915->mm.stolen_lock); drm_mm_remove_node(node); mutex_unlock(&i915->mm.stolen_lock); } static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm) { return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start; } static int adjust_stolen(struct drm_i915_private *i915, struct resource *dsm) { struct i915_ggtt *ggtt = to_gt(i915)->ggtt; struct intel_uncore *uncore = ggtt->vm.gt->uncore; if (!valid_stolen_size(i915, dsm)) return -EINVAL; /* * Make sure we don't clobber the GTT if it's within stolen memory * * TODO: We have yet too encounter the case where the GTT wasn't at the * end of stolen. With that assumption we could simplify this. 
*/ if (GRAPHICS_VER(i915) <= 4 && !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { struct resource stolen[2] = {*dsm, *dsm}; struct resource ggtt_res; resource_size_t ggtt_start; ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); if (GRAPHICS_VER(i915) == 4) ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; else ggtt_start &= PGTBL_ADDRESS_LO_MASK; ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4); if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) stolen[0].end = ggtt_res.start; if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) stolen[1].start = ggtt_res.end; /* Pick the larger of the two chunks */ if (resource_size(&stolen[0]) > resource_size(&stolen[1])) *dsm = stolen[0]; else *dsm = stolen[1]; if (stolen[0].start != stolen[1].start || stolen[0].end != stolen[1].end) { drm_dbg(&i915->drm, "GTT within stolen memory at %pR\n", &ggtt_res); drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", dsm); } } if (!valid_stolen_size(i915, dsm)) return -EINVAL; return 0; } static int request_smem_stolen(struct drm_i915_private *i915, struct resource *dsm) { struct resource *r; /* * With stolen lmem, we don't need to request system memory for the * address range since it's local to the gpu. * * Starting MTL, in IGFX devices the stolen memory is exposed via * LMEMBAR and shall be considered similar to stolen lmem. */ if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915)) return 0; /* * Verify that nothing else uses this physical address. Stolen * memory should be reserved by the BIOS and hidden from the * kernel. So if the region is already marked as busy, something * is seriously wrong. */ r = devm_request_mem_region(i915->drm.dev, dsm->start, resource_size(dsm), "Graphics Stolen Memory"); if (r == NULL) { /* * One more attempt but this time requesting region from * start + 1, as we have seen that this resolves the region * conflict with the PCI Bus. * This is a BIOS w/a: Some BIOS wrap stolen in the root * PCI bus, but have an off-by-one error. Hence retry the * reservation starting from 1 instead of 0. * There's also BIOS with off-by-one on the other end. */ r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, resource_size(dsm) - 2, "Graphics Stolen Memory"); /* * GEN3 firmware likes to smash pci bridges into the stolen * range. Apparently this works. */ if (!r && GRAPHICS_VER(i915) != 3) { drm_err(&i915->drm, "conflict detected with stolen region: %pR\n", dsm); return -EBUSY; } } return 0; } static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) { if (!drm_mm_initialized(&i915->mm.stolen)) return; drm_mm_takedown(&i915->mm.stolen); } static void g4x_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, IS_GM45(i915) ? CTG_STOLEN_RESERVED : ELK_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.stolen.end + 1; drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", IS_GM45(i915) ? "CTG" : "ELK", reg_val); if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) return; /* * Whether ILK really reuses the ELK register for this is unclear. * Let's see if we catch anyone with this supposedly enabled on ILK. */ drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, "ILK stolen reserved found? 
0x%08x\n", reg_val); if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) return; *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; drm_WARN_ON(&i915->drm, (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); *size = stolen_top - *base; } static void gen6_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { case GEN6_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; case GEN6_STOLEN_RESERVED_512K: *size = 512 * 1024; break; case GEN6_STOLEN_RESERVED_256K: *size = 256 * 1024; break; case GEN6_STOLEN_RESERVED_128K: *size = 128 * 1024; break; default: *size = 1024 * 1024; MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); } } static void vlv_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.stolen.end + 1; drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { default: MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); fallthrough; case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; } /* * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the * reserved location as (top - size). */ *base = stolen_top - *size; } static void gen7_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; case GEN7_STOLEN_RESERVED_256K: *size = 256 * 1024; break; default: *size = 1024 * 1024; MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); } } static void chv_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { case GEN8_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; case GEN8_STOLEN_RESERVED_2M: *size = 2 * 1024 * 1024; break; case GEN8_STOLEN_RESERVED_4M: *size = 4 * 1024 * 1024; break; case GEN8_STOLEN_RESERVED_8M: *size = 8 * 1024 * 1024; break; default: *size = 8 * 1024 * 1024; MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); } } static void bdw_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.stolen.end + 1; drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) return; *base = reg_val & 
GEN6_STOLEN_RESERVED_ADDR_MASK; *size = stolen_top - *base; } static void icl_get_stolen_reserved(struct drm_i915_private *i915, struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { case GEN8_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; case GEN8_STOLEN_RESERVED_2M: *size = 2 * 1024 * 1024; break; case GEN8_STOLEN_RESERVED_4M: *size = 4 * 1024 * 1024; break; case GEN8_STOLEN_RESERVED_8M: *size = 8 * 1024 * 1024; break; default: *size = 8 * 1024 * 1024; MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); } if (HAS_LMEMBAR_SMEM_STOLEN(i915)) /* the base is initialized to stolen top so subtract size to get base */ *base -= *size; else *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; } /* * Initialize i915->dsm.reserved to contain the reserved space within the Data * Stolen Memory. This is a range on the top of DSM that is reserved, not to * be used by driver, so must be excluded from the region passed to the * allocator later. In the spec this is also called as WOPCM. * * Our expectation is that the reserved space is at the top of the stolen * region, as it has been the case for every platform, and *never* at the * bottom, so the calculation here can be simplified. */ static int init_reserved_stolen(struct drm_i915_private *i915) { struct intel_uncore *uncore = &i915->uncore; resource_size_t reserved_base, stolen_top; resource_size_t reserved_size; int ret = 0; stolen_top = i915->dsm.stolen.end + 1; reserved_base = stolen_top; reserved_size = 0; if (GRAPHICS_VER(i915) >= 11) { icl_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 8) { if (IS_LP(i915)) chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else bdw_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 7) { if (IS_VALLEYVIEW(i915)) vlv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else gen7_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 6) { gen6_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { g4x_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); } /* No reserved stolen */ if (reserved_base == stolen_top) goto bail_out; if (!reserved_base) { drm_err(&i915->drm, "inconsistent reservation %pa + %pa; ignoring\n", &reserved_base, &reserved_size); ret = -EINVAL; goto bail_out; } i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size); if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) { drm_err(&i915->drm, "Stolen reserved area %pR outside stolen memory %pR\n", &i915->dsm.reserved, &i915->dsm.stolen); ret = -EINVAL; goto bail_out; } return 0; bail_out: i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0); return ret; } static int i915_gem_init_stolen(struct intel_memory_region *mem) { struct drm_i915_private *i915 = mem->i915; mutex_init(&i915->mm.stolen_lock); if (intel_vgpu_active(i915)) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "iGVT-g active"); return -ENOSPC; } if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); return -ENOSPC; } if (adjust_stolen(i915, &mem->region)) return -ENOSPC; if (request_smem_stolen(i915, 
&mem->region)) return -ENOSPC; i915->dsm.stolen = mem->region; if (init_reserved_stolen(i915)) return -ENOSPC; /* Exclude the reserved region from driver use */ mem->region.end = i915->dsm.reserved.start - 1; mem->io_size = min(mem->io_size, resource_size(&mem->region)); i915->dsm.usable_size = resource_size(&mem->region); drm_dbg(&i915->drm, "Memory reserved for graphics device: %lluK, usable: %lluK\n", (u64)resource_size(&i915->dsm.stolen) >> 10, (u64)i915->dsm.usable_size >> 10); if (i915->dsm.usable_size == 0) return -ENOSPC; /* Basic memrange allocator for stolen space. */ drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size); /* * Access to stolen lmem beyond certain size for MTL A0 stepping * would crash the machine. Disable stolen lmem for userspace access * by setting usable_size to zero. */ if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0) i915->dsm.usable_size = 0; return 0; } static void dbg_poison(struct i915_ggtt *ggtt, dma_addr_t addr, resource_size_t size, u8 x) { #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) if (!drm_mm_node_allocated(&ggtt->error_capture)) return; if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) return; /* beware stop_machine() inversion */ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); mutex_lock(&ggtt->error_mutex); while (size) { void __iomem *s; ggtt->vm.insert_page(&ggtt->vm, addr, ggtt->error_capture.start, i915_gem_get_pat_index(ggtt->vm.i915, I915_CACHE_NONE), 0); mb(); s = io_mapping_map_wc(&ggtt->iomap, ggtt->error_capture.start, PAGE_SIZE); memset_io(s, x, PAGE_SIZE); io_mapping_unmap(s); addr += PAGE_SIZE; size -= PAGE_SIZE; } mb(); ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); mutex_unlock(&ggtt->error_mutex); #endif } static struct sg_table * i915_pages_create_for_stolen(struct drm_device *dev, resource_size_t offset, resource_size_t size) { struct drm_i915_private *i915 = to_i915(dev); struct sg_table *st; struct scatterlist *sg; GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen))); /* We hide that we have no struct page backing our stolen object * by wrapping the contiguous physical allocation with a fake * dma mapping in a single scatterlist. 
*/ st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) return ERR_PTR(-ENOMEM); if (sg_alloc_table(st, 1, GFP_KERNEL)) { kfree(st); return ERR_PTR(-ENOMEM); } sg = st->sgl; sg->offset = 0; sg->length = size; sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset; sg_dma_len(sg) = size; return st; } static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *pages = i915_pages_create_for_stolen(obj->base.dev, obj->stolen->start, obj->stolen->size); if (IS_ERR(pages)) return PTR_ERR(pages); dbg_poison(to_gt(i915)->ggtt, sg_dma_address(pages->sgl), sg_dma_len(pages->sgl), POISON_INUSE); __i915_gem_object_set_pages(obj, pages); return 0; } static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, struct sg_table *pages) { struct drm_i915_private *i915 = to_i915(obj->base.dev); /* Should only be called from i915_gem_object_release_stolen() */ dbg_poison(to_gt(i915)->ggtt, sg_dma_address(pages->sgl), sg_dma_len(pages->sgl), POISON_FREE); sg_free_table(pages); kfree(pages); } static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); GEM_BUG_ON(!stolen); i915_gem_stolen_remove_node(i915, stolen); kfree(stolen); i915_gem_object_release_memory_region(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { .name = "i915_gem_object_stolen", .get_pages = i915_gem_object_get_pages_stolen, .put_pages = i915_gem_object_put_pages_stolen, .release = i915_gem_object_release_stolen, }; static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, struct drm_mm_node *stolen) { static struct lock_class_key lock_class; unsigned int cache_level; unsigned int flags; int err; /* * Stolen objects are always physically contiguous since we just * allocate one big block underneath using the drm_mm range allocator. */ flags = I915_BO_ALLOC_CONTIGUOUS; drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); obj->stolen = stolen; obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) return -EBUSY; i915_gem_object_init_memory_region(obj, mem); err = i915_gem_object_pin_pages(obj); if (err) i915_gem_object_release_memory_region(obj); i915_gem_object_unlock(obj); return err; } static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, resource_size_t offset, resource_size_t size, resource_size_t page_size, unsigned int flags) { struct drm_i915_private *i915 = mem->i915; struct drm_mm_node *stolen; int ret; if (!drm_mm_initialized(&i915->mm.stolen)) return -ENODEV; if (size == 0) return -EINVAL; /* * With discrete devices, where we lack a mappable aperture there is no * possible way to ever access this memory on the CPU side. 
*/ if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size && !(flags & I915_BO_ALLOC_GPU_ONLY)) return -ENOSPC; stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); if (!stolen) return -ENOMEM; if (offset != I915_BO_INVALID_OFFSET) { drm_dbg(&i915->drm, "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", &offset, &size); stolen->start = offset; stolen->size = size; mutex_lock(&i915->mm.stolen_lock); ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); mutex_unlock(&i915->mm.stolen_lock); } else { ret = i915_gem_stolen_insert_node(i915, stolen, size, mem->min_page_size); } if (ret) goto err_free; ret = __i915_gem_object_create_stolen(mem, obj, stolen); if (ret) goto err_remove; return 0; err_remove: i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); return ret; } struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size) { return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); } static int init_stolen_smem(struct intel_memory_region *mem) { int err; /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. */ err = i915_gem_init_stolen(mem); if (err) drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n"); return 0; } static int release_stolen_smem(struct intel_memory_region *mem) { i915_gem_cleanup_stolen(mem->i915); return 0; } static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { .init = init_stolen_smem, .release = release_stolen_smem, .init_object = _i915_gem_object_stolen_init, }; static int init_stolen_lmem(struct intel_memory_region *mem) { struct drm_i915_private *i915 = mem->i915; int err; if (GEM_WARN_ON(resource_size(&mem->region) == 0)) return 0; err = i915_gem_init_stolen(mem); if (err) { drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n"); return 0; } if (mem->io_size && !io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size)) goto err_cleanup; drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n", &mem->io_start); drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start); return 0; err_cleanup: i915_gem_cleanup_stolen(mem->i915); return err; } static int release_stolen_lmem(struct intel_memory_region *mem) { if (mem->io_size) io_mapping_fini(&mem->iomap); i915_gem_cleanup_stolen(mem->i915); return 0; } static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { .init = init_stolen_lmem, .release = release_stolen_lmem, .init_object = _i915_gem_object_stolen_init, }; static int mtl_get_gms_size(struct intel_uncore *uncore) { u16 ggc, gms; ggc = intel_uncore_read16(uncore, GGC); /* check GGMS, should be fixed 0x3 (8MB) */ if ((ggc & GGMS_MASK) != GGMS_MASK) return -EIO; /* return valid GMS value, -EIO if invalid */ gms = REG_FIELD_GET(GMS_MASK, ggc); switch (gms) { case 0x0 ... 0x04: return gms * 32; case 0xf0 ... 
0xfe: return (gms - 0xf0 + 1) * 4; default: MISSING_CASE(gms); return -EIO; } } struct intel_memory_region * i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, u16 instance) { struct intel_uncore *uncore = &i915->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); resource_size_t dsm_size, dsm_base, lmem_size; struct intel_memory_region *mem; resource_size_t io_start, io_size; resource_size_t min_page_size; int ret; if (WARN_ON_ONCE(instance)) return ERR_PTR(-ENODEV); if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) return ERR_PTR(-ENXIO); if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) { lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR); } else { resource_size_t lmem_range; lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF; lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; lmem_size *= SZ_1G; } if (HAS_LMEMBAR_SMEM_STOLEN(i915)) { /* * MTL dsm size is in GGC register. * Also MTL uses offset to GSMBASE in ptes, so i915 * uses dsm_base = 8MBs to setup stolen region, since * DSMBASE = GSMBASE + 8MB. */ ret = mtl_get_gms_size(uncore); if (ret < 0) { drm_err(&i915->drm, "invalid MTL GGC register setting\n"); return ERR_PTR(ret); } dsm_base = SZ_8M; dsm_size = (resource_size_t)(ret * SZ_1M); GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M); GEM_BUG_ON((dsm_base + dsm_size) > lmem_size); } else { /* Use DSM base address instead for stolen memory */ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK; if (WARN_ON(lmem_size < dsm_base)) return ERR_PTR(-ENODEV); dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M); } if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { io_start = 0; io_size = 0; } else { io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; io_size = dsm_size; } min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : I915_GTT_PAGE_SIZE_4K; mem = intel_memory_region_create(i915, dsm_base, dsm_size, min_page_size, io_start, io_size, type, instance, &i915_region_stolen_lmem_ops); if (IS_ERR(mem)) return mem; intel_memory_region_set_name(mem, "stolen-local"); mem->private = true; return mem; } struct intel_memory_region* i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, u16 instance) { struct intel_memory_region *mem; mem = intel_memory_region_create(i915, intel_graphics_stolen_res.start, resource_size(&intel_graphics_stolen_res), PAGE_SIZE, 0, 0, type, instance, &i915_region_stolen_smem_ops); if (IS_ERR(mem)) return mem; intel_memory_region_set_name(mem, "stolen-system"); mem->private = true; return mem; } bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) { return obj->ops == &i915_gem_object_stolen_ops; } bool i915_gem_stolen_initialized(const struct drm_i915_private *i915) { return drm_mm_initialized(&i915->mm.stolen); } u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915) { return i915->dsm.stolen.start; } u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915) { return resource_size(&i915->dsm.stolen); } u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915, const struct drm_mm_node *node) { return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node); } bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node) { return drm_mm_node_allocated(node); } u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node) { return node->start; } u64 i915_gem_stolen_node_size(const struct drm_mm_node *node) { return node->size; }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
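The mtl_get_gms_size() helper in the file above decodes the data stolen memory size from the GGC register. Below is a minimal userspace C sketch of that decoding so the encoding can be inspected outside the driver; the mask and shift values are assumptions standing in for the driver's GGMS_MASK/GMS_MASK definitions, and all sketch_* names are made up for illustration.

/*
 * Standalone sketch of the GMS decoding performed by mtl_get_gms_size().
 * Field positions (GMS in bits 15:8, GGMS in bits 7:6) are assumed here;
 * the driver reads them through its own GMS_MASK/GGMS_MASK macros.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GGMS_MASK 0x00c0u        /* bits 7:6, expected fixed 0x3 (8 MiB) */
#define SKETCH_GMS_SHIFT 8
#define SKETCH_GMS_MASK  0xff00u        /* bits 15:8, data stolen memory size code */

/* Return the data stolen memory size in MiB, or -1 for an invalid encoding. */
static int sketch_decode_gms(uint16_t ggc)
{
        unsigned int gms;

        if ((ggc & SKETCH_GGMS_MASK) != SKETCH_GGMS_MASK)
                return -1;      /* GGMS is expected to read back as the fixed value 0x3 */

        gms = (ggc & SKETCH_GMS_MASK) >> SKETCH_GMS_SHIFT;
        if (gms <= 0x04)
                return gms * 32;                /* 0x0..0x4 encode multiples of 32 MiB */
        if (gms >= 0xf0 && gms <= 0xfe)
                return (gms - 0xf0 + 1) * 4;    /* 0xf0..0xfe encode 4..60 MiB in 4 MiB steps */
        return -1;
}

int main(void)
{
        uint16_t ggc = 0x02c0;  /* GMS code 0x02 with GGMS == 0x3 decodes to 64 MiB */

        printf("GGC 0x%04x -> %d MiB\n", (unsigned int)ggc, sketch_decode_gms(ggc));
        return 0;
}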
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct clflush {
        struct dma_fence_work base;
        struct drm_i915_gem_object *obj;
};

static void __do_clflush(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

static void clflush_work(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);

        __do_clflush(clflush->obj);
}

static void clflush_release(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);

        i915_gem_object_unpin_pages(clflush->obj);
        i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
        .name = "clflush",
        .work = clflush_work,
        .release = clflush_release,
};

static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
        struct clflush *clflush;

        GEM_BUG_ON(!obj->cache_dirty);

        clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (!clflush)
                return NULL;

        if (__i915_gem_object_get_pages(obj) < 0) {
                kfree(clflush);
                return NULL;
        }

        dma_fence_work_init(&clflush->base, &clflush_ops);
        clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

        return clflush;
}

bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct clflush *clflush;

        assert_object_held(obj);

        if (IS_DGFX(i915)) {
                WARN_ON_ONCE(obj->cache_dirty);
                return false;
        }

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and not need clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /* If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) &&
            obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
                return false;

        trace_i915_gem_object_clflush(obj);

        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC) &&
            dma_resv_reserve_fences(obj->base.resv, 1) == 0)
                clflush = clflush_work_create(obj);
        if (clflush) {
                i915_sw_fence_await_reservation(&clflush->base.chain,
                                                obj->base.resv, true,
                                                i915_fence_timeout(i915),
                                                I915_FENCE_GFP);
                dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
                                   DMA_RESV_USAGE_KERNEL);
                dma_fence_work_commit(&clflush->base);
                /*
                 * We must have successfully populated the pages(since we are
                 * holding a pin on the pages as per the flush worker) to reach
                 * this point, which must mean we have already done the required
                 * flush-on-acquire, hence resetting cache_dirty here should be
                 * safe.
                 */
                obj->cache_dirty = false;
        } else if (obj->mm.pages) {
                __do_clflush(obj);
                obj->cache_dirty = false;
        } else {
                GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }

        return true;
}
linux-master
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
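i915_gem_clflush_object() above decides whether a CPU cache flush is needed before handing an object to the GPU. The sketch below reduces that decision to plain booleans so the branch structure can be seen in isolation; the struct and field names are illustrative stand-ins, not the driver's types.

/*
 * Minimal sketch of the "do we need to clflush?" decision, reduced to
 * booleans. The fields mirror the object state the driver consults.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_obj_state {
        bool is_discrete;               /* discrete GPU: device memory is always coherent */
        bool has_struct_page;           /* backed by ordinary system pages */
        bool cache_coherent_for_read;   /* GPU snoops CPU writes to this object */
};

/* Returns true when the CPU cache must be flushed before GPU access. */
static bool sketch_needs_clflush(const struct sketch_obj_state *obj, bool force)
{
        if (obj->is_discrete)
                return false;   /* never cache dirty on dGPU memory */
        if (!obj->has_struct_page)
                return false;   /* stolen/WC-mapped memory is treated as coherent */
        if (!force && obj->cache_coherent_for_read)
                return false;   /* snooping keeps the caches coherent for us */
        return true;
}

int main(void)
{
        struct sketch_obj_state obj = {
                .is_discrete = false,
                .has_struct_page = true,
                .cache_coherent_for_read = false,
        };

        printf("needs clflush: %s\n", sketch_needs_clflush(&obj, false) ? "yes" : "no");
        return 0;
}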
/* * SPDX-License-Identifier: MIT * * Copyright © 2014-2016 Intel Corporation */ #include <linux/scatterlist.h> #include <linux/slab.h> #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_internal.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" #include "i915_utils.h" #define QUIET (__GFP_NORETRY | __GFP_NOWARN) #define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN) static void internal_free_pages(struct sg_table *st) { struct scatterlist *sg; for (sg = st->sgl; sg; sg = __sg_next(sg)) { if (sg_page(sg)) __free_pages(sg_page(sg), get_order(sg->length)); } sg_free_table(st); kfree(st); } static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; unsigned int npages; /* restricted by sg_alloc_table */ int max_order = MAX_ORDER; unsigned int max_segment; gfp_t gfp; if (overflows_type(obj->base.size >> PAGE_SHIFT, npages)) return -E2BIG; npages = obj->base.size >> PAGE_SHIFT; max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT; max_order = min(max_order, get_order(max_segment)); gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; if (IS_I965GM(i915) || IS_I965G(i915)) { /* 965gm cannot relocate objects above 4GiB. */ gfp &= ~__GFP_HIGHMEM; gfp |= __GFP_DMA32; } create_st: st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) return -ENOMEM; if (sg_alloc_table(st, npages, GFP_KERNEL)) { kfree(st); return -ENOMEM; } sg = st->sgl; st->nents = 0; do { int order = min(fls(npages) - 1, max_order); struct page *page; do { page = alloc_pages(gfp | (order ? QUIET : MAYFAIL), order); if (page) break; if (!order--) goto err; /* Limit subsequent allocations as well */ max_order = order; } while (1); sg_set_page(sg, page, PAGE_SIZE << order, 0); st->nents++; npages -= 1 << order; if (!npages) { sg_mark_end(sg); break; } sg = __sg_next(sg); } while (1); if (i915_gem_gtt_prepare_pages(obj, st)) { /* Failed to dma-map try again with single page sg segments */ if (get_order(st->sgl->length)) { internal_free_pages(st); max_order = 0; goto create_st; } goto err; } __i915_gem_object_set_pages(obj, st); return 0; err: sg_set_page(sg, NULL, 0, 0); sg_mark_end(sg); internal_free_pages(st); return -ENOMEM; } static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, struct sg_table *pages) { i915_gem_gtt_finish_pages(obj, pages); internal_free_pages(pages); obj->mm.dirty = false; __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { .name = "i915_gem_object_internal", .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = i915_gem_object_get_pages_internal, .put_pages = i915_gem_object_put_pages_internal, }; struct drm_i915_gem_object * __i915_gem_object_create_internal(struct drm_i915_private *i915, const struct drm_i915_gem_object_ops *ops, phys_addr_t size) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); i915_gem_object_init(obj, ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; /* * Mark the object as volatile, such that the pages are marked as * dontneed whilst they are still pinned. 
As soon as they are unpinned * they are allowed to be reaped by the shrinker, and the caller is * expected to repopulate - the contents of this object are only valid * whilst active and pinned. */ i915_gem_object_set_volatile(obj); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); return obj; } /** * i915_gem_object_create_internal: create an object with volatile pages * @i915: the i915 device * @size: the size in bytes of backing storage to allocate for the object * * Creates a new object that wraps some internal memory for private use. * This object is not backed by swappable storage, and as such its contents * are volatile and only valid whilst pinned. If the object is reaped by the * shrinker, its pages and data will be discarded. Equally, it is not a full * GEM object and so not valid for access from userspace. This makes it useful * for hardware interfaces like ringbuffers (which are pinned from the time * the request is written to the time the hardware stops accessing it), but * not for contexts (which need to be preserved when not active for later * reuse). Note that it is not cleared upon allocation. */ struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *i915, phys_addr_t size) { return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size); }
linux-master
drivers/gpu/drm/i915/gem/i915_gem_internal.c
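i915_gem_object_get_pages_internal() above fills the backing store with the largest page orders it can get, dropping the order on allocation failure and capping later attempts at the order that last worked. The standalone C sketch below simulates that strategy, with malloc() standing in for alloc_pages(); the failure threshold is artificial and only there to exercise the fallback path.

/* Userspace simulation of the descending-order allocation loop. */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SHIFT 12

/* Illustrative allocator: pretend allocations above max_ok_order fail. */
static void *sketch_alloc_pages(int order, int max_ok_order)
{
        if (order > max_ok_order)
                return NULL;
        return malloc((size_t)1 << (SKETCH_PAGE_SHIFT + order));
}

static int fls_uint(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int npages = 100;      /* ~400 KiB object */
        int max_order = 10;             /* start by attempting up to 4 MiB chunks */
        const int max_ok_order = 4;     /* pretend only 64 KiB chunks are available */

        while (npages) {
                int order = fls_uint(npages) - 1;
                void *chunk;

                if (order > max_order)
                        order = max_order;

                for (;;) {
                        chunk = sketch_alloc_pages(order, max_ok_order);
                        if (chunk)
                                break;
                        if (!order--)
                                return 1;       /* even single pages failed: give up */
                        max_order = order;      /* don't retry larger orders later */
                }

                printf("allocated order-%d chunk (%u pages), %u pages remaining\n",
                       order, 1u << order, npages - (1u << order));
                npages -= 1u << order;
                free(chunk);    /* a real backing store would keep these in an sg_table */
        }
        return 0;
}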
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "mock_dmabuf.h" static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction dir) { struct mock_dmabuf *mock = to_mock(attachment->dmabuf); struct sg_table *st; struct scatterlist *sg; int i, err; st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) return ERR_PTR(-ENOMEM); err = sg_alloc_table(st, mock->npages, GFP_KERNEL); if (err) goto err_free; sg = st->sgl; for (i = 0; i < mock->npages; i++) { sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0); sg = sg_next(sg); } err = dma_map_sgtable(attachment->dev, st, dir, 0); if (err) goto err_st; return st; err_st: sg_free_table(st); err_free: kfree(st); return ERR_PTR(err); } static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *st, enum dma_data_direction dir) { dma_unmap_sgtable(attachment->dev, st, dir, 0); sg_free_table(st); kfree(st); } static void mock_dmabuf_release(struct dma_buf *dma_buf) { struct mock_dmabuf *mock = to_mock(dma_buf); int i; for (i = 0; i < mock->npages; i++) put_page(mock->pages[i]); kfree(mock); } static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); void *vaddr; vaddr = vm_map_ram(mock->pages, mock->npages, 0); if (!vaddr) return -ENOMEM; iosys_map_set_vaddr(map, vaddr); return 0; } static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); vm_unmap_ram(map->vaddr, mock->npages); } static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { return -ENODEV; } static const struct dma_buf_ops mock_dmabuf_ops = { .map_dma_buf = mock_map_dma_buf, .unmap_dma_buf = mock_unmap_dma_buf, .release = mock_dmabuf_release, .mmap = mock_dmabuf_mmap, .vmap = mock_dmabuf_vmap, .vunmap = mock_dmabuf_vunmap, }; static struct dma_buf *mock_dmabuf(int npages) { struct mock_dmabuf *mock; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct dma_buf *dmabuf; int i; mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), GFP_KERNEL); if (!mock) return ERR_PTR(-ENOMEM); mock->npages = npages; for (i = 0; i < npages; i++) { mock->pages[i] = alloc_page(GFP_KERNEL); if (!mock->pages[i]) goto err; } exp_info.ops = &mock_dmabuf_ops; exp_info.size = npages * PAGE_SIZE; exp_info.flags = O_CLOEXEC; exp_info.priv = mock; dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) goto err; return dmabuf; err: while (i--) put_page(mock->pages[i]); kfree(mock); return ERR_PTR(-ENOMEM); }
linux-master
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
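mock_dmabuf() above allocates a page array and, if any allocation fails part way through, releases exactly the pages already obtained with a while (i--) unwind. Here is a small self-contained C illustration of that idiom, with malloc()/free() standing in for alloc_page()/put_page().

/* Allocate n slots or unwind everything allocated so far. */
#include <stdio.h>
#include <stdlib.h>

static int alloc_all_or_nothing(void **slots, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                slots[i] = malloc(4096);
                if (!slots[i])
                        goto err;
        }
        return 0;

err:
        /* i indexes the first failed slot; release everything before it. */
        while (i--)
                free(slots[i]);
        return -1;
}

int main(void)
{
        void *slots[8];
        int i;

        if (alloc_all_or_nothing(slots, 8) == 0) {
                printf("allocated all slots\n");
                for (i = 0; i < 8; i++)
                        free(slots[i]);
        }
        return 0;
}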
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include <linux/highmem.h> #include <linux/prime_numbers.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" #include "gem/i915_gem_ttm_move.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_migrate.h" #include "i915_reg.h" #include "i915_ttm_buddy_manager.h" #include "huge_gem_object.h" #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_reset.h" #include "selftests/igt_mmap.h" struct tile { unsigned int width; unsigned int height; unsigned int stride; unsigned int size; unsigned int tiling; unsigned int swizzle; }; static u64 swizzle_bit(unsigned int bit, u64 offset) { return (offset & BIT_ULL(bit)) >> (bit - 6); } static u64 tiled_offset(const struct tile *tile, u64 v) { u64 x, y; if (tile->tiling == I915_TILING_NONE) return v; y = div64_u64_rem(v, tile->stride, &x); v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height; if (tile->tiling == I915_TILING_X) { v += y * tile->width; v += div64_u64_rem(x, tile->width, &x) << tile->size; v += x; } else if (tile->width == 128) { const unsigned int ytile_span = 16; const unsigned int ytile_height = 512; v += y * ytile_span; v += div64_u64_rem(x, ytile_span, &x) * ytile_height; v += x; } else { const unsigned int ytile_span = 32; const unsigned int ytile_height = 256; v += y * ytile_span; v += div64_u64_rem(x, ytile_span, &x) * ytile_height; v += x; } switch (tile->swizzle) { case I915_BIT_6_SWIZZLE_9: v ^= swizzle_bit(9, v); break; case I915_BIT_6_SWIZZLE_9_10: v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v); break; case I915_BIT_6_SWIZZLE_9_11: v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v); break; case I915_BIT_6_SWIZZLE_9_10_11: v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v); break; } return v; } static int check_partial_mapping(struct drm_i915_gem_object *obj, const struct tile *tile, struct rnd_state *prng) { const unsigned long npages = obj->base.size / PAGE_SIZE; struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_gtt_view view; struct i915_vma *vma; unsigned long offset; unsigned long page; u32 __iomem *io; struct page *p; unsigned int n; u32 *cpu; int err; err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); if (err) { pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", tile->tiling, tile->stride, err); return err; } GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { pr_err("Failed to flush to GTT write domain; err=%d\n", err); return err; } page = i915_prandom_u32_max_state(npages, prng); view = compute_partial_view(obj, page, MIN_CHUNK_PAGES); vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { pr_err("Failed to pin partial view: offset=%lu; err=%d\n", page, (int)PTR_ERR(vma)); return PTR_ERR(vma); } n = page - view.partial.offset; GEM_BUG_ON(n >= view.partial.size); io = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(io)) { pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", page, (int)PTR_ERR(io)); err = PTR_ERR(io); goto out; } iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); i915_vma_unpin_iomap(vma); offset = 
tiled_offset(tile, page << PAGE_SHIFT); if (offset >= obj->base.size) goto out; intel_gt_flush_ggtt_writes(to_gt(i915)); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); drm_clflush_virt_range(cpu, sizeof(*cpu)); if (*cpu != (u32)page) { pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n", page, n, view.partial.offset, view.partial.size, vma->size >> PAGE_SHIFT, tile->tiling ? tile_row_pages(obj) : 0, vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, offset >> PAGE_SHIFT, (unsigned int)offset_in_page(offset), offset, (u32)page, *cpu); err = -EINVAL; } *cpu = 0; drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap(p); out: i915_gem_object_lock(obj, NULL); i915_vma_destroy(vma); i915_gem_object_unlock(obj); return err; } static int check_partial_mappings(struct drm_i915_gem_object *obj, const struct tile *tile, unsigned long end_time) { const unsigned int nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; unsigned long page; int err; err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); if (err) { pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", tile->tiling, tile->stride, err); return err; } GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { pr_err("Failed to flush to GTT write domain; err=%d\n", err); return err; } for_each_prime_number_from(page, 1, npages) { struct i915_gtt_view view = compute_partial_view(obj, page, MIN_CHUNK_PAGES); unsigned long offset; u32 __iomem *io; struct page *p; unsigned int n; u32 *cpu; GEM_BUG_ON(view.partial.size > nreal); cond_resched(); vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { pr_err("Failed to pin partial view: offset=%lu; err=%d\n", page, (int)PTR_ERR(vma)); return PTR_ERR(vma); } n = page - view.partial.offset; GEM_BUG_ON(n >= view.partial.size); io = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(io)) { pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", page, (int)PTR_ERR(io)); return PTR_ERR(io); } iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); i915_vma_unpin_iomap(vma); offset = tiled_offset(tile, page << PAGE_SHIFT); if (offset >= obj->base.size) continue; intel_gt_flush_ggtt_writes(to_gt(i915)); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); drm_clflush_virt_range(cpu, sizeof(*cpu)); if (*cpu != (u32)page) { pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n", page, n, view.partial.offset, view.partial.size, vma->size >> PAGE_SHIFT, tile->tiling ? tile_row_pages(obj) : 0, vma->fence ? 
vma->fence->id : -1, tile->tiling, tile->stride, offset >> PAGE_SHIFT, (unsigned int)offset_in_page(offset), offset, (u32)page, *cpu); err = -EINVAL; } *cpu = 0; drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap(p); if (err) return err; i915_gem_object_lock(obj, NULL); i915_vma_destroy(vma); i915_gem_object_unlock(obj); if (igt_timeout(end_time, "%s: timed out after tiling=%d stride=%d\n", __func__, tile->tiling, tile->stride)) return -EINTR; } return 0; } static unsigned int setup_tile_size(struct tile *tile, struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) <= 2) { tile->height = 16; tile->width = 128; tile->size = 11; } else if (tile->tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)) { tile->height = 32; tile->width = 128; tile->size = 12; } else { tile->height = 8; tile->width = 512; tile->size = 12; } if (GRAPHICS_VER(i915) < 4) return 8192 / tile->width; else if (GRAPHICS_VER(i915) < 7) return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width; else return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width; } static int igt_partial_tiling(void *arg) { const unsigned int nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; intel_wakeref_t wakeref; int tiling; int err; if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return 0; /* We want to check the page mapping and fencing of a large object * mmapped through the GTT. The object we create is larger than can * possibly be mmaped as a whole, and so we must use partial GGTT vma. * We then check that a write through each partial GGTT vma ends up * in the right set of pages within the object, and with the expected * tiling, which we verify by manual swizzling. */ obj = huge_gem_object(i915, nreal << PAGE_SHIFT, (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); if (IS_ERR(obj)) return PTR_ERR(obj); err = i915_gem_object_pin_pages_unlocked(obj); if (err) { pr_err("Failed to allocate %u pages (%lu total), err=%d\n", nreal, obj->base.size / PAGE_SIZE, err); goto out; } wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (1) { IGT_TIMEOUT(end); struct tile tile; tile.height = 1; tile.width = 1; tile.size = 0; tile.stride = 0; tile.swizzle = I915_BIT_6_SWIZZLE_NONE; tile.tiling = I915_TILING_NONE; err = check_partial_mappings(obj, &tile, end); if (err && err != -EINTR) goto out_unlock; } for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) { IGT_TIMEOUT(end); unsigned int max_pitch; unsigned int pitch; struct tile tile; if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) /* * The swizzling pattern is actually unknown as it * varies based on physical address of each page. * See i915_gem_detect_bit_6_swizzle(). 
*/ break; tile.tiling = tiling; switch (tiling) { case I915_TILING_X: tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; break; case I915_TILING_Y: tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; break; } GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; max_pitch = setup_tile_size(&tile, i915); for (pitch = max_pitch; pitch; pitch >>= 1) { tile.stride = tile.width * pitch; err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) goto out_unlock; if (pitch > 2 && GRAPHICS_VER(i915) >= 4) { tile.stride = tile.width * (pitch - 1); err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) goto out_unlock; } if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) { tile.stride = tile.width * (pitch + 1); err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) goto out_unlock; } } if (GRAPHICS_VER(i915) >= 4) { for_each_prime_number(pitch, max_pitch) { tile.stride = tile.width * pitch; err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) goto out_unlock; } } next_tiling: ; } out_unlock: intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_unpin_pages(obj); out: i915_gem_object_put(obj); return err; } static int igt_smoke_tiling(void *arg) { const unsigned int nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; intel_wakeref_t wakeref; I915_RND_STATE(prng); unsigned long count; IGT_TIMEOUT(end); int err; if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return 0; /* * igt_partial_tiling() does an exhastive check of partial tiling * chunking, but will undoubtably run out of time. Here, we do a * randomised search and hope over many runs of 1s with different * seeds we will do a thorough check. * * Remember to look at the st_seed if we see a flip-flop in BAT! 
*/ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return 0; obj = huge_gem_object(i915, nreal << PAGE_SHIFT, (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); if (IS_ERR(obj)) return PTR_ERR(obj); err = i915_gem_object_pin_pages_unlocked(obj); if (err) { pr_err("Failed to allocate %u pages (%lu total), err=%d\n", nreal, obj->base.size / PAGE_SIZE, err); goto out; } wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; do { struct tile tile; tile.tiling = i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng); switch (tile.tiling) { case I915_TILING_NONE: tile.height = 1; tile.width = 1; tile.size = 0; tile.stride = 0; tile.swizzle = I915_BIT_6_SWIZZLE_NONE; break; case I915_TILING_X: tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; break; case I915_TILING_Y: tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; break; } if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; if (tile.tiling != I915_TILING_NONE) { unsigned int max_pitch = setup_tile_size(&tile, i915); tile.stride = i915_prandom_u32_max_state(max_pitch, &prng); tile.stride = (1 + tile.stride) * tile.width; if (GRAPHICS_VER(i915) < 4) tile.stride = rounddown_pow_of_two(tile.stride); } err = check_partial_mapping(obj, &tile, &prng); if (err) break; count++; } while (!__igt_timeout(end, NULL)); pr_info("%s: Completed %lu trials\n", __func__, count); intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_unpin_pages(obj); out: i915_gem_object_put(obj); return err; } static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_engine_cs *engine; for_each_uabi_engine(engine, i915) { struct i915_request *rq; struct i915_vma *vma; struct i915_gem_ww_ctx ww; int err; vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); i915_gem_ww_ctx_init(&ww, false); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) goto err; rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; } err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_request_add(rq); err_unpin: i915_vma_unpin(vma); err: if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); if (err) return err; } i915_gem_object_put(obj); /* leave it only alive via its active ref */ return 0; } static enum i915_mmap_type default_mapping(struct drm_i915_private *i915) { if (HAS_LMEM(i915)) return I915_MMAP_TYPE_FIXED; return I915_MMAP_TYPE_GTT; } static struct drm_i915_gem_object * create_sys_or_internal(struct drm_i915_private *i915, unsigned long size) { if (HAS_LMEM(i915)) { struct intel_memory_region *sys_region = i915->mm.regions[INTEL_REGION_SMEM]; return __i915_gem_object_create_user(i915, size, &sys_region, 1); } return i915_gem_object_create_internal(i915, size); } static bool assert_mmap_offset(struct drm_i915_private *i915, unsigned long size, int expected) { struct drm_i915_gem_object *obj; u64 offset; int ret; obj = create_sys_or_internal(i915, size); if (IS_ERR(obj)) return expected && expected == PTR_ERR(obj); ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL); i915_gem_object_put(obj); return ret == expected; } static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_driver_unregister__shrinker(i915); intel_gt_pm_get(to_gt(i915)); 
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work); } static void restore_retire_worker(struct drm_i915_private *i915) { igt_flush_test(i915); intel_gt_pm_put(to_gt(i915)); i915_gem_driver_register__shrinker(i915); } static void mmap_offset_lock(struct drm_i915_private *i915) __acquires(&i915->drm.vma_offset_manager->vm_lock) { write_lock(&i915->drm.vma_offset_manager->vm_lock); } static void mmap_offset_unlock(struct drm_i915_private *i915) __releases(&i915->drm.vma_offset_manager->vm_lock) { write_unlock(&i915->drm.vma_offset_manager->vm_lock); } static int igt_mmap_offset_exhaustion(void *arg) { struct drm_i915_private *i915 = arg; struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; struct drm_i915_gem_object *obj; struct drm_mm_node *hole, *next; int loop, err = 0; u64 offset; int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC; /* Disable background reaper */ disable_retire_worker(i915); GEM_BUG_ON(!to_gt(i915)->awake); intel_gt_retire_requests(to_gt(i915)); i915_gem_drain_freed_objects(i915); /* Trim the device mmap space to only a page */ mmap_offset_lock(i915); loop = 1; /* PAGE_SIZE units */ list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) { struct drm_mm_node *resv; resv = kzalloc(sizeof(*resv), GFP_NOWAIT); if (!resv) { err = -ENOMEM; goto out_park; } resv->start = drm_mm_hole_node_start(hole) + loop; resv->size = hole->hole_size - loop; resv->color = -1ul; loop = 0; if (!resv->size) { kfree(resv); continue; } pr_debug("Reserving hole [%llx + %llx]\n", resv->start, resv->size); err = drm_mm_reserve_node(mm, resv); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); kfree(resv); goto out_park; } } GEM_BUG_ON(!list_is_singular(&mm->hole_stack)); mmap_offset_unlock(i915); /* Just fits! */ if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) { pr_err("Unable to insert object into single page hole\n"); err = -EINVAL; goto out; } /* Too large */ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); err = -EINVAL; goto out; } /* Fill the hole, further allocation attempts should then fail */ obj = create_sys_or_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("Unable to create object for reclaimed hole\n"); goto out; } err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL); if (err) { pr_err("Unable to insert object into reclaimed hole\n"); goto err_obj; } if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); err = -EINVAL; goto err_obj; } i915_gem_object_put(obj); /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { if (intel_gt_is_wedged(to_gt(i915))) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out; } err = make_obj_busy(obj); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); goto err_obj; } } out: mmap_offset_lock(i915); out_park: drm_mm_for_each_node_safe(hole, next, mm) { if (hole->color != -1ul) continue; drm_mm_remove_node(hole); kfree(hole); } mmap_offset_unlock(i915); restore_retire_worker(i915); return err; err_obj: i915_gem_object_put(obj); goto out; } static int gtt_set(struct drm_i915_gem_object *obj) { struct i915_vma *vma; void __iomem *map; int err = 0; vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); 
i915_vma_unpin(vma); if (IS_ERR(map)) { err = PTR_ERR(map); goto out; } memset_io(map, POISON_INUSE, obj->base.size); i915_vma_unpin_iomap(vma); out: intel_gt_pm_put(vma->vm->gt); return err; } static int gtt_check(struct drm_i915_gem_object *obj) { struct i915_vma *vma; void __iomem *map; int err = 0; vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { err = PTR_ERR(map); goto out; } if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) { pr_err("%s: Write via mmap did not land in backing store (GTT)\n", obj->mm.region->name); err = -EINVAL; } i915_vma_unpin_iomap(vma); out: intel_gt_pm_put(vma->vm->gt); return err; } static int wc_set(struct drm_i915_gem_object *obj) { void *vaddr; vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); memset(vaddr, POISON_INUSE, obj->base.size); i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); return 0; } static int wc_check(struct drm_i915_gem_object *obj) { void *vaddr; int err = 0; vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) { pr_err("%s: Write via mmap did not land in backing store (WC)\n", obj->mm.region->name); err = -EINVAL; } i915_gem_object_unpin_map(obj); return err; } static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) { struct drm_i915_private *i915 = to_i915(obj->base.dev); bool no_map; if (obj->ops->mmap_offset) return type == I915_MMAP_TYPE_FIXED; else if (type == I915_MMAP_TYPE_FIXED) return false; if (type == I915_MMAP_TYPE_GTT && !i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return false; i915_gem_object_lock(obj, NULL); no_map = (type != I915_MMAP_TYPE_GTT && !i915_gem_object_has_struct_page(obj) && !i915_gem_object_has_iomem(obj)); i915_gem_object_unlock(obj); return !no_map; } #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) static int __igt_mmap(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { struct vm_area_struct *area; unsigned long addr; int err, i; u64 offset; if (!can_mmap(obj, type)) return 0; err = wc_set(obj); if (err == -ENXIO) err = gtt_set(obj); if (err) return err; err = __assign_mmap_offset(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); mmap_read_lock(current->mm); area = vma_lookup(current->mm, addr); mmap_read_unlock(current->mm); if (!area) { pr_err("%s: Did not create a vm_area_struct for the mmap\n", obj->mm.region->name); err = -EINVAL; goto out_unmap; } for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; if (get_user(x, ux)) { pr_err("%s: Unable to read from mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); err = -EFAULT; goto out_unmap; } if (x != expand32(POISON_INUSE)) { pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", obj->mm.region->name, i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; goto out_unmap; } x = expand32(POISON_FREE); if (put_user(x, ux)) { pr_err("%s: Unable to write to mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); err = -EFAULT; goto out_unmap; } } if 
(type == I915_MMAP_TYPE_GTT) intel_gt_flush_ggtt_writes(to_gt(i915)); err = wc_check(obj); if (err == -ENXIO) err = gtt_check(obj); out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; for_each_memory_region(mr, i915, id) { unsigned long sizes[] = { PAGE_SIZE, mr->min_page_size, SZ_4M, }; int i; if (mr->private) continue; for (i = 0; i < ARRAY_SIZE(sizes); i++) { struct drm_i915_gem_object *obj; int err; obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED); i915_gem_object_put(obj); if (err) return err; } } return 0; } static void igt_close_objects(struct drm_i915_private *i915, struct list_head *objects) { struct drm_i915_gem_object *obj, *on; list_for_each_entry_safe(obj, on, objects, st_link) { i915_gem_object_lock(obj, NULL); if (i915_gem_object_has_pinned_pages(obj)) i915_gem_object_unpin_pages(obj); /* No polluting the memory region between tests */ __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); list_del(&obj->st_link); i915_gem_object_put(obj); } cond_resched(); i915_gem_drain_freed_objects(i915); } static void igt_make_evictable(struct list_head *objects) { struct drm_i915_gem_object *obj; list_for_each_entry(obj, objects, st_link) { i915_gem_object_lock(obj, NULL); if (i915_gem_object_has_pinned_pages(obj)) i915_gem_object_unpin_pages(obj); i915_gem_object_unlock(obj); } cond_resched(); } static int igt_fill_mappable(struct intel_memory_region *mr, struct list_head *objects) { u64 size, total; int err; total = 0; size = mr->io_size; do { struct drm_i915_gem_object *obj; obj = i915_gem_object_create_region(mr, size, 0, 0); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto err_close; } list_add(&obj->st_link, objects); err = i915_gem_object_pin_pages_unlocked(obj); if (err) { if (err != -ENXIO && err != -ENOMEM) goto err_close; if (size == mr->min_page_size) { err = 0; break; } size >>= 1; continue; } total += obj->base.size; } while (1); pr_info("%s filled=%lluMiB\n", __func__, total >> 20); return 0; err_close: igt_close_objects(mr->i915, objects); return err; } static int ___igt_mmap_migrate(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, unsigned long addr, bool unfaultable) { struct vm_area_struct *area; int err = 0, i; pr_info("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr); mmap_read_lock(current->mm); area = vma_lookup(current->mm, addr); mmap_read_unlock(current->mm); if (!area) { pr_err("%s: Did not create a vm_area_struct for the mmap\n", obj->mm.region->name); err = -EINVAL; goto out_unmap; } for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; if (get_user(x, ux)) { err = -EFAULT; if (!unfaultable) { pr_err("%s: Unable to read from mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); goto out_unmap; } continue; } if (unfaultable) { pr_err("%s: Faulted unmappable memory\n", obj->mm.region->name); err = -EINVAL; goto out_unmap; } if (x != expand32(POISON_INUSE)) { pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", obj->mm.region->name, i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; goto out_unmap; } x = 
expand32(POISON_FREE); if (put_user(x, ux)) { pr_err("%s: Unable to write to mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); err = -EFAULT; goto out_unmap; } } if (unfaultable) { if (err == -EFAULT) err = 0; } else { obj->flags &= ~I915_BO_ALLOC_GPU_ONLY; err = wc_check(obj); } out_unmap: vm_munmap(addr, obj->base.size); return err; } #define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0) #define IGT_MMAP_MIGRATE_FILL (1 << 1) #define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2) #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3) #define IGT_MMAP_MIGRATE_FAIL_GPU (1 << 4) static int __igt_mmap_migrate(struct intel_memory_region **placements, int n_placements, struct intel_memory_region *expected_mr, unsigned int flags) { struct drm_i915_private *i915 = placements[0]->i915; struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; unsigned long addr; LIST_HEAD(objects); u64 offset; int err; obj = __i915_gem_object_create_user(i915, PAGE_SIZE, placements, n_placements); if (IS_ERR(obj)) return PTR_ERR(obj); if (flags & IGT_MMAP_MIGRATE_TOPDOWN) obj->flags |= I915_BO_ALLOC_GPU_ONLY; err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL); if (err) goto out_put; /* * This will eventually create a GEM context, due to opening dummy drm * file, which needs a tiny amount of mappable device memory for the top * level paging structures(and perhaps scratch), so make sure we * allocate early, to avoid tears. */ addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) { err = addr; goto out_put; } if (flags & IGT_MMAP_MIGRATE_FILL) { err = igt_fill_mappable(placements[0], &objects); if (err) goto out_put; } err = i915_gem_object_lock(obj, NULL); if (err) goto out_put; err = i915_gem_object_pin_pages(obj); if (err) { i915_gem_object_unlock(obj); goto out_put; } err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL, obj->mm.pages->sgl, obj->pat_index, i915_gem_object_is_lmem(obj), expand32(POISON_INUSE), &rq); i915_gem_object_unpin_pages(obj); if (rq) { err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err) dma_resv_add_fence(obj->base.resv, &rq->fence, DMA_RESV_USAGE_KERNEL); i915_request_put(rq); } i915_gem_object_unlock(obj); if (err) goto out_put; if (flags & IGT_MMAP_MIGRATE_EVICTABLE) igt_make_evictable(&objects); if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) { err = i915_gem_object_lock(obj, NULL); if (err) goto out_put; /* * Ensure we only simulate the gpu failuire when faulting the * pages. 
*/ err = i915_gem_object_wait_moving_fence(obj, true); i915_gem_object_unlock(obj); if (err) goto out_put; i915_ttm_migrate_set_failure_modes(true, false); } err = ___igt_mmap_migrate(i915, obj, addr, flags & IGT_MMAP_MIGRATE_UNFAULTABLE); if (!err && obj->mm.region != expected_mr) { pr_err("%s region mismatch %s\n", __func__, expected_mr->name); err = -EINVAL; } if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) { struct intel_gt *gt; unsigned int id; i915_ttm_migrate_set_failure_modes(false, false); for_each_gt(gt, i915, id) { intel_wakeref_t wakeref; bool wedged; mutex_lock(&gt->reset.mutex); wedged = test_bit(I915_WEDGED, &gt->reset.flags); mutex_unlock(&gt->reset.mutex); if (!wedged) { pr_err("gt(%u) not wedged\n", id); err = -EINVAL; continue; } wakeref = intel_runtime_pm_get(gt->uncore->rpm); igt_global_reset_lock(gt); intel_gt_reset(gt, ALL_ENGINES, NULL); igt_global_reset_unlock(gt); intel_runtime_pm_put(gt->uncore->rpm, wakeref); } if (!i915_gem_object_has_unknown_state(obj)) { pr_err("object missing unknown_state\n"); err = -EINVAL; } } out_put: i915_gem_object_put(obj); igt_close_objects(i915, &objects); return err; } static int igt_mmap_migrate(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM]; struct intel_memory_region *mr; enum intel_region_id id; for_each_memory_region(mr, i915, id) { struct intel_memory_region *mixed[] = { mr, system }; struct intel_memory_region *single[] = { mr }; struct ttm_resource_manager *man = mr->region_private; resource_size_t saved_io_size; int err; if (mr->private) continue; if (!mr->io_size) continue; /* * For testing purposes let's force small BAR, if not already * present. */ saved_io_size = mr->io_size; if (mr->io_size == mr->total) { resource_size_t io_size = mr->io_size; io_size = rounddown_pow_of_two(io_size >> 1); if (io_size < PAGE_SIZE) continue; mr->io_size = io_size; i915_ttm_buddy_man_force_visible_size(man, io_size >> PAGE_SHIFT); } /* * Allocate in the mappable portion, should be no suprises here. */ err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0); if (err) goto out_io_size; /* * Allocate in the non-mappable portion, but force migrating to * the mappable portion on fault (LMEM -> LMEM) */ err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr, IGT_MMAP_MIGRATE_TOPDOWN | IGT_MMAP_MIGRATE_FILL | IGT_MMAP_MIGRATE_EVICTABLE); if (err) goto out_io_size; /* * Allocate in the non-mappable portion, but force spilling into * system memory on fault (LMEM -> SMEM) */ err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system, IGT_MMAP_MIGRATE_TOPDOWN | IGT_MMAP_MIGRATE_FILL); if (err) goto out_io_size; /* * Allocate in the non-mappable portion, but since the mappable * portion is already full, and we can't spill to system memory, * then we should expect the fault to fail. */ err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr, IGT_MMAP_MIGRATE_TOPDOWN | IGT_MMAP_MIGRATE_FILL | IGT_MMAP_MIGRATE_UNFAULTABLE); if (err) goto out_io_size; /* * Allocate in the non-mappable portion, but force migrating to * the mappable portion on fault (LMEM -> LMEM). We then also * simulate a gpu error when moving the pages when faulting the * pages, which should result in wedging the gpu and returning * SIGBUS in the fault handler, since we can't fallback to * memcpy. 
*/ err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr, IGT_MMAP_MIGRATE_TOPDOWN | IGT_MMAP_MIGRATE_FILL | IGT_MMAP_MIGRATE_EVICTABLE | IGT_MMAP_MIGRATE_FAIL_GPU | IGT_MMAP_MIGRATE_UNFAULTABLE); out_io_size: mr->io_size = saved_io_size; i915_ttm_buddy_man_force_visible_size(man, mr->io_size >> PAGE_SHIFT); if (err) return err; } return 0; } static const char *repr_mmap_type(enum i915_mmap_type type) { switch (type) { case I915_MMAP_TYPE_GTT: return "gtt"; case I915_MMAP_TYPE_WB: return "wb"; case I915_MMAP_TYPE_WC: return "wc"; case I915_MMAP_TYPE_UC: return "uc"; case I915_MMAP_TYPE_FIXED: return "fixed"; default: return "unknown"; } } static bool can_access(struct drm_i915_gem_object *obj) { bool access; i915_gem_object_lock(obj, NULL); access = i915_gem_object_has_struct_page(obj) || i915_gem_object_has_iomem(obj); i915_gem_object_unlock(obj); return access; } static int __igt_mmap_access(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { unsigned long __user *ptr; unsigned long A, B; unsigned long x, y; unsigned long addr; int err; u64 offset; memset(&A, 0xAA, sizeof(A)); memset(&B, 0xBB, sizeof(B)); if (!can_mmap(obj, type) || !can_access(obj)) return 0; err = __assign_mmap_offset(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; ptr = (unsigned long __user *)addr; err = __put_user(A, ptr); if (err) { pr_err("%s(%s): failed to write into user mmap\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } intel_gt_flush_ggtt_writes(to_gt(i915)); err = access_process_vm(current, addr, &x, sizeof(x), 0); if (err != sizeof(x)) { pr_err("%s(%s): access_process_vm() read failed\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE); if (err != sizeof(B)) { pr_err("%s(%s): access_process_vm() write failed\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } intel_gt_flush_ggtt_writes(to_gt(i915)); err = __get_user(y, ptr); if (err) { pr_err("%s(%s): failed to read from user mmap\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } if (x != A || y != B) { pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n", obj->mm.region->name, repr_mmap_type(type), x, y); err = -EINVAL; goto out_unmap; } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_access(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; for_each_memory_region(mr, i915, id) { struct drm_i915_gem_object *obj; int err; if (mr->private) continue; obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED); i915_gem_object_put(obj); if (err) return err; } return 0; } static int __igt_mmap_gpu(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { struct intel_engine_cs *engine; unsigned long addr; u32 __user *ux; u32 bbe; int err; u64 offset; /* * Verify that the mmap access into the backing store aligns with * that of the GPU, i.e. 
that mmap is indeed writing into the same * page as being read by the GPU. */ if (!can_mmap(obj, type)) return 0; err = wc_set(obj); if (err == -ENXIO) err = gtt_set(obj); if (err) return err; err = __assign_mmap_offset(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; ux = u64_to_user_ptr((u64)addr); bbe = MI_BATCH_BUFFER_END; if (put_user(bbe, ux)) { pr_err("%s: Unable to write to mmap\n", obj->mm.region->name); err = -EFAULT; goto out_unmap; } if (type == I915_MMAP_TYPE_GTT) intel_gt_flush_ggtt_writes(to_gt(i915)); for_each_uabi_engine(engine, i915) { struct i915_request *rq; struct i915_vma *vma; struct i915_gem_ww_ctx ww; vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_unmap; } i915_gem_ww_ctx_init(&ww, false); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) goto out_ww; rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_unpin; } err = i915_vma_move_to_active(vma, rq, 0); err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0); i915_request_get(rq); i915_request_add(rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = drm_info_printer(engine->i915->drm.dev); pr_err("%s(%s, %s): Failed to execute batch\n", __func__, engine->name, obj->mm.region->name); intel_engine_dump(engine, &p, "%s\n", engine->name); intel_gt_set_wedged(engine->gt); err = -EIO; } i915_request_put(rq); out_unpin: i915_vma_unpin(vma); out_ww: if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); if (err) goto out_unmap; } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_gpu(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; for_each_memory_region(mr, i915, id) { struct drm_i915_gem_object *obj; int err; if (mr->private) continue; obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED); i915_gem_object_put(obj); if (err) return err; } return 0; } static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { pte_t ptent = ptep_get(pte); if (!pte_present(ptent) || pte_none(ptent)) { pr_err("missing PTE:%lx\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; } return 0; } static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) { pte_t ptent = ptep_get(pte); if (pte_present(ptent) && !pte_none(ptent)) { pr_err("present PTE:%lx; expected to be revoked\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; } return 0; } static int check_present(unsigned long addr, unsigned long len) { return apply_to_page_range(current->mm, addr, len, check_present_pte, (void *)addr); } static int check_absent(unsigned long addr, unsigned long len) { return apply_to_page_range(current->mm, addr, len, check_absent_pte, (void *)addr); } static int prefault_range(u64 start, u64 len) { const char __user *addr, *end; char __maybe_unused c; int err; addr = u64_to_user_ptr(start); end = addr + len; for (; addr < end; addr += PAGE_SIZE) { err = __get_user(c, addr); if (err) return err; } 
return __get_user(c, end - 1); } static int __igt_mmap_revoke(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { unsigned long addr; int err; u64 offset; if (!can_mmap(obj, type)) return 0; err = __assign_mmap_offset(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; err = prefault_range(addr, obj->base.size); if (err) goto out_unmap; err = check_present(addr, obj->base.size); if (err) { pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap; } /* * After unbinding the object from the GGTT, its address may be reused * for other objects. Ergo we have to revoke the previous mmap PTE * access as it no longer points to the same object. */ i915_gem_object_lock(obj, NULL); err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); i915_gem_object_unlock(obj); if (err) { pr_err("Failed to unbind object!\n"); goto out_unmap; } if (type != I915_MMAP_TYPE_GTT) { i915_gem_object_lock(obj, NULL); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); if (i915_gem_object_has_pages(obj)) { pr_err("Failed to put-pages object!\n"); err = -EINVAL; goto out_unmap; } } err = check_absent(addr, obj->base.size); if (err) { pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap; } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_revoke(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; for_each_memory_region(mr, i915, id) { struct drm_i915_gem_object *obj; int err; if (mr->private) continue; obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED); i915_gem_object_put(obj); if (err) return err; } return 0; } int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), SUBTEST(igt_mmap), SUBTEST(igt_mmap_migrate), SUBTEST(igt_mmap_access), SUBTEST(igt_mmap_revoke), SUBTEST(igt_mmap_gpu), }; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
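The partial-mapping selftests above verify bit-6 swizzling by recomputing tiled offsets on the CPU via swizzle_bit(). The sketch below lifts that XOR-folding into a standalone program; the enum only mirrors the I915_BIT_6_SWIZZLE_* modes exercised by the test and is not the driver's definition.

/*
 * Standalone sketch of bit-6 swizzling: selected offset bits (9, 10, 11
 * depending on the mode) are XORed into bit 6 of the byte offset.
 */
#include <stdint.h>
#include <stdio.h>

enum sketch_swizzle {
        SKETCH_SWIZZLE_NONE,
        SKETCH_SWIZZLE_9,
        SKETCH_SWIZZLE_9_10,
        SKETCH_SWIZZLE_9_11,
        SKETCH_SWIZZLE_9_10_11,
};

/* Extract bit 'bit' of the offset and move it down to bit 6. */
static uint64_t swizzle_bit(unsigned int bit, uint64_t offset)
{
        return (offset & (1ull << bit)) >> (bit - 6);
}

static uint64_t apply_swizzle(enum sketch_swizzle mode, uint64_t v)
{
        switch (mode) {
        case SKETCH_SWIZZLE_9:
                return v ^ swizzle_bit(9, v);
        case SKETCH_SWIZZLE_9_10:
                return v ^ swizzle_bit(9, v) ^ swizzle_bit(10, v);
        case SKETCH_SWIZZLE_9_11:
                return v ^ swizzle_bit(9, v) ^ swizzle_bit(11, v);
        case SKETCH_SWIZZLE_9_10_11:
                return v ^ swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
        default:
                return v;
        }
}

int main(void)
{
        uint64_t v = 0x600;     /* bits 9 and 10 set, bit 6 clear */

        /* With 9_10 swizzling both source bits fold into bit 6 and cancel out. */
        printf("0x%llx -> 0x%llx\n",
               (unsigned long long)v,
               (unsigned long long)apply_swizzle(SKETCH_SWIZZLE_9_10, v));
        return 0;
}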
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "i915_drv.h" #include "i915_selftest.h" #include "gem/i915_gem_context.h" #include "mock_context.h" #include "mock_dmabuf.h" #include "igt_gem_utils.h" #include "selftests/mock_drm.h" #include "selftests/mock_gem_device.h" static int igt_dmabuf_export(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); dmabuf = i915_gem_prime_export(&obj->base, 0); i915_gem_object_put(obj); if (IS_ERR(dmabuf)) { pr_err("i915_gem_prime_export failed with err=%d\n", (int)PTR_ERR(dmabuf)); return PTR_ERR(dmabuf); } dma_buf_put(dmabuf); return 0; } static int igt_dmabuf_import_self(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj, *import_obj; struct drm_gem_object *import; struct dma_buf *dmabuf; int err; obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); dmabuf = i915_gem_prime_export(&obj->base, 0); if (IS_ERR(dmabuf)) { pr_err("i915_gem_prime_export failed with err=%d\n", (int)PTR_ERR(dmabuf)); err = PTR_ERR(dmabuf); goto out; } import = i915_gem_prime_import(&i915->drm, dmabuf); if (IS_ERR(import)) { pr_err("i915_gem_prime_import failed with err=%d\n", (int)PTR_ERR(import)); err = PTR_ERR(import); goto out_dmabuf; } import_obj = to_intel_bo(import); if (import != &obj->base) { pr_err("i915_gem_prime_import created a new object!\n"); err = -EINVAL; goto out_import; } i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); i915_gem_object_unlock(import_obj); if (err) { pr_err("Same object dma-buf get_pages failed!\n"); goto out_import; } err = 0; out_import: i915_gem_object_put(import_obj); out_dmabuf: dma_buf_put(dmabuf); out: i915_gem_object_put(obj); return err; } static int igt_dmabuf_import_same_driver_lmem(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0]; struct drm_i915_gem_object *obj; struct drm_gem_object *import; struct dma_buf *dmabuf; int err; if (!lmem) return 0; force_different_devices = true; obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } dmabuf = i915_gem_prime_export(&obj->base, 0); if (IS_ERR(dmabuf)) { pr_err("i915_gem_prime_export failed with err=%ld\n", PTR_ERR(dmabuf)); err = PTR_ERR(dmabuf); goto out; } /* * We expect an import of an LMEM-only object to fail with * -EOPNOTSUPP because it can't be migrated to SMEM. 
*/ import = i915_gem_prime_import(&i915->drm, dmabuf); if (!IS_ERR(import)) { drm_gem_object_put(import); pr_err("i915_gem_prime_import succeeded when it shouldn't have\n"); err = -EINVAL; } else if (PTR_ERR(import) != -EOPNOTSUPP) { pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", PTR_ERR(import)); err = PTR_ERR(import); } else { err = 0; } dma_buf_put(dmabuf); out: i915_gem_object_put(obj); out_ret: force_different_devices = false; return err; } static int verify_access(struct drm_i915_private *i915, struct drm_i915_gem_object *native_obj, struct drm_i915_gem_object *import_obj) { struct i915_gem_engines_iter it; struct i915_gem_context *ctx; struct intel_context *ce; struct i915_vma *vma; struct file *file; u32 *vaddr; int err = 0, i; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_file; } for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (intel_engine_can_store_dword(ce->engine)) break; } i915_gem_context_unlock_engines(ctx); if (!ce) goto out_file; vma = i915_vma_instance(import_obj, ce->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_file; } err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) goto out_file; err = igt_gpu_fill_dw(ce, vma, 0, vma->size >> PAGE_SHIFT, 0xdeadbeaf); i915_vma_unpin(vma); if (err) goto out_file; err = i915_gem_object_wait(import_obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) goto out_file; vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto out_file; } for (i = 0; i < native_obj->base.size / sizeof(u32); i += PAGE_SIZE / sizeof(u32)) { if (vaddr[i] != 0xdeadbeaf) { pr_err("Data mismatch [%d]=%u\n", i, vaddr[i]); err = -EINVAL; goto out_file; } } out_file: fput(file); return err; } static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, struct intel_memory_region **regions, unsigned int num_regions) { struct drm_i915_gem_object *obj, *import_obj; struct drm_gem_object *import; struct dma_buf *dmabuf; struct dma_buf_attachment *import_attach; struct sg_table *st; long timeout; int err; force_different_devices = true; obj = __i915_gem_object_create_user(i915, SZ_8M, regions, num_regions); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } dmabuf = i915_gem_prime_export(&obj->base, 0); if (IS_ERR(dmabuf)) { pr_err("i915_gem_prime_export failed with err=%ld\n", PTR_ERR(dmabuf)); err = PTR_ERR(dmabuf); goto out; } import = i915_gem_prime_import(&i915->drm, dmabuf); if (IS_ERR(import)) { pr_err("i915_gem_prime_import failed with err=%ld\n", PTR_ERR(import)); err = PTR_ERR(import); goto out_dmabuf; } import_obj = to_intel_bo(import); if (import == &obj->base) { pr_err("i915_gem_prime_import reused gem object!\n"); err = -EINVAL; goto out_import; } i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); if (err) { pr_err("Different objects dma-buf get_pages failed!\n"); i915_gem_object_unlock(import_obj); goto out_import; } /* * If the exported object is not in system memory, something * weird is going on. TODO: When p2p is supported, this is no * longer considered weird. 
*/ if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) { pr_err("Exported dma-buf is not in system memory\n"); err = -EINVAL; } i915_gem_object_unlock(import_obj); err = verify_access(i915, obj, import_obj); if (err) goto out_import; /* Now try a fake an importer */ import_attach = dma_buf_attach(dmabuf, obj->base.dev->dev); if (IS_ERR(import_attach)) { err = PTR_ERR(import_attach); goto out_import; } st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL); if (IS_ERR(st)) { err = PTR_ERR(st); goto out_detach; } timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE, true, 5 * HZ); if (!timeout) { pr_err("dmabuf wait for exclusive fence timed out.\n"); timeout = -ETIME; } err = timeout > 0 ? 0 : timeout; dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL); out_detach: dma_buf_detach(dmabuf, import_attach); out_import: i915_gem_object_put(import_obj); out_dmabuf: dma_buf_put(dmabuf); out: i915_gem_object_put(obj); out_ret: force_different_devices = false; return err; } static int igt_dmabuf_import_same_driver_smem(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM]; return igt_dmabuf_import_same_driver(i915, &smem, 1); } static int igt_dmabuf_import_same_driver_lmem_smem(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *regions[2]; if (!i915->mm.regions[INTEL_REGION_LMEM_0]) return 0; regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0]; regions[1] = i915->mm.regions[INTEL_REGION_SMEM]; return igt_dmabuf_import_same_driver(i915, regions, 2); } static int igt_dmabuf_import(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; void *obj_map, *dma_map; struct iosys_map map; u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; int err, i; dmabuf = mock_dmabuf(1); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); if (IS_ERR(obj)) { pr_err("i915_gem_prime_import failed with err=%d\n", (int)PTR_ERR(obj)); err = PTR_ERR(obj); goto out_dmabuf; } if (obj->base.dev != &i915->drm) { pr_err("i915_gem_prime_import created a non-i915 object!\n"); err = -EINVAL; goto out_obj; } if (obj->base.size != PAGE_SIZE) { pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n", (long long)obj->base.size, PAGE_SIZE); err = -EINVAL; goto out_obj; } err = dma_buf_vmap_unlocked(dmabuf, &map); dma_map = err ? 
NULL : map.vaddr; if (!dma_map) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; goto out_obj; } if (0) { /* Can not yet map dmabuf */ obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(obj_map)) { err = PTR_ERR(obj_map); pr_err("i915_gem_object_pin_map failed with err=%d\n", err); goto out_dma_map; } for (i = 0; i < ARRAY_SIZE(pattern); i++) { memset(dma_map, pattern[i], PAGE_SIZE); if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) { err = -EINVAL; pr_err("imported vmap not all set to %x!\n", pattern[i]); i915_gem_object_unpin_map(obj); goto out_dma_map; } } for (i = 0; i < ARRAY_SIZE(pattern); i++) { memset(obj_map, pattern[i], PAGE_SIZE); if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) { err = -EINVAL; pr_err("exported vmap not all set to %x!\n", pattern[i]); i915_gem_object_unpin_map(obj); goto out_dma_map; } } i915_gem_object_unpin_map(obj); } err = 0; out_dma_map: dma_buf_vunmap_unlocked(dmabuf, &map); out_obj: i915_gem_object_put(obj); out_dmabuf: dma_buf_put(dmabuf); return err; } static int igt_dmabuf_import_ownership(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; struct iosys_map map; void *ptr; int err; dmabuf = mock_dmabuf(1); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); err = dma_buf_vmap_unlocked(dmabuf, &map); ptr = err ? NULL : map.vaddr; if (!ptr) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; goto err_dmabuf; } memset(ptr, 0xc5, PAGE_SIZE); dma_buf_vunmap_unlocked(dmabuf, &map); obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); if (IS_ERR(obj)) { pr_err("i915_gem_prime_import failed with err=%d\n", (int)PTR_ERR(obj)); err = PTR_ERR(obj); goto err_dmabuf; } dma_buf_put(dmabuf); err = i915_gem_object_pin_pages_unlocked(obj); if (err) { pr_err("i915_gem_object_pin_pages failed with err=%d\n", err); goto out_obj; } err = 0; i915_gem_object_unpin_pages(obj); out_obj: i915_gem_object_put(obj); return err; err_dmabuf: dma_buf_put(dmabuf); return err; } static int igt_dmabuf_export_vmap(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; struct iosys_map map; void *ptr; int err; obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); dmabuf = i915_gem_prime_export(&obj->base, 0); if (IS_ERR(dmabuf)) { pr_err("i915_gem_prime_export failed with err=%d\n", (int)PTR_ERR(dmabuf)); err = PTR_ERR(dmabuf); goto err_obj; } i915_gem_object_put(obj); err = dma_buf_vmap_unlocked(dmabuf, &map); ptr = err ? 
NULL : map.vaddr; if (!ptr) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; goto out; } if (memchr_inv(ptr, 0, dmabuf->size)) { pr_err("Exported object not initialiased to zero!\n"); err = -EINVAL; goto out; } memset(ptr, 0xc5, dmabuf->size); err = 0; dma_buf_vunmap_unlocked(dmabuf, &map); out: dma_buf_put(dmabuf); return err; err_obj: i915_gem_object_put(obj); return err; } int i915_gem_dmabuf_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_dmabuf_export), SUBTEST(igt_dmabuf_import_self), SUBTEST(igt_dmabuf_import), SUBTEST(igt_dmabuf_import_ownership), SUBTEST(igt_dmabuf_export_vmap), }; struct drm_i915_private *i915; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; err = i915_subtests(tests, i915); mock_destroy_device(i915); return err; } int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_dmabuf_export), SUBTEST(igt_dmabuf_import_same_driver_lmem), SUBTEST(igt_dmabuf_import_same_driver_smem), SUBTEST(igt_dmabuf_import_same_driver_lmem_smem), }; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
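For comparison with the in-kernel selftests above, the same PRIME export/import round trip can be driven from userspace. The sketch below uses libdrm's PRIME helpers; the /dev/dri/card0 path, the dumb-buffer source object and the bare-bones error handling are assumptions made purely for illustration and are not part of the selftest.

/* Illustrative userspace sketch (not part of the selftest): export a GEM
 * handle as a dma-buf fd and re-import it, mirroring what
 * igt_dmabuf_import_self() asserts inside the kernel.  Assumes libdrm;
 * the dumb buffer is just a convenient way to obtain a handle. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	struct drm_mode_create_dumb create = { .width = 64, .height = 64, .bpp = 32 };
	uint32_t reimported = 0;
	int prime_fd = -1;

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* GEM handle -> dma-buf fd: the "export" side of the test */
	if (drmPrimeHandleToFD(fd, create.handle, DRM_CLOEXEC, &prime_fd))
		return 1;

	/* dma-buf fd -> GEM handle again; importing on the same device is
	 * expected to hand back the original object, which is exactly the
	 * condition igt_dmabuf_import_self() checks. */
	if (drmPrimeFDToHandle(fd, prime_fd, &reimported))
		return 1;

	printf("exported handle %u, reimported handle %u\n",
	       create.handle, reimported);
	close(prime_fd);
	close(fd);
	return 0;
}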
// SPDX-License-Identifier: MIT /* * Copyright © 2020-2021 Intel Corporation */ #include "gt/intel_migrate.h" #include "gt/intel_gpu_commands.h" #include "gem/i915_gem_ttm_move.h" #include "i915_deps.h" #include "selftests/igt_reset.h" #include "selftests/igt_spinner.h" static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, struct intel_gt *gt, bool fill) { unsigned int i, count = obj->base.size / sizeof(u32); enum i915_map_type map_type = intel_gt_coherent_map_type(gt, obj, false); u32 *cur; int err = 0; assert_object_held(obj); cur = i915_gem_object_pin_map(obj, map_type); if (IS_ERR(cur)) return PTR_ERR(cur); if (fill) for (i = 0; i < count; ++i) *cur++ = i; else for (i = 0; i < count; ++i) if (*cur++ != i) { pr_err("Object content mismatch at location %d of %d\n", i, count); err = -EINVAL; break; } i915_gem_object_unpin_map(obj); return err; } static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src, enum intel_region_id dst) { struct drm_i915_private *i915 = gt->i915; struct intel_memory_region *src_mr = i915->mm.regions[src]; struct intel_memory_region *dst_mr = i915->mm.regions[dst]; struct drm_i915_gem_object *obj; struct i915_gem_ww_ctx ww; int err = 0; GEM_BUG_ON(!src_mr); GEM_BUG_ON(!dst_mr); /* Switch object backing-store on create */ obj = i915_gem_object_create_region(src_mr, dst_mr->min_page_size, 0, 0); if (IS_ERR(obj)) return PTR_ERR(obj); for_i915_gem_ww(&ww, err, true) { err = i915_gem_object_lock(obj, &ww); if (err) continue; err = igt_fill_check_buffer(obj, gt, true); if (err) continue; err = i915_gem_object_migrate(obj, &ww, dst); if (err) continue; err = i915_gem_object_pin_pages(obj); if (err) continue; if (i915_gem_object_can_migrate(obj, src)) err = -EINVAL; i915_gem_object_unpin_pages(obj); err = i915_gem_object_wait_migration(obj, true); if (err) continue; err = igt_fill_check_buffer(obj, gt, false); } i915_gem_object_put(obj); return err; } static int igt_smem_create_migrate(void *arg) { return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM); } static int igt_lmem_create_migrate(void *arg) { return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0); } static int igt_same_create_migrate(void *arg) { return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0); } static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww, struct drm_i915_gem_object *obj, struct i915_vma *vma, bool silent_migrate) { int err; err = i915_gem_object_lock(obj, ww); if (err) return err; if (vma) { err = i915_vma_pin_ww(vma, ww, obj->base.size, 0, 0UL | PIN_OFFSET_FIXED | PIN_USER); if (err) { if (err != -EINTR && err != ERESTARTSYS && err != -EDEADLK) pr_err("Failed to pin vma.\n"); return err; } i915_vma_unpin(vma); } /* * Migration will implicitly unbind (asynchronously) any bound * vmas. 
*/ if (i915_gem_object_is_lmem(obj)) { err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM); if (err) { if (!silent_migrate) pr_err("Object failed migration to smem\n"); if (err) return err; } if (i915_gem_object_is_lmem(obj)) { pr_err("object still backed by lmem\n"); err = -EINVAL; } if (!i915_gem_object_has_struct_page(obj)) { pr_err("object not backed by struct page\n"); err = -EINVAL; } } else { err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0); if (err) { if (!silent_migrate) pr_err("Object failed migration to lmem\n"); if (err) return err; } if (i915_gem_object_has_struct_page(obj)) { pr_err("object still backed by struct page\n"); err = -EINVAL; } if (!i915_gem_object_is_lmem(obj)) { pr_err("object not backed by lmem\n"); err = -EINVAL; } } return err; } static int __igt_lmem_pages_migrate(struct intel_gt *gt, struct i915_address_space *vm, struct i915_deps *deps, struct igt_spinner *spin, struct dma_fence *spin_fence, bool borked_migrate) { struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma = NULL; struct i915_gem_ww_ctx ww; struct i915_request *rq; int err; int i; /* From LMEM to shmem and back again */ obj = i915_gem_object_create_lmem(i915, SZ_2M, 0); if (IS_ERR(obj)) return PTR_ERR(obj); if (vm) { vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; } } /* Initial GPU fill, sync, CPU initialization. */ for_i915_gem_ww(&ww, err, true) { err = i915_gem_object_lock(obj, &ww); if (err) continue; err = ____i915_gem_object_get_pages(obj); if (err) continue; err = intel_migrate_clear(&gt->migrate, &ww, deps, obj->mm.pages->sgl, obj->pat_index, i915_gem_object_is_lmem(obj), 0xdeadbeaf, &rq); if (rq) { err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err) dma_resv_add_fence(obj->base.resv, &rq->fence, DMA_RESV_USAGE_KERNEL); i915_request_put(rq); } if (err) continue; if (!vma) { err = igt_fill_check_buffer(obj, gt, true); if (err) continue; } } if (err) goto out_put; /* * Migrate to and from smem without explicitly syncing. * Finalize with data in smem for fast readout. */ for (i = 1; i <= 5; ++i) { for_i915_gem_ww(&ww, err, true) err = lmem_pages_migrate_one(&ww, obj, vma, borked_migrate); if (err) goto out_put; } err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out_put; if (spin) { if (dma_fence_is_signaled(spin_fence)) { pr_err("Spinner was terminated by hangcheck.\n"); err = -EBUSY; goto out_unlock; } igt_spinner_end(spin); } /* Finally sync migration and check content. 
*/ err = i915_gem_object_wait_migration(obj, true); if (err) goto out_unlock; if (vma) { err = i915_vma_wait_for_bind(vma); if (err) goto out_unlock; } else { err = igt_fill_check_buffer(obj, gt, false); } out_unlock: i915_gem_object_unlock(obj); out_put: i915_gem_object_put(obj); return err; } static int igt_lmem_pages_failsafe_migrate(void *arg) { int fail_gpu, fail_alloc, ban_memcpy, ret; struct intel_gt *gt = arg; for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) { for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) { for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) { pr_info("Simulated failure modes: gpu: %d, alloc:%d, ban_memcpy: %d\n", fail_gpu, fail_alloc, ban_memcpy); i915_ttm_migrate_set_ban_memcpy(ban_memcpy); i915_ttm_migrate_set_failure_modes(fail_gpu, fail_alloc); ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL, ban_memcpy && fail_gpu); if (ban_memcpy && fail_gpu) { struct intel_gt *__gt; unsigned int id; if (ret != -EIO) { pr_err("expected -EIO, got (%d)\n", ret); ret = -EINVAL; } else { ret = 0; } for_each_gt(__gt, gt->i915, id) { intel_wakeref_t wakeref; bool wedged; mutex_lock(&__gt->reset.mutex); wedged = test_bit(I915_WEDGED, &__gt->reset.flags); mutex_unlock(&__gt->reset.mutex); if (fail_gpu && !fail_alloc) { if (!wedged) { pr_err("gt(%u) not wedged\n", id); ret = -EINVAL; continue; } } else if (wedged) { pr_err("gt(%u) incorrectly wedged\n", id); ret = -EINVAL; } else { continue; } wakeref = intel_runtime_pm_get(__gt->uncore->rpm); igt_global_reset_lock(__gt); intel_gt_reset(__gt, ALL_ENGINES, NULL); igt_global_reset_unlock(__gt); intel_runtime_pm_put(__gt->uncore->rpm, wakeref); } if (ret) goto out_err; } } } } out_err: i915_ttm_migrate_set_failure_modes(false, false); i915_ttm_migrate_set_ban_memcpy(false); return ret; } /* * This subtest tests that unbinding at migration is indeed performed * async. We launch a spinner and a number of migrations depending on * that spinner to have terminated. Before each migration we bind a * vma, which should then be async unbound by the migration operation. * If we are able to schedule migrations without blocking while the * spinner is still running, those unbinds are indeed async and non- * blocking. * * Note that each async bind operation is awaiting the previous migration * due to the moving fence resulting from the migration. */ static int igt_async_migrate(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; struct i915_ppgtt *ppgtt; struct igt_spinner spin; int err; ppgtt = i915_ppgtt_create(gt, 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); if (igt_spinner_init(&spin, gt)) { err = -ENOMEM; goto out_spin; } for_each_engine(engine, gt, id) { struct ttm_operation_ctx ctx = { .interruptible = true }; struct dma_fence *spin_fence; struct intel_context *ce; struct i915_request *rq; struct i915_deps deps; ce = intel_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out_ce; } /* * Use MI_NOOP, making the spinner non-preemptible. If there * is a code path where we fail async operation due to the * running spinner, we will block and fail to end the * spinner resulting in a deadlock. But with a non- * preemptible spinner, hangcheck will terminate the spinner * for us, and we will later detect that and fail the test. 
*/ rq = igt_spinner_create_request(&spin, ce, MI_NOOP); intel_context_put(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_ce; } i915_deps_init(&deps, GFP_KERNEL); err = i915_deps_add_dependency(&deps, &rq->fence, &ctx); spin_fence = dma_fence_get(&rq->fence); i915_request_add(rq); if (err) goto out_ce; err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin, spin_fence, false); i915_deps_fini(&deps); dma_fence_put(spin_fence); if (err) goto out_ce; } out_ce: igt_spinner_fini(&spin); out_spin: i915_vm_put(&ppgtt->vm); return err; } /* * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure while * arming the migration error check and block async migration. This * will cause us to deadlock and hangcheck will terminate the spinner * causing the test to fail. */ #define ASYNC_FAIL_ALLOC 1 static int igt_lmem_async_migrate(void *arg) { int fail_gpu, fail_alloc, ban_memcpy, ret; struct intel_gt *gt = arg; for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) { for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) { for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) { pr_info("Simulated failure modes: gpu: %d, alloc: %d, ban_memcpy: %d\n", fail_gpu, fail_alloc, ban_memcpy); i915_ttm_migrate_set_ban_memcpy(ban_memcpy); i915_ttm_migrate_set_failure_modes(fail_gpu, fail_alloc); ret = igt_async_migrate(gt); if (fail_gpu && ban_memcpy) { struct intel_gt *__gt; unsigned int id; if (ret != -EIO) { pr_err("expected -EIO, got (%d)\n", ret); ret = -EINVAL; } else { ret = 0; } for_each_gt(__gt, gt->i915, id) { intel_wakeref_t wakeref; bool wedged; mutex_lock(&__gt->reset.mutex); wedged = test_bit(I915_WEDGED, &__gt->reset.flags); mutex_unlock(&__gt->reset.mutex); if (fail_gpu && !fail_alloc) { if (!wedged) { pr_err("gt(%u) not wedged\n", id); ret = -EINVAL; continue; } } else if (wedged) { pr_err("gt(%u) incorrectly wedged\n", id); ret = -EINVAL; } else { continue; } wakeref = intel_runtime_pm_get(__gt->uncore->rpm); igt_global_reset_lock(__gt); intel_gt_reset(__gt, ALL_ENGINES, NULL); igt_global_reset_unlock(__gt); intel_runtime_pm_put(__gt->uncore->rpm, wakeref); } } if (ret) goto out_err; } } } out_err: i915_ttm_migrate_set_failure_modes(false, false); i915_ttm_migrate_set_ban_memcpy(false); return ret; } int i915_gem_migrate_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_smem_create_migrate), SUBTEST(igt_lmem_create_migrate), SUBTEST(igt_same_create_migrate), SUBTEST(igt_lmem_pages_failsafe_migrate), SUBTEST(igt_lmem_async_migrate), }; if (!HAS_LMEM(i915)) return 0; return intel_gt_live_subtests(tests, to_gt(i915)); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
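The migration tests above repeatedly bounce an object between lmem and smem and rely on igt_fill_check_buffer() to prove the contents survive each move. The standalone sketch below shows just that fill-then-verify invariant, with a plain memcpy standing in for the lmem/smem copy; the buffer names and the memcpy stand-in are illustrative assumptions, not the driver's migration path.

/* Minimal sketch of the fill-then-verify invariant built into
 * igt_fill_check_buffer(): fill a buffer with its own dword index,
 * "migrate" it (simulated here with memcpy), then verify every dword. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NDWORDS (4096 / sizeof(uint32_t))

static void fill_check(uint32_t *buf, size_t count, int fill)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (fill)
			buf[i] = (uint32_t)i;
		else if (buf[i] != (uint32_t)i)
			printf("mismatch at %zu: %u\n", i, buf[i]);
	}
}

int main(void)
{
	static uint32_t smem[NDWORDS], lmem[NDWORDS];

	fill_check(smem, NDWORDS, 1);		/* CPU fill in "system memory" */
	memcpy(lmem, smem, sizeof(smem));	/* stand-in for the migration copy */
	fill_check(lmem, NDWORDS, 0);		/* content must be preserved */
	return 0;
}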
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "i915_scatterlist.h" #include "huge_gem_object.h" static void huge_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { unsigned long nreal = obj->scratch / PAGE_SIZE; struct sgt_iter sgt_iter; struct page *page; for_each_sgt_page(page, sgt_iter, pages) { __free_page(page); if (!--nreal) break; } sg_free_table(pages); kfree(pages); } static int huge_get_pages(struct drm_i915_gem_object *obj) { #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL) const unsigned long nreal = obj->scratch / PAGE_SIZE; unsigned int npages; /* restricted by sg_alloc_table */ struct scatterlist *sg, *src, *end; struct sg_table *pages; unsigned long n; if (overflows_type(obj->base.size / PAGE_SIZE, npages)) return -E2BIG; npages = obj->base.size / PAGE_SIZE; pages = kmalloc(sizeof(*pages), GFP); if (!pages) return -ENOMEM; if (sg_alloc_table(pages, npages, GFP)) { kfree(pages); return -ENOMEM; } sg = pages->sgl; for (n = 0; n < nreal; n++) { struct page *page; page = alloc_page(GFP | __GFP_HIGHMEM); if (!page) { sg_mark_end(sg); goto err; } sg_set_page(sg, page, PAGE_SIZE, 0); sg = __sg_next(sg); } if (nreal < npages) { for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) { sg_set_page(sg, sg_page(src), PAGE_SIZE, 0); src = __sg_next(src); if (src == end) src = pages->sgl; } } if (i915_gem_gtt_prepare_pages(obj, pages)) goto err; __i915_gem_object_set_pages(obj, pages); return 0; err: huge_free_pages(obj, pages); return -ENOMEM; #undef GFP } static void huge_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { i915_gem_gtt_finish_pages(obj, pages); huge_free_pages(obj, pages); obj->mm.dirty = false; } static const struct drm_i915_gem_object_ops huge_ops = { .name = "huge-gem", .get_pages = huge_get_pages, .put_pages = huge_put_pages, }; struct drm_i915_gem_object * huge_gem_object(struct drm_i915_private *i915, phys_addr_t phys_size, dma_addr_t dma_size) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; GEM_BUG_ON(!phys_size || phys_size > dma_size); GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE)); if (overflows_type(dma_size, obj->base.size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); i915_gem_object_init(obj, &huge_ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); obj->scratch = phys_size; return obj; }
linux-master
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
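huge_get_pages() above backs an arbitrarily large object with only nreal real pages by letting the tail of the scatterlist point back at the head, so logical page n aliases real page n % nreal. The userspace sketch below reproduces that aliasing with plain arrays standing in for the sg_table; it is illustrative only.

/* Standalone sketch of the page-recycling trick in huge_get_pages():
 * a small pool of real pages services a much larger logical object,
 * with logical page n mapped to real page n % nreal. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned long nreal = 3;	/* real backing pages */
	const unsigned long npages = 8;	/* logical size of the object */
	void *real[3];
	void *logical[8];
	unsigned long n;

	for (n = 0; n < nreal; n++)
		real[n] = malloc(4096);

	/* Wrap the logical mapping onto the small pool of real pages --
	 * the aliasing that igt_gem_huge() later verifies by comparing
	 * i915_gem_object_get_page(obj, n) with page n % nreal. */
	for (n = 0; n < npages; n++)
		logical[n] = real[n % nreal];

	for (n = 0; n < npages; n++)
		printf("logical page %lu -> real page %lu (%p)\n",
		       n, n % nreal, logical[n]);

	for (n = 0; n < nreal; n++)
		free(real[n]);
	return 0;
}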
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "i915_selftest.h" #include "selftests/mock_gem_device.h" static int mock_phys_object(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; int err; /* Create an object and bind it to a contiguous set of physical pages, * i.e. exercise the i915_gem_object_phys API. */ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("i915_gem_object_create failed, err=%d\n", err); goto out; } i915_gem_object_lock(obj, NULL); if (!i915_gem_object_has_struct_page(obj)) { i915_gem_object_unlock(obj); err = -EINVAL; pr_err("shmem has no struct page\n"); goto out_obj; } err = i915_gem_object_attach_phys(obj, PAGE_SIZE); i915_gem_object_unlock(obj); if (err) { pr_err("i915_gem_object_attach_phys failed, err=%d\n", err); goto out_obj; } if (i915_gem_object_has_struct_page(obj)) { pr_err("i915_gem_object_attach_phys did not create a phys object\n"); err = -EINVAL; goto out_obj; } if (!atomic_read(&obj->mm.pages_pin_count)) { pr_err("i915_gem_object_attach_phys did not pin its phys pages\n"); err = -EINVAL; goto out_obj; } /* Make the object dirty so that put_pages must do copy back the data */ i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n", err); goto out_obj; } out_obj: i915_gem_object_put(obj); out: return err; } int i915_gem_phys_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(mock_phys_object), }; struct drm_i915_private *i915; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; err = i915_subtests(tests, i915); mock_destroy_device(i915); return err; }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
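mock_phys_object() above checks that i915_gem_object_attach_phys() replaces an object's scattered shmem pages with a single contiguous backing store (and copies dirty data back on release). The sketch below is a rough userspace analogue of that gather step, using malloc'd pages as a stand-in for the shmem backing; the helper name and layout are assumptions made for illustration.

/* Rough analogue of the gather that attach_phys performs: copy an
 * object's scattered pages into one contiguous allocation so legacy
 * hardware can address it from a single base. */
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

static void *gather_contiguous(void *pages[], unsigned int npages)
{
	unsigned char *phys = malloc((size_t)npages * PAGE_SZ);
	unsigned int n;

	if (!phys)
		return NULL;

	/* copy page by page, mirroring the shmem -> phys copy on attach */
	for (n = 0; n < npages; n++)
		memcpy(phys + (size_t)n * PAGE_SZ, pages[n], PAGE_SZ);

	return phys;	/* caller frees; dirty data is copied back on detach */
}

int main(void)
{
	void *pages[4];
	void *phys;
	unsigned int n;

	for (n = 0; n < 4; n++)
		pages[n] = calloc(1, PAGE_SZ);

	phys = gather_contiguous(pages, 4);

	free(phys);
	for (n = 0; n < 4; n++)
		free(pages[n]);
	return 0;
}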
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include "i915_selftest.h" #include "gt/intel_context.h" #include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_regs.h" #include "gem/i915_gem_lmem.h" #include "gem/selftests/igt_gem_utils.h" #include "selftests/igt_flush_test.h" #include "selftests/mock_drm.h" #include "selftests/i915_random.h" #include "huge_gem_object.h" #include "mock_context.h" #define OW_SIZE 16 /* in bytes */ #define F_SUBTILE_SIZE 64 /* in bytes */ #define F_TILE_WIDTH 128 /* in bytes */ #define F_TILE_HEIGHT 32 /* in pixels */ #define F_SUBTILE_WIDTH OW_SIZE /* in bytes */ #define F_SUBTILE_HEIGHT 4 /* in pixels */ static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp) { int tile_base; int tile_x, tile_y; int swizzle, subtile; int pixel_size = bpp / 8; int pos; /* * Subtile remapping for F tile. Note that map[a]==b implies map[b]==a * so we can use the same table to tile and until. */ static const u8 f_subtile_map[] = { 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31, 32, 33, 34, 35, 40, 41, 42, 43, 36, 37, 38, 39, 44, 45, 46, 47, 48, 49, 50, 51, 56, 57, 58, 59, 52, 53, 54, 55, 60, 61, 62, 63 }; x *= pixel_size; /* * Where does the 4k tile start (in bytes)? This is the same for Y and * F so we can use the Y-tile algorithm to get to that point. */ tile_base = y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT + x / F_TILE_WIDTH * 4096; /* Find pixel within tile */ tile_x = x % F_TILE_WIDTH; tile_y = y % F_TILE_HEIGHT; /* And figure out the subtile within the 4k tile */ subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH; /* Swizzle the subtile number according to the bspec diagram */ swizzle = f_subtile_map[subtile]; /* Calculate new position */ pos = tile_base + swizzle * F_SUBTILE_SIZE + tile_y % F_SUBTILE_HEIGHT * OW_SIZE + tile_x % F_SUBTILE_WIDTH; GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size)); return pos / pixel_size * 4; } enum client_tiling { CLIENT_TILING_LINEAR, CLIENT_TILING_X, CLIENT_TILING_Y, CLIENT_TILING_4, CLIENT_NUM_TILING_TYPES }; #define WIDTH 512 #define HEIGHT 32 struct blit_buffer { struct i915_vma *vma; u32 start_val; enum client_tiling tiling; }; struct tiled_blits { struct intel_context *ce; struct blit_buffer buffers[3]; struct blit_buffer scratch; struct i915_vma *batch; u64 hole; u64 align; u32 width; u32 height; }; static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) { int gen = GRAPHICS_VER(i915); /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */ drm_WARN_ON(&i915->drm, gen < 9); if (gen < 12) return true; if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) return false; return HAS_DISPLAY(i915); } static bool fast_blit_ok(const struct blit_buffer *buf) { /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */ if (GRAPHICS_VER(buf->vma->vm->i915) < 9) return false; /* filter out platforms with unsupported X-tile support in fastblit */ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915)) return false; return true; } static int prepare_blit(const struct tiled_blits *t, struct blit_buffer *dst, struct blit_buffer *src, struct drm_i915_gem_object *batch) { const int ver = GRAPHICS_VER(to_i915(batch->base.dev)); bool use_64b_reloc = ver >= 8; u32 src_pitch, dst_pitch; u32 cmd, *cs; cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC); if (IS_ERR(cs)) return PTR_ERR(cs); 
if (fast_blit_ok(dst) && fast_blit_ok(src)) { struct intel_gt *gt = t->ce->engine->gt; u32 src_tiles = 0, dst_tiles = 0; u32 src_4t = 0, dst_4t = 0; /* Need to program BLIT_CCTL if it is not done previously * before using XY_FAST_COPY_BLT */ *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base)); *cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) | BLIT_CCTL_DST_MOCS(gt->mocs.uc_index)); src_pitch = t->width; /* in dwords */ if (src->tiling == CLIENT_TILING_4) { src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR); src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4; } else if (src->tiling == CLIENT_TILING_Y) { src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR); } else if (src->tiling == CLIENT_TILING_X) { src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X); } else { src_pitch *= 4; /* in bytes */ } dst_pitch = t->width; /* in dwords */ if (dst->tiling == CLIENT_TILING_4) { dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR); dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4; } else if (dst->tiling == CLIENT_TILING_Y) { dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR); } else if (dst->tiling == CLIENT_TILING_X) { dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X); } else { dst_pitch *= 4; /* in bytes */ } *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) | src_tiles | dst_tiles; *cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch; *cs++ = 0; *cs++ = t->height << 16 | t->width; *cs++ = lower_32_bits(i915_vma_offset(dst->vma)); *cs++ = upper_32_bits(i915_vma_offset(dst->vma)); *cs++ = 0; *cs++ = src_pitch; *cs++ = lower_32_bits(i915_vma_offset(src->vma)); *cs++ = upper_32_bits(i915_vma_offset(src->vma)); } else { if (ver >= 6) { *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(BCS_SWCTRL); cmd = (BCS_SRC_Y | BCS_DST_Y) << 16; if (src->tiling == CLIENT_TILING_Y) cmd |= BCS_SRC_Y; if (dst->tiling == CLIENT_TILING_Y) cmd |= BCS_DST_Y; *cs++ = cmd; cmd = MI_FLUSH_DW; if (ver >= 8) cmd++; *cs++ = cmd; *cs++ = 0; *cs++ = 0; *cs++ = 0; } cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2); if (ver >= 8) cmd += 2; src_pitch = t->width * 4; if (src->tiling) { cmd |= XY_SRC_COPY_BLT_SRC_TILED; src_pitch /= 4; } dst_pitch = t->width * 4; if (dst->tiling) { cmd |= XY_SRC_COPY_BLT_DST_TILED; dst_pitch /= 4; } *cs++ = cmd; *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch; *cs++ = 0; *cs++ = t->height << 16 | t->width; *cs++ = lower_32_bits(i915_vma_offset(dst->vma)); if (use_64b_reloc) *cs++ = upper_32_bits(i915_vma_offset(dst->vma)); *cs++ = 0; *cs++ = src_pitch; *cs++ = lower_32_bits(i915_vma_offset(src->vma)); if (use_64b_reloc) *cs++ = upper_32_bits(i915_vma_offset(src->vma)); } *cs++ = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(batch); i915_gem_object_unpin_map(batch); return 0; } static void tiled_blits_destroy_buffers(struct tiled_blits *t) { int i; for (i = 0; i < ARRAY_SIZE(t->buffers); i++) i915_vma_put(t->buffers[i].vma); i915_vma_put(t->scratch.vma); i915_vma_put(t->batch); } static struct i915_vma * __create_vma(struct tiled_blits *t, size_t size, bool lmem) { struct drm_i915_private *i915 = t->ce->vm->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; if (lmem) obj = i915_gem_object_create_lmem(i915, size, 0); else obj = i915_gem_object_create_shmem(i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); vma = i915_vma_instance(obj, t->ce->vm, NULL); if (IS_ERR(vma)) i915_gem_object_put(obj); return vma; } static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem) { return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), 
lmem); } static int tiled_blits_create_buffers(struct tiled_blits *t, int width, int height, struct rnd_state *prng) { struct drm_i915_private *i915 = t->ce->engine->i915; int i; t->width = width; t->height = height; t->batch = __create_vma(t, PAGE_SIZE, false); if (IS_ERR(t->batch)) return PTR_ERR(t->batch); t->scratch.vma = create_vma(t, false); if (IS_ERR(t->scratch.vma)) { i915_vma_put(t->batch); return PTR_ERR(t->scratch.vma); } for (i = 0; i < ARRAY_SIZE(t->buffers); i++) { struct i915_vma *vma; vma = create_vma(t, HAS_LMEM(i915) && i % 2); if (IS_ERR(vma)) { tiled_blits_destroy_buffers(t); return PTR_ERR(vma); } t->buffers[i].vma = vma; t->buffers[i].tiling = i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng); /* Platforms support either TileY or Tile4, not both */ if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y) t->buffers[i].tiling = CLIENT_TILING_4; else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4) t->buffers[i].tiling = CLIENT_TILING_Y; } return 0; } static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val) { int i; t->scratch.start_val = val; for (i = 0; i < t->width * t->height; i++) vaddr[i] = val++; i915_gem_object_flush_map(t->scratch.vma->obj); } static u64 swizzle_bit(unsigned int bit, u64 offset) { return (offset & BIT_ULL(bit)) >> (bit - 6); } static u64 tiled_offset(const struct intel_gt *gt, u64 v, unsigned int stride, enum client_tiling tiling, int x_pos, int y_pos) { unsigned int swizzle; u64 x, y; if (tiling == CLIENT_TILING_LINEAR) return v; y = div64_u64_rem(v, stride, &x); if (tiling == CLIENT_TILING_4) { v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32); /* no swizzling for f-tiling */ swizzle = I915_BIT_6_SWIZZLE_NONE; } else if (tiling == CLIENT_TILING_X) { v = div64_u64_rem(y, 8, &y) * stride * 8; v += y * 512; v += div64_u64_rem(x, 512, &x) << 12; v += x; swizzle = gt->ggtt->bit_6_swizzle_x; } else { const unsigned int ytile_span = 16; const unsigned int ytile_height = 512; v = div64_u64_rem(y, 32, &y) * stride * 32; v += y * ytile_span; v += div64_u64_rem(x, ytile_span, &x) * ytile_height; v += x; swizzle = gt->ggtt->bit_6_swizzle_y; } switch (swizzle) { case I915_BIT_6_SWIZZLE_9: v ^= swizzle_bit(9, v); break; case I915_BIT_6_SWIZZLE_9_10: v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v); break; case I915_BIT_6_SWIZZLE_9_11: v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v); break; case I915_BIT_6_SWIZZLE_9_10_11: v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v); break; } return v; } static const char *repr_tiling(enum client_tiling tiling) { switch (tiling) { case CLIENT_TILING_LINEAR: return "linear"; case CLIENT_TILING_X: return "X"; case CLIENT_TILING_Y: return "Y"; case CLIENT_TILING_4: return "F"; default: return "unknown"; } } static int verify_buffer(const struct tiled_blits *t, struct blit_buffer *buf, struct rnd_state *prng) { const u32 *vaddr; int ret = 0; int x, y, p; x = i915_prandom_u32_max_state(t->width, prng); y = i915_prandom_u32_max_state(t->height, prng); p = y * t->width + x; vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); if (vaddr[0] != buf->start_val) { ret = -EINVAL; } else { u64 v = tiled_offset(buf->vma->vm->gt, p * 4, t->width * 4, buf->tiling, x, y); if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p) ret = -EINVAL; } if (ret) { pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n", repr_tiling(buf->tiling), x, y, buf->start_val); igt_hexdump(vaddr, 4096); } 
i915_gem_object_unpin_map(buf->vma->obj); return ret; } static int pin_buffer(struct i915_vma *vma, u64 addr) { int err; if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) { err = i915_vma_unbind_unlocked(vma); if (err) return err; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr); if (err) return err; GEM_BUG_ON(i915_vma_offset(vma) != addr); return 0; } static int tiled_blit(struct tiled_blits *t, struct blit_buffer *dst, u64 dst_addr, struct blit_buffer *src, u64 src_addr) { struct i915_request *rq; int err; err = pin_buffer(src->vma, src_addr); if (err) { pr_err("Cannot pin src @ %llx\n", src_addr); return err; } err = pin_buffer(dst->vma, dst_addr); if (err) { pr_err("Cannot pin dst @ %llx\n", dst_addr); goto err_src; } err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH); if (err) { pr_err("cannot pin batch\n"); goto err_dst; } err = prepare_blit(t, dst, src, t->batch->obj); if (err) goto err_bb; rq = intel_context_create_request(t->ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_bb; } err = igt_vma_move_to_active_unlocked(t->batch, rq, 0); if (!err) err = igt_vma_move_to_active_unlocked(src->vma, rq, 0); if (!err) err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0); if (!err) err = rq->engine->emit_bb_start(rq, i915_vma_offset(t->batch), i915_vma_size(t->batch), 0); i915_request_get(rq); i915_request_add(rq); if (i915_request_wait(rq, 0, HZ / 2) < 0) err = -ETIME; i915_request_put(rq); dst->start_val = src->start_val; err_bb: i915_vma_unpin(t->batch); err_dst: i915_vma_unpin(dst->vma); err_src: i915_vma_unpin(src->vma); return err; } static struct tiled_blits * tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng) { struct drm_mm_node hole; struct tiled_blits *t; u64 hole_size; int err; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return ERR_PTR(-ENOMEM); t->ce = intel_context_create(engine); if (IS_ERR(t->ce)) { err = PTR_ERR(t->ce); goto err_free; } t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL); t->align = max(t->align, i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM)); hole_size = 2 * round_up(WIDTH * HEIGHT * 4, t->align); hole_size *= 2; /* room to maneuver */ hole_size += 2 * t->align; /* padding on either side */ mutex_lock(&t->ce->vm->mutex); memset(&hole, 0, sizeof(hole)); err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole, hole_size, t->align, I915_COLOR_UNEVICTABLE, 0, U64_MAX, DRM_MM_INSERT_BEST); if (!err) drm_mm_remove_node(&hole); mutex_unlock(&t->ce->vm->mutex); if (err) { err = -ENODEV; goto err_put; } t->hole = hole.start + t->align; pr_info("Using hole at %llx\n", t->hole); err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng); if (err) goto err_put; return t; err_put: intel_context_put(t->ce); err_free: kfree(t); return ERR_PTR(err); } static void tiled_blits_destroy(struct tiled_blits *t) { tiled_blits_destroy_buffers(t); intel_context_put(t->ce); kfree(t); } static int tiled_blits_prepare(struct tiled_blits *t, struct rnd_state *prng) { u64 offset = round_up(t->width * t->height * 4, t->align); u32 *map; int err; int i; map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); /* Use scratch to fill objects */ for (i = 0; i < ARRAY_SIZE(t->buffers); i++) { fill_scratch(t, map, prandom_u32_state(prng)); GEM_BUG_ON(verify_buffer(t, &t->scratch, prng)); err = tiled_blit(t, &t->buffers[i], t->hole + offset, &t->scratch, t->hole); if (err == 0) err = verify_buffer(t, &t->buffers[i], prng); if (err) { pr_err("Failed 
to create buffer %d\n", i); break; } } i915_gem_object_unpin_map(t->scratch.vma->obj); return err; } static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng) { u64 offset = round_up(t->width * t->height * 4, 2 * t->align); int err; /* We want to check position invariant tiling across GTT eviction */ err = tiled_blit(t, &t->buffers[1], t->hole + offset / 2, &t->buffers[0], t->hole + 2 * offset); if (err) return err; /* Simulating GTT eviction of the same buffer / layout */ t->buffers[2].tiling = t->buffers[0].tiling; /* Reposition so that we overlap the old addresses, and slightly off */ err = tiled_blit(t, &t->buffers[2], t->hole + t->align, &t->buffers[1], t->hole + 3 * offset / 2); if (err) return err; err = verify_buffer(t, &t->buffers[2], prng); if (err) return err; return 0; } static int __igt_client_tiled_blits(struct intel_engine_cs *engine, struct rnd_state *prng) { struct tiled_blits *t; int err; t = tiled_blits_create(engine, prng); if (IS_ERR(t)) return PTR_ERR(t); err = tiled_blits_prepare(t, prng); if (err) goto out; err = tiled_blits_bounce(t, prng); if (err) goto out; out: tiled_blits_destroy(t); return err; } static bool has_bit17_swizzle(int sw) { return (sw == I915_BIT_6_SWIZZLE_9_10_17 || sw == I915_BIT_6_SWIZZLE_9_17); } static bool bad_swizzling(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = to_gt(i915)->ggtt; if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return true; if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) || has_bit17_swizzle(ggtt->bit_6_swizzle_y)) return true; return false; } static int igt_client_tiled_blits(void *arg) { struct drm_i915_private *i915 = arg; I915_RND_STATE(prng); int inst = 0; /* Test requires explicit BLT tiling controls */ if (GRAPHICS_VER(i915) < 4) return 0; if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */ return 0; do { struct intel_engine_cs *engine; int err; engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_COPY, inst++); if (!engine) return 0; err = __igt_client_tiled_blits(engine, &prng); if (err == -ENODEV) err = 0; if (err) return err; } while (1); } int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_client_tiled_blits), }; if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
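The blit selftest above validates position-invariant tiling by recomputing tiled addresses on the CPU in tiled_offset() and swizzle_bit(). The standalone program below repeats the legacy Y-tile arithmetic from that file (16-byte columns, 32-row tiles, optional bit-9-into-bit-6 swizzle) outside the kernel so the mapping can be inspected in isolation; the example coordinates and the single swizzle mode shown are illustrative choices.

/* Standalone replay of the Y-tile address math used by tiled_offset():
 * a linear position (x bytes into row y, pitch `stride` bytes) is
 * remapped into 128B x 32-row tiles built from 16B-wide columns, then
 * optionally swizzled by XORing bit 9 into bit 6. */
#include <stdio.h>
#include <stdint.h>

static uint64_t swizzle_bit9(uint64_t offset)
{
	/* mirror of swizzle_bit(9, v): fold bit 9 down onto bit 6 */
	return (offset & (1ull << 9)) >> (9 - 6);
}

static uint64_t ytile_offset(uint64_t x, uint64_t y, uint64_t stride,
			     int swizzle9)
{
	const uint64_t span = 16;	/* bytes per Y-tile column row */
	const uint64_t height = 512;	/* bytes per full 32-row column */
	uint64_t v;

	v  = (y / 32) * stride * 32;	/* whole tile rows above us */
	v += (y % 32) * span;		/* row within the tile */
	v += (x / span) * height;	/* whole columns to our left */
	v += x % span;			/* byte within the column */

	if (swizzle9)
		v ^= swizzle_bit9(v);	/* I915_BIT_6_SWIZZLE_9 */
	return v;
}

int main(void)
{
	/* e.g. byte 20 of row 3 in a 512-byte-pitch surface */
	printf("tiled offset: %llu\n",
	       (unsigned long long)ytile_offset(20, 3, 512, 0));
	return 0;
}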
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "i915_selftest.h" #include "huge_gem_object.h" #include "selftests/igt_flush_test.h" #include "selftests/mock_gem_device.h" static int igt_gem_object(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; int err; /* Basic test to ensure we can create an object */ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("i915_gem_object_create failed, err=%d\n", err); goto out; } err = 0; i915_gem_object_put(obj); out: return err; } static int igt_gem_huge(void *arg) { const unsigned long nreal = 509; /* just to be awkward */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; unsigned long n; int err; /* Basic sanitycheck of our huge fake object allocation */ obj = huge_gem_object(i915, nreal * PAGE_SIZE, to_gt(i915)->ggtt->vm.total + PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); err = i915_gem_object_pin_pages_unlocked(obj); if (err) { pr_err("Failed to allocate %lu pages (%lu total), err=%d\n", nreal, obj->base.size / PAGE_SIZE, err); goto out; } for (n = 0; n < obj->base.size / PAGE_SIZE; n++) { if (i915_gem_object_get_page(obj, n) != i915_gem_object_get_page(obj, n % nreal)) { pr_err("Page lookup mismatch at index %lu [%lu]\n", n, n % nreal); err = -EINVAL; goto out_unpin; } } out_unpin: i915_gem_object_unpin_pages(obj); out: i915_gem_object_put(obj); return err; } int i915_gem_object_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_object), }; struct drm_i915_private *i915; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; err = i915_subtests(tests, i915); mock_destroy_device(i915); return err; } int i915_gem_object_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_huge), }; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
/* * SPDX-License-Identifier: MIT * * Copyright © 2016 Intel Corporation */ #include "i915_file_private.h" #include "mock_context.h" #include "selftests/mock_drm.h" #include "selftests/mock_gtt.h" struct i915_gem_context * mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; struct i915_gem_engines *e; struct intel_sseu null_sseu = {}; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; kref_init(&ctx->ref); INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; INIT_WORK(&ctx->release_work, i915_gem_context_release_work); mutex_init(&ctx->mutex); spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); i915_gem_context_set_persistence(ctx); if (name) { struct i915_ppgtt *ppgtt; strncpy(ctx->name, name, sizeof(ctx->name) - 1); ppgtt = mock_ppgtt(i915, name); if (!ppgtt) goto err_free; ctx->vm = &ppgtt->vm; } mutex_init(&ctx->engines_mutex); e = default_engines(ctx, null_sseu); if (IS_ERR(e)) goto err_vm; RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); mutex_init(&ctx->lut_mutex); return ctx; err_vm: if (ctx->vm) i915_vm_put(ctx->vm); err_free: kfree(ctx); return NULL; } void mock_context_close(struct i915_gem_context *ctx) { context_close(ctx); } void mock_init_contexts(struct drm_i915_private *i915) { init_contexts(&i915->gem.contexts); } struct i915_gem_context * live_context(struct drm_i915_private *i915, struct file *file) { struct drm_i915_file_private *fpriv = to_drm_file(file)->driver_priv; struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; int err; u32 id; pc = proto_context_create(i915, 0); if (IS_ERR(pc)) return ERR_CAST(pc); ctx = i915_gem_create_context(i915, pc); proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; i915_gem_context_set_no_error_capture(ctx); err = xa_alloc(&fpriv->context_xa, &id, NULL, xa_limit_32b, GFP_KERNEL); if (err < 0) goto err_ctx; gem_context_register(ctx, fpriv, id); return ctx; err_ctx: context_close(ctx); return ERR_PTR(err); } struct i915_gem_context * live_context_for_engine(struct intel_engine_cs *engine, struct file *file) { struct i915_gem_engines *engines; struct i915_gem_context *ctx; struct intel_sseu null_sseu = {}; struct intel_context *ce; engines = alloc_engines(1); if (!engines) return ERR_PTR(-ENOMEM); ctx = live_context(engine->i915, file); if (IS_ERR(ctx)) { __free_engines(engines, 0); return ctx; } ce = intel_context_create(engine); if (IS_ERR(ce)) { __free_engines(engines, 0); return ERR_CAST(ce); } intel_context_set_gem(ce, ctx, null_sseu); engines->engines[0] = ce; engines->num_engines = 1; mutex_lock(&ctx->engines_mutex); i915_gem_context_set_user_engines(ctx); engines = rcu_replace_pointer(ctx->engines, engines, 1); mutex_unlock(&ctx->engines_mutex); engines_idle_release(ctx, engines); return ctx; } struct i915_gem_context * kernel_context(struct drm_i915_private *i915, struct i915_address_space *vm) { struct i915_gem_context *ctx; struct i915_gem_proto_context *pc; pc = proto_context_create(i915, 0); if (IS_ERR(pc)) return ERR_CAST(pc); if (vm) { if (pc->vm) i915_vm_put(pc->vm); pc->vm = i915_vm_get(vm); } ctx = i915_gem_create_context(i915, pc); proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; i915_gem_context_clear_bannable(ctx); i915_gem_context_set_persistence(ctx); i915_gem_context_set_no_error_capture(ctx); return ctx; } void kernel_context_close(struct i915_gem_context *ctx) { context_close(ctx); }
linux-master
drivers/gpu/drm/i915/gem/selftests/mock_context.c
/* * SPDX-License-Identifier: MIT * * Copyright © 2017 Intel Corporation */ #include <linux/prime_numbers.h> #include <linux/string_helpers.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_engine_pm.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" #include "i915_selftest.h" #include "gem/selftests/igt_gem_utils.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_live_test.h" #include "selftests/igt_reset.h" #include "selftests/igt_spinner.h" #include "selftests/mock_drm.h" #include "selftests/mock_gem_device.h" #include "huge_gem_object.h" #include "igt_gem_utils.h" #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_gem_context **ctx; struct igt_live_test t; struct file *file; unsigned long n; int err = -ENODEV; /* * Create as many contexts as we can feasibly get away with * and check we can switch between them rapidly. * * Serves as very simple stress test for submission and HW switching * between contexts. */ if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { err = -ENOMEM; goto out_file; } for (n = 0; n < nctx; n++) { ctx[n] = live_context(i915, file); if (IS_ERR(ctx[n])) { err = PTR_ERR(ctx[n]); goto out_ctx; } } for_each_uabi_engine(engine, i915) { struct i915_request *rq = NULL; unsigned long end_time, prime; ktime_t times[2] = {}; times[0] = ktime_get_raw(); for (n = 0; n < nctx; n++) { struct i915_request *this; this = igt_request_alloc(ctx[n], engine); if (IS_ERR(this)) { err = PTR_ERR(this); goto out_ctx; } if (rq) { i915_request_await_dma_fence(this, &rq->fence); i915_request_put(rq); } rq = i915_request_get(this); i915_request_add(this); } if (i915_request_wait(rq, 0, 10 * HZ) < 0) { pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(engine->gt); i915_request_put(rq); err = -EIO; goto out_ctx; } i915_request_put(rq); times[1] = ktime_get_raw(); pr_info("Populated %d contexts on %s in %lluns\n", nctx, engine->name, ktime_to_ns(times[1] - times[0])); err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_ctx; end_time = jiffies + i915_selftest.timeout_jiffies; for_each_prime_number_from(prime, 2, 8192) { times[1] = ktime_get_raw(); rq = NULL; for (n = 0; n < prime; n++) { struct i915_request *this; this = igt_request_alloc(ctx[n % nctx], engine); if (IS_ERR(this)) { err = PTR_ERR(this); goto out_ctx; } if (rq) { /* Force submission order */ i915_request_await_dma_fence(this, &rq->fence); i915_request_put(rq); } /* * This space is left intentionally blank. * * We do not actually want to perform any * action with this request, we just want * to measure the latency in allocation * and submission of our breadcrumbs - * ensuring that the bare request is sufficient * for the system to work (i.e. proper HEAD * tracking of the rings, interrupt handling, * etc). It also gives us the lowest bounds * for latency. 
*/ rq = i915_request_get(this); i915_request_add(this); } GEM_BUG_ON(!rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Switching between %ld contexts timed out\n", prime); intel_gt_set_wedged(engine->gt); i915_request_put(rq); break; } i915_request_put(rq); times[1] = ktime_sub(ktime_get_raw(), times[1]); if (prime == 2) times[0] = times[1]; if (__igt_timeout(end_time, NULL)) break; } err = igt_live_test_end(&t); if (err) goto out_ctx; pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n", engine->name, ktime_to_ns(times[0]), prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1)); } out_ctx: kfree(ctx); out_file: fput(file); return err; } struct parallel_switch { struct kthread_worker *worker; struct kthread_work work; struct intel_context *ce[2]; int result; }; static void __live_parallel_switch1(struct kthread_work *work) { struct parallel_switch *arg = container_of(work, typeof(*arg), work); IGT_TIMEOUT(end_time); unsigned long count; count = 0; arg->result = 0; do { struct i915_request *rq = NULL; int n; for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) { struct i915_request *prev = rq; rq = i915_request_create(arg->ce[n]); if (IS_ERR(rq)) { i915_request_put(prev); arg->result = PTR_ERR(rq); break; } i915_request_get(rq); if (prev) { arg->result = i915_request_await_dma_fence(rq, &prev->fence); i915_request_put(prev); } i915_request_add(rq); } if (IS_ERR_OR_NULL(rq)) break; if (i915_request_wait(rq, 0, HZ) < 0) arg->result = -ETIME; i915_request_put(rq); count++; } while (!arg->result && !__igt_timeout(end_time, NULL)); pr_info("%s: %lu switches (sync) <%d>\n", arg->ce[0]->engine->name, count, arg->result); } static void __live_parallel_switchN(struct kthread_work *work) { struct parallel_switch *arg = container_of(work, typeof(*arg), work); struct i915_request *rq = NULL; IGT_TIMEOUT(end_time); unsigned long count; int n; count = 0; arg->result = 0; do { for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) { struct i915_request *prev = rq; rq = i915_request_create(arg->ce[n]); if (IS_ERR(rq)) { i915_request_put(prev); arg->result = PTR_ERR(rq); break; } i915_request_get(rq); if (prev) { arg->result = i915_request_await_dma_fence(rq, &prev->fence); i915_request_put(prev); } i915_request_add(rq); } count++; } while (!arg->result && !__igt_timeout(end_time, NULL)); if (!IS_ERR_OR_NULL(rq)) i915_request_put(rq); pr_info("%s: %lu switches (many) <%d>\n", arg->ce[0]->engine->name, count, arg->result); } static int live_parallel_switch(void *arg) { struct drm_i915_private *i915 = arg; static void (* const func[])(struct kthread_work *) = { __live_parallel_switch1, __live_parallel_switchN, NULL, }; struct parallel_switch *data = NULL; struct i915_gem_engines *engines; struct i915_gem_engines_iter it; void (* const *fn)(struct kthread_work *); struct i915_gem_context *ctx; struct intel_context *ce; struct file *file; int n, m, count; int err = 0; /* * Check we can process switches on all engines simultaneously. 
*/ if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_file; } engines = i915_gem_context_lock_engines(ctx); count = engines->num_engines; data = kcalloc(count, sizeof(*data), GFP_KERNEL); if (!data) { i915_gem_context_unlock_engines(ctx); err = -ENOMEM; goto out_file; } m = 0; /* Use the first context as our template for the engines */ for_each_gem_engine(ce, engines, it) { err = intel_context_pin(ce); if (err) { i915_gem_context_unlock_engines(ctx); goto out; } data[m++].ce[0] = intel_context_get(ce); } i915_gem_context_unlock_engines(ctx); /* Clone the same set of engines into the other contexts */ for (n = 1; n < ARRAY_SIZE(data->ce); n++) { ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } for (m = 0; m < count; m++) { if (!data[m].ce[0]) continue; ce = intel_context_create(data[m].ce[0]->engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; } err = intel_context_pin(ce); if (err) { intel_context_put(ce); goto out; } data[m].ce[n] = ce; } } for (n = 0; n < count; n++) { struct kthread_worker *worker; if (!data[n].ce[0]) continue; worker = kthread_create_worker(0, "igt/parallel:%s", data[n].ce[0]->engine->name); if (IS_ERR(worker)) { err = PTR_ERR(worker); goto out; } data[n].worker = worker; } for (fn = func; !err && *fn; fn++) { struct igt_live_test t; err = igt_live_test_begin(&t, i915, __func__, ""); if (err) break; for (n = 0; n < count; n++) { if (!data[n].ce[0]) continue; data[n].result = 0; kthread_init_work(&data[n].work, *fn); kthread_queue_work(data[n].worker, &data[n].work); } for (n = 0; n < count; n++) { if (data[n].ce[0]) { kthread_flush_work(&data[n].work); if (data[n].result && !err) err = data[n].result; } } if (igt_live_test_end(&t)) { err = err ?: -EIO; break; } } out: for (n = 0; n < count; n++) { for (m = 0; m < ARRAY_SIZE(data->ce); m++) { if (!data[n].ce[m]) continue; intel_context_unpin(data[n].ce[m]); intel_context_put(data[n].ce[m]); } if (data[n].worker) kthread_destroy_worker(data[n].worker); } kfree(data); out_file: fput(file); return err; } static unsigned long real_page_count(struct drm_i915_gem_object *obj) { return huge_gem_object_phys_size(obj) >> PAGE_SHIFT; } static unsigned long fake_page_count(struct drm_i915_gem_object *obj) { return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; } static int gpu_fill(struct intel_context *ce, struct drm_i915_gem_object *obj, unsigned int dw) { struct i915_vma *vma; int err; GEM_BUG_ON(obj->base.size > ce->vm->total); GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); if (err) return err; /* * Within the GTT the huge objects maps every page onto * its 1024 real pages (using phys_pfn = dma_pfn % 1024). * We set the nth dword within the page using the nth * mapping via the GTT - this should exercise the GTT mapping * whilst checking that each context provides a unique view * into the object. 
*/ err = igt_gpu_fill_dw(ce, vma, (dw * real_page_count(obj)) << PAGE_SHIFT | (dw * sizeof(u32)), real_page_count(obj), dw); i915_vma_unpin(vma); return err; } static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) { const bool has_llc = HAS_LLC(to_i915(obj->base.dev)); unsigned int need_flush; unsigned long n, m; int err; i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_write(obj, &need_flush); if (err) goto out; for (n = 0; n < real_page_count(obj); n++) { u32 *map; map = kmap_atomic(i915_gem_object_get_page(obj, n)); for (m = 0; m < DW_PER_PAGE; m++) map[m] = value; if (!has_llc) drm_clflush_virt_range(map, PAGE_SIZE); kunmap_atomic(map); } i915_gem_object_finish_access(obj); obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; obj->write_domain = 0; out: i915_gem_object_unlock(obj); return err; } static noinline int cpu_check(struct drm_i915_gem_object *obj, unsigned int idx, unsigned int max) { unsigned int needs_flush; unsigned long n; int err; i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) goto out_unlock; for (n = 0; n < real_page_count(obj); n++) { u32 *map, m; map = kmap_atomic(i915_gem_object_get_page(obj, n)); if (needs_flush & CLFLUSH_BEFORE) drm_clflush_virt_range(map, PAGE_SIZE); for (m = 0; m < max; m++) { if (map[m] != m) { pr_err("%pS: Invalid value at object %d page %ld/%ld, offset %d/%d: found %x expected %x\n", __builtin_return_address(0), idx, n, real_page_count(obj), m, max, map[m], m); err = -EINVAL; goto out_unmap; } } for (; m < DW_PER_PAGE; m++) { if (map[m] != STACK_MAGIC) { pr_err("%pS: Invalid value at object %d page %ld, offset %d: found %x expected %x (uninitialised)\n", __builtin_return_address(0), idx, n, m, map[m], STACK_MAGIC); err = -EINVAL; goto out_unmap; } } out_unmap: kunmap_atomic(map); if (err) break; } i915_gem_object_finish_access(obj); out_unlock: i915_gem_object_unlock(obj); return err; } static int file_add_object(struct file *file, struct drm_i915_gem_object *obj) { int err; GEM_BUG_ON(obj->base.handle_count); /* tie the object to the drm_file for easy reaping */ err = idr_alloc(&to_drm_file(file)->object_idr, &obj->base, 1, 0, GFP_KERNEL); if (err < 0) return err; i915_gem_object_get(obj); obj->base.handle_count++; return 0; } static struct drm_i915_gem_object * create_test_object(struct i915_address_space *vm, struct file *file, struct list_head *objects) { struct drm_i915_gem_object *obj; u64 size; int err; /* Keep in GEM's good graces */ intel_gt_retire_requests(vm->gt); size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE); obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size); if (IS_ERR(obj)) return obj; err = file_add_object(file, obj); i915_gem_object_put(obj); if (err) return ERR_PTR(err); err = cpu_fill(obj, STACK_MAGIC); if (err) { pr_err("Failed to fill object with cpu, err=%d\n", err); return ERR_PTR(err); } list_add_tail(&obj->st_link, objects); return obj; } static unsigned long max_dwords(struct drm_i915_gem_object *obj) { unsigned long npages = fake_page_count(obj); GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE)); return npages / DW_PER_PAGE; } static void throttle_release(struct i915_request **q, int count) { int i; for (i = 0; i < count; i++) { if (IS_ERR_OR_NULL(q[i])) continue; i915_request_put(fetch_and_zero(&q[i])); } } static int throttle(struct intel_context *ce, struct i915_request **q, int count) { int i; if (!IS_ERR_OR_NULL(q[0])) { if (i915_request_wait(q[0], 
I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) < 0) return -EINTR; i915_request_put(q[0]); } for (i = 0; i < count - 1; i++) q[i] = q[i + 1]; q[i] = intel_context_create_request(ce); if (IS_ERR(q[i])) return PTR_ERR(q[i]); i915_request_get(q[i]); i915_request_add(q[i]); return 0; } static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; int err = -ENODEV; /* * Create a few different contexts (with different mm) and write * through each ctx/mm using the GPU making sure those writes end * up in the expected pages of our obj. */ if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; for_each_uabi_engine(engine, i915) { struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; struct i915_request *tq[5] = {}; struct igt_live_test t; IGT_TIMEOUT(end_time); LIST_HEAD(objects); struct file *file; if (!intel_engine_can_store_dword(engine)) continue; if (!engine->context_size) continue; /* No logical context support in HW */ file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) goto out_file; ncontexts = 0; ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; struct intel_context *ce; ctx = kernel_context(i915, NULL); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_file; } ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); GEM_BUG_ON(IS_ERR(ce)); if (!obj) { obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); intel_context_put(ce); kernel_context_close(ctx); goto out_file; } } err = gpu_fill(ce, obj, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, str_yes_no(i915_gem_context_has_full_ppgtt(ctx)), err); intel_context_put(ce); kernel_context_close(ctx); goto out_file; } err = throttle(ce, tq, ARRAY_SIZE(tq)); if (err) { intel_context_put(ce); kernel_context_close(ctx); goto out_file; } if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; } ndwords++; ncontexts++; intel_context_put(ce); kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", ncontexts, engine->name, ndwords); ncontexts = dw = 0; list_for_each_entry(obj, &objects, st_link) { unsigned int rem = min_t(unsigned int, ndwords - dw, max_dwords(obj)); err = cpu_check(obj, ncontexts++, rem); if (err) break; dw += rem; } out_file: throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; fput(file); if (err) return err; i915_gem_drain_freed_objects(i915); } return 0; } static int igt_shared_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; struct i915_request *tq[5] = {}; struct i915_gem_context *parent; struct intel_engine_cs *engine; struct igt_live_test t; struct file *file; int err = 0; /* * Create a few different contexts with the same mm and write * through each ctx using the GPU making sure those writes end * up in the expected pages of our obj. 
*/ if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); parent = live_context(i915, file); if (IS_ERR(parent)) { err = PTR_ERR(parent); goto out_file; } if (!parent->vm) { /* not full-ppgtt; nothing to share */ err = 0; goto out_file; } err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_file; for_each_uabi_engine(engine, i915) { unsigned long ncontexts, ndwords, dw; struct drm_i915_gem_object *obj = NULL; IGT_TIMEOUT(end_time); LIST_HEAD(objects); if (!intel_engine_can_store_dword(engine)) continue; dw = 0; ndwords = 0; ncontexts = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; struct intel_context *ce; ctx = kernel_context(i915, parent->vm); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_test; } ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); GEM_BUG_ON(IS_ERR(ce)); if (!obj) { obj = create_test_object(parent->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); intel_context_put(ce); kernel_context_close(ctx); goto out_test; } } err = gpu_fill(ce, obj, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, str_yes_no(i915_gem_context_has_full_ppgtt(ctx)), err); intel_context_put(ce); kernel_context_close(ctx); goto out_test; } err = throttle(ce, tq, ARRAY_SIZE(tq)); if (err) { intel_context_put(ce); kernel_context_close(ctx); goto out_test; } if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; } ndwords++; ncontexts++; intel_context_put(ce); kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", ncontexts, engine->name, ndwords); ncontexts = dw = 0; list_for_each_entry(obj, &objects, st_link) { unsigned int rem = min_t(unsigned int, ndwords - dw, max_dwords(obj)); err = cpu_check(obj, ncontexts++, rem); if (err) goto out_test; dw += rem; } i915_gem_drain_freed_objects(i915); } out_test: throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; out_file: fput(file); return err; } static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma, struct intel_engine_cs *engine) { u32 *cmd; GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8); cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB); if (IS_ERR(cmd)) return PTR_ERR(cmd); *cmd++ = MI_STORE_REGISTER_MEM_GEN8; *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base)); *cmd++ = lower_32_bits(i915_vma_offset(vma)); *cmd++ = upper_32_bits(i915_vma_offset(vma)); *cmd = MI_BATCH_BUFFER_END; __i915_gem_object_flush_map(rpcs, 0, 64); i915_gem_object_unpin_map(rpcs); intel_gt_chipset_flush(vma->vm->gt); return 0; } static int emit_rpcs_query(struct drm_i915_gem_object *obj, struct intel_context *ce, struct i915_request **rq_out) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_request *rq; struct i915_gem_ww_ctx ww; struct i915_vma *batch; struct i915_vma *vma; struct drm_i915_gem_object *rpcs; int err; GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); if (GRAPHICS_VER(i915) < 8) return -EINVAL; vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(rpcs)) return PTR_ERR(rpcs); batch = i915_vma_instance(rpcs, ce->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto err_put; } i915_gem_ww_ctx_init(&ww, false); retry: err = i915_gem_object_lock(obj, &ww); if (!err) err = i915_gem_object_lock(rpcs, &ww); if (!err) err = 
i915_gem_object_set_to_gtt_domain(obj, false); if (!err) err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) goto err_put; err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER); if (err) goto err_vma; err = rpcs_query_batch(rpcs, vma, ce->engine); if (err) goto err_batch; rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; } err = i915_vma_move_to_active(batch, rq, 0); if (err) goto skip_request; err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); if (err) goto skip_request; if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); if (err) goto skip_request; } err = rq->engine->emit_bb_start(rq, i915_vma_offset(batch), i915_vma_size(batch), 0); if (err) goto skip_request; *rq_out = i915_request_get(rq); skip_request: if (err) i915_request_set_error_once(rq, err); i915_request_add(rq); err_batch: i915_vma_unpin(batch); err_vma: i915_vma_unpin(vma); err_put: if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&ww); i915_gem_object_put(rpcs); return err; } #define TEST_IDLE BIT(0) #define TEST_BUSY BIT(1) #define TEST_RESET BIT(2) static int __sseu_prepare(const char *name, unsigned int flags, struct intel_context *ce, struct igt_spinner **spin) { struct i915_request *rq; int ret; *spin = NULL; if (!(flags & (TEST_BUSY | TEST_RESET))) return 0; *spin = kzalloc(sizeof(**spin), GFP_KERNEL); if (!*spin) return -ENOMEM; ret = igt_spinner_init(*spin, ce->engine->gt); if (ret) goto err_free; rq = igt_spinner_create_request(*spin, ce, MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); goto err_fini; } i915_request_add(rq); if (!igt_wait_for_spinner(*spin, rq)) { pr_err("%s: Spinner failed to start!\n", name); ret = -ETIMEDOUT; goto err_end; } return 0; err_end: igt_spinner_end(*spin); err_fini: igt_spinner_fini(*spin); err_free: kfree(fetch_and_zero(spin)); return ret; } static int __read_slice_count(struct intel_context *ce, struct drm_i915_gem_object *obj, struct igt_spinner *spin, u32 *rpcs) { struct i915_request *rq = NULL; u32 s_mask, s_shift; unsigned int cnt; u32 *buf, val; long ret; ret = emit_rpcs_query(obj, ce, &rq); if (ret) return ret; if (spin) igt_spinner_end(spin); ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); i915_request_put(rq); if (ret < 0) return ret; buf = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(buf)) { ret = PTR_ERR(buf); return ret; } if (GRAPHICS_VER(ce->engine->i915) >= 11) { s_mask = GEN11_RPCS_S_CNT_MASK; s_shift = GEN11_RPCS_S_CNT_SHIFT; } else { s_mask = GEN8_RPCS_S_CNT_MASK; s_shift = GEN8_RPCS_S_CNT_SHIFT; } val = *buf; cnt = (val & s_mask) >> s_shift; *rpcs = val; i915_gem_object_unpin_map(obj); return cnt; } static int __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, const char *prefix, const char *suffix) { if (slices == expected) return 0; if (slices < 0) { pr_err("%s: %s read slice count failed with %d%s\n", name, prefix, slices, suffix); return slices; } pr_err("%s: %s slice count %d is not %u%s\n", name, prefix, slices, expected, suffix); pr_info("RPCS=0x%x; %u%sx%u%s\n", rpcs, slices, (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "", (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT, (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? 
"*" : ""); return -EINVAL; } static int __sseu_finish(const char *name, unsigned int flags, struct intel_context *ce, struct drm_i915_gem_object *obj, unsigned int expected, struct igt_spinner *spin) { unsigned int slices = hweight32(ce->engine->sseu.slice_mask); u32 rpcs = 0; int ret = 0; if (flags & TEST_RESET) { ret = intel_engine_reset(ce->engine, "sseu"); if (ret) goto out; } ret = __read_slice_count(ce, obj, flags & TEST_RESET ? NULL : spin, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); if (ret) goto out; ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); out: if (spin) igt_spinner_end(spin); if ((flags & TEST_IDLE) && ret == 0) { ret = igt_flush_test(ce->engine->i915); if (ret) return ret; ret = __read_slice_count(ce, obj, NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", " after idle!"); } return ret; } static int __sseu_test(const char *name, unsigned int flags, struct intel_context *ce, struct drm_i915_gem_object *obj, struct intel_sseu sseu) { struct igt_spinner *spin = NULL; int ret; intel_engine_pm_get(ce->engine); ret = __sseu_prepare(name, flags, ce, &spin); if (ret) goto out_pm; ret = intel_context_reconfigure_sseu(ce, sseu); if (ret) goto out_spin; ret = __sseu_finish(name, flags, ce, obj, hweight32(sseu.slice_mask), spin); out_spin: if (spin) { igt_spinner_end(spin); igt_spinner_fini(spin); kfree(spin); } out_pm: intel_engine_pm_put(ce->engine); return ret; } static int __igt_ctx_sseu(struct drm_i915_private *i915, const char *name, unsigned int flags) { struct drm_i915_gem_object *obj; int inst = 0; int ret = 0; if (GRAPHICS_VER(i915) < 9) return 0; if (flags & TEST_RESET) igt_global_reset_lock(to_gt(i915)); obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } do { struct intel_engine_cs *engine; struct intel_context *ce; struct intel_sseu pg_sseu; engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, inst++); if (!engine) break; if (hweight32(engine->sseu.slice_mask) < 2) continue; if (!engine->gt->info.sseu.has_slice_pg) continue; /* * Gen11 VME friendly power-gated configuration with * half enabled sub-slices. */ pg_sseu = engine->sseu; pg_sseu.slice_mask = 1; pg_sseu.subslice_mask = ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", engine->name, name, flags, hweight32(engine->sseu.slice_mask), hweight32(pg_sseu.slice_mask)); ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); goto out_put; } ret = intel_context_pin(ce); if (ret) goto out_ce; /* First set the default mask. */ ret = __sseu_test(name, flags, ce, obj, engine->sseu); if (ret) goto out_unpin; /* Then set a power-gated configuration. */ ret = __sseu_test(name, flags, ce, obj, pg_sseu); if (ret) goto out_unpin; /* Back to defaults. */ ret = __sseu_test(name, flags, ce, obj, engine->sseu); if (ret) goto out_unpin; /* One last power-gated configuration for the road. 
*/ ret = __sseu_test(name, flags, ce, obj, pg_sseu); if (ret) goto out_unpin; out_unpin: intel_context_unpin(ce); out_ce: intel_context_put(ce); } while (!ret); if (igt_flush_test(i915)) ret = -EIO; out_put: i915_gem_object_put(obj); out_unlock: if (flags & TEST_RESET) igt_global_reset_unlock(to_gt(i915)); if (ret) pr_err("%s: Failed with %d!\n", name, ret); return ret; } static int igt_ctx_sseu(void *arg) { struct { const char *name; unsigned int flags; } *phase, phases[] = { { .name = "basic", .flags = 0 }, { .name = "idle", .flags = TEST_IDLE }, { .name = "busy", .flags = TEST_BUSY }, { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET }, { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE }, { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE }, }; unsigned int i; int ret = 0; for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases); i++, phase++) ret = __igt_ctx_sseu(arg, phase->name, phase->flags); return ret; } static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; unsigned long idx, ndwords, dw, num_engines; struct drm_i915_gem_object *obj = NULL; struct i915_request *tq[5] = {}; struct i915_gem_engines_iter it; struct i915_address_space *vm; struct i915_gem_context *ctx; struct intel_context *ce; struct igt_live_test t; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); struct file *file; int err = -ENODEV; /* * Create a few read-only objects (with the occasional writable object) * and try to write into these object checking that the GPU discards * any write to a read-only object. */ file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_file; ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_file; } vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm; if (!vm || !vm->has_read_only) { err = 0; goto out_file; } num_engines = 0; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) if (intel_engine_can_store_dword(ce->engine)) num_engines++; i915_gem_context_unlock_engines(ctx); ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (!intel_engine_can_store_dword(ce->engine)) continue; if (!obj) { obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); i915_gem_context_unlock_engines(ctx); goto out_file; } if (prandom_u32_state(&prng) & 1) i915_gem_object_set_readonly(obj); } err = gpu_fill(ce, obj, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), ce->engine->name, str_yes_no(i915_gem_context_has_full_ppgtt(ctx)), err); i915_gem_context_unlock_engines(ctx); goto out_file; } err = throttle(ce, tq, ARRAY_SIZE(tq)); if (err) { i915_gem_context_unlock_engines(ctx); goto out_file; } if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; } ndwords++; } i915_gem_context_unlock_engines(ctx); } pr_info("Submitted %lu dwords (across %lu engines)\n", ndwords, num_engines); dw = 0; idx = 0; list_for_each_entry(obj, &objects, st_link) { unsigned int rem = min_t(unsigned int, ndwords - dw, max_dwords(obj)); unsigned int num_writes; num_writes = rem; if (i915_gem_object_is_readonly(obj)) num_writes = 0; err = cpu_check(obj, idx++, num_writes); if (err) break; dw += rem; } out_file: throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; fput(file); return err; } static int check_scratch(struct i915_address_space *vm, u64 offset) { struct drm_mm_node *node; mutex_lock(&vm->mutex); node = __drm_mm_interval_first(&vm->mm, offset, offset + sizeof(u32) - 1); mutex_unlock(&vm->mutex); if (!node || node->start > offset) return 0; GEM_BUG_ON(offset >= node->start + node->size); pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n", upper_32_bits(offset), lower_32_bits(offset)); return -EINVAL; } static int write_to_scratch(struct i915_gem_context *ctx, struct intel_engine_cs *engine, struct drm_i915_gem_object *obj, u64 offset, u32 value) { struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm; struct i915_request *rq; struct i915_vma *vma; u32 *cmd; int err; GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); err = check_scratch(ctx->vm, offset); if (err) return err; cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) return PTR_ERR(cmd); *cmd++ = MI_STORE_DWORD_IMM_GEN4; if (GRAPHICS_VER(i915) >= 8) { *cmd++ = lower_32_bits(offset); *cmd++ = upper_32_bits(offset); } else { *cmd++ = 0; *cmd++ = offset; } *cmd++ = value; *cmd = MI_BATCH_BUFFER_END; __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(engine->gt); vm = i915_gem_context_get_eb_vm(ctx); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) goto out_vm; rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; } err = igt_vma_move_to_active_unlocked(vma, rq, 0); if (err) goto skip_request; if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); if (err) goto skip_request; } err = engine->emit_bb_start(rq, i915_vma_offset(vma), i915_vma_size(vma), 0); if (err) goto skip_request; i915_vma_unpin(vma); i915_request_add(rq); goto out_vm; skip_request: i915_request_set_error_once(rq, err); i915_request_add(rq); err_unpin: i915_vma_unpin(vma); out_vm: i915_vm_put(vm); if (!err) err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); return err; } static int read_from_scratch(struct i915_gem_context *ctx, struct intel_engine_cs *engine, struct drm_i915_gem_object *obj, u64 offset, u32 *value) { struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm; const u32 result = 0x100; struct i915_request *rq; struct i915_vma *vma; unsigned int flags; u32 *cmd; int err; GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); err = check_scratch(ctx->vm, offset); if (err) return err; if (GRAPHICS_VER(i915) >= 8) { const u32 GPR0 = engine->mmio_base + 0x600; vm = 
i915_gem_context_get_eb_vm(ctx); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) goto out_vm; cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err_unpin; } memset(cmd, POISON_INUSE, PAGE_SIZE); *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; *cmd++ = GPR0; *cmd++ = lower_32_bits(offset); *cmd++ = upper_32_bits(offset); *cmd++ = MI_STORE_REGISTER_MEM_GEN8; *cmd++ = GPR0; *cmd++ = result; *cmd++ = 0; *cmd = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); flags = 0; } else { const u32 reg = engine->mmio_base + 0x420; /* hsw: register access even to 3DPRIM! is protected */ vm = i915_vm_get(&engine->gt->ggtt->vm); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_vm; } err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); if (err) goto out_vm; cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err_unpin; } memset(cmd, POISON_INUSE, PAGE_SIZE); *cmd++ = MI_LOAD_REGISTER_MEM; *cmd++ = reg; *cmd++ = offset; *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT; *cmd++ = reg; *cmd++ = i915_vma_offset(vma) + result; *cmd = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); flags = I915_DISPATCH_SECURE; } intel_gt_chipset_flush(engine->gt); rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; } err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE); if (err) goto skip_request; if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); if (err) goto skip_request; } err = engine->emit_bb_start(rq, i915_vma_offset(vma), i915_vma_size(vma), flags); if (err) goto skip_request; i915_vma_unpin(vma); i915_request_add(rq); i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (err) goto out_vm; cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto out_vm; } *value = cmd[result / sizeof(*cmd)]; i915_gem_object_unpin_map(obj); goto out_vm; skip_request: i915_request_set_error_once(rq, err); i915_request_add(rq); err_unpin: i915_vma_unpin(vma); out_vm: i915_vm_put(vm); if (!err) err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); return err; } static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) { struct i915_address_space *vm; u32 *vaddr; int err = 0; vm = ctx->vm; if (!vm) return -ENODEV; if (!vm->scratch[0]) { pr_err("No scratch page!\n"); return -EINVAL; } vaddr = __px_vaddr(vm->scratch[0]); memcpy(out, vaddr, sizeof(*out)); if (memchr_inv(vaddr, *out, PAGE_SIZE)) { pr_err("Inconsistent initial state of scratch page!\n"); err = -EINVAL; } return err; } static int igt_vm_isolation(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct drm_i915_gem_object *obj_a, *obj_b; unsigned long num_engines, count; struct intel_engine_cs *engine; struct igt_live_test t; I915_RND_STATE(prng); struct file *file; u64 vm_total; u32 expected; int err; if (GRAPHICS_VER(i915) < 7) return 0; /* * The simple goal here is that a write into one context is not * observed in a second (separate page tables and scratch). 
*/ file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_file; ctx_a = live_context(i915, file); if (IS_ERR(ctx_a)) { err = PTR_ERR(ctx_a); goto out_file; } ctx_b = live_context(i915, file); if (IS_ERR(ctx_b)) { err = PTR_ERR(ctx_b); goto out_file; } /* We can only test vm isolation, if the vm are distinct */ if (ctx_a->vm == ctx_b->vm) goto out_file; /* Read the initial state of the scratch page */ err = check_scratch_page(ctx_a, &expected); if (err) goto out_file; err = check_scratch_page(ctx_b, &expected); if (err) goto out_file; vm_total = ctx_a->vm->total; GEM_BUG_ON(ctx_b->vm->total != vm_total); obj_a = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj_a)) { err = PTR_ERR(obj_a); goto out_file; } obj_b = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj_b)) { err = PTR_ERR(obj_b); goto put_a; } count = 0; num_engines = 0; for_each_uabi_engine(engine, i915) { IGT_TIMEOUT(end_time); unsigned long this = 0; if (!intel_engine_can_store_dword(engine)) continue; /* Not all engines have their own GPR! */ if (GRAPHICS_VER(i915) < 8 && engine->class != RENDER_CLASS) continue; while (!__igt_timeout(end_time, NULL)) { u32 value = 0xc5c5c5c5; u64 offset; /* Leave enough space at offset 0 for the batch */ offset = igt_random_offset(&prng, I915_GTT_PAGE_SIZE, vm_total, sizeof(u32), alignof_dword); err = write_to_scratch(ctx_a, engine, obj_a, offset, 0xdeadbeef); if (err == 0) err = read_from_scratch(ctx_b, engine, obj_b, offset, &value); if (err) goto put_b; if (value != expected) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", engine->name, value, upper_32_bits(offset), lower_32_bits(offset), this); err = -EINVAL; goto put_b; } this++; } count += this; num_engines++; } pr_info("Checked %lu scratch offsets across %lu engines\n", count, num_engines); put_b: i915_gem_object_put(obj_b); put_a: i915_gem_object_put(obj_a); out_file: if (igt_live_test_end(&t)) err = -EIO; fput(file); return err; } int i915_gem_context_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_nop_switch), SUBTEST(live_parallel_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), SUBTEST(igt_ctx_sseu), SUBTEST(igt_shared_ctx_exec), SUBTEST(igt_vm_isolation), }; if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
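The igt_ctx_exec family in the file above bounds the number of in-flight requests with throttle()/throttle_release(): a small fixed-size queue in which submitting a new request first waits on, and releases, the oldest slot. The sketch below is a minimal, self-contained userspace illustration of that sliding-window idea only; struct fake_request, fake_submit() and fake_wait() are hypothetical stand-ins, not i915 APIs.

/*
 * Sliding-window throttle sketch (not kernel code): keep at most N
 * requests outstanding, blocking on the oldest slot before reusing it.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_request {
	int id;
	int completed;
};

static struct fake_request *fake_submit(int id)
{
	struct fake_request *rq = calloc(1, sizeof(*rq));

	if (rq)
		rq->id = id;
	return rq;
}

static void fake_wait(struct fake_request *rq)
{
	/* Pretend to block until the request completes. */
	rq->completed = 1;
}

static int throttle_submit(struct fake_request **q, int count, int id)
{
	int i;

	if (q[0]) {
		fake_wait(q[0]);	/* block on the oldest request */
		free(q[0]);
	}

	for (i = 0; i < count - 1; i++)	/* slide the window down one slot */
		q[i] = q[i + 1];

	q[i] = fake_submit(id);		/* newest request takes the last slot */
	return q[i] ? 0 : -1;
}

int main(void)
{
	struct fake_request *q[5] = {};
	int i;

	for (i = 0; i < 20; i++)
		if (throttle_submit(q, 5, i))
			return 1;

	for (i = 0; i < 5; i++) {	/* drain, mirroring throttle_release() */
		if (!q[i])
			continue;
		fake_wait(q[i]);
		free(q[i]);
	}
	printf("submitted 20 requests with at most 5 in flight\n");
	return 0;
}

The selftest keeps tq[5] for exactly this purpose, so a slow engine cannot accumulate an unbounded backlog of fill requests while the timeout loop runs.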
/* * SPDX-License-Identifier: MIT * * Copyright © 2017 Intel Corporation */ #include <linux/prime_numbers.h> #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" #include "i915_selftest.h" #include "selftests/i915_random.h" struct context { struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; }; static int cpu_set(struct context *ctx, unsigned long offset, u32 v) { unsigned int needs_clflush; struct page *page; void *map; u32 *cpu; int err; i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush); if (err) goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); if (needs_clflush & CLFLUSH_BEFORE) drm_clflush_virt_range(cpu, sizeof(*cpu)); *cpu = v; if (needs_clflush & CLFLUSH_AFTER) drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); i915_gem_object_finish_access(ctx->obj); out: i915_gem_object_unlock(ctx->obj); return err; } static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) { unsigned int needs_clflush; struct page *page; void *map; u32 *cpu; int err; i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush); if (err) goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); if (needs_clflush & CLFLUSH_BEFORE) drm_clflush_virt_range(cpu, sizeof(*cpu)); *v = *cpu; kunmap_atomic(map); i915_gem_object_finish_access(ctx->obj); out: i915_gem_object_unlock(ctx->obj); return err; } static int gtt_set(struct context *ctx, unsigned long offset, u32 v) { struct i915_vma *vma; u32 __iomem *map; int err = 0; i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); i915_gem_object_unlock(ctx->obj); if (err) return err; vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { err = PTR_ERR(map); goto out_rpm; } iowrite32(v, &map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); out_rpm: intel_gt_pm_put(vma->vm->gt); return err; } static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) { struct i915_vma *vma; u32 __iomem *map; int err = 0; i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, false); i915_gem_object_unlock(ctx->obj); if (err) return err; vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { err = PTR_ERR(map); goto out_rpm; } *v = ioread32(&map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); out_rpm: intel_gt_pm_put(vma->vm->gt); return err; } static int wc_set(struct context *ctx, unsigned long offset, u32 v) { u32 *map; int err; i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_wc_domain(ctx->obj, true); i915_gem_object_unlock(ctx->obj); if (err) return err; map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); map[offset / sizeof(*map)] = v; __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map)); i915_gem_object_unpin_map(ctx->obj); return 0; } static int wc_get(struct context *ctx, unsigned long offset, u32 *v) { u32 *map; int err; 
i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_wc_domain(ctx->obj, false); i915_gem_object_unlock(ctx->obj); if (err) return err; map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); *v = map[offset / sizeof(*map)]; i915_gem_object_unpin_map(ctx->obj); return 0; } static int gpu_set(struct context *ctx, unsigned long offset, u32 v) { struct i915_request *rq; struct i915_vma *vma; u32 *cs; int err; vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0); if (IS_ERR(vma)) return PTR_ERR(vma); i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); if (err) goto out_unlock; rq = intel_engine_create_kernel_request(ctx->engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_unpin; } cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { err = PTR_ERR(cs); goto out_rq; } if (GRAPHICS_VER(ctx->engine->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; } else if (GRAPHICS_VER(ctx->engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; } else { *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; *cs++ = MI_NOOP; } intel_ring_advance(rq, cs); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); out_rq: i915_request_add(rq); out_unpin: i915_vma_unpin(vma); out_unlock: i915_gem_object_unlock(ctx->obj); return err; } static bool always_valid(struct context *ctx) { return true; } static bool needs_fence_registers(struct context *ctx) { struct intel_gt *gt = ctx->engine->gt; if (intel_gt_is_wedged(gt)) return false; return gt->ggtt->num_fences; } static bool needs_mi_store_dword(struct context *ctx) { if (intel_gt_is_wedged(ctx->engine->gt)) return false; return intel_engine_can_store_dword(ctx->engine); } static const struct igt_coherency_mode { const char *name; int (*set)(struct context *ctx, unsigned long offset, u32 v); int (*get)(struct context *ctx, unsigned long offset, u32 *v); bool (*valid)(struct context *ctx); } igt_coherency_mode[] = { { "cpu", cpu_set, cpu_get, always_valid }, { "gtt", gtt_set, gtt_get, needs_fence_registers }, { "wc", wc_set, wc_get, always_valid }, { "gpu", gpu_set, NULL, needs_mi_store_dword }, { }, }; static struct intel_engine_cs * random_engine(struct drm_i915_private *i915, struct rnd_state *prng) { struct intel_engine_cs *engine; unsigned int count; count = 0; for_each_uabi_engine(engine, i915) count++; count = i915_prandom_u32_max_state(count, prng); for_each_uabi_engine(engine, i915) if (count-- == 0) return engine; return NULL; } static int igt_gem_coherency(void *arg) { const unsigned int ncachelines = PAGE_SIZE/64; struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; unsigned long count, n; u32 *offsets, *values; I915_RND_STATE(prng); struct context ctx; int err = 0; /* * We repeatedly write, overwrite and read from a sequence of * cachelines in order to try and detect incoherency (unflushed writes * from either the CPU or GPU). Each setter/getter uses our cache * domain API which should prevent incoherency. 
*/ offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL); if (!offsets) return -ENOMEM; for (count = 0; count < ncachelines; count++) offsets[count] = count * 64 + 4 * (count % 16); values = offsets + ncachelines; ctx.engine = random_engine(i915, &prng); if (!ctx.engine) { err = -ENODEV; goto out_free; } pr_info("%s: using %s\n", __func__, ctx.engine->name); intel_engine_pm_get(ctx.engine); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; if (!over->valid(&ctx)) continue; for (write = igt_coherency_mode; write->name; write++) { if (!write->set) continue; if (!write->valid(&ctx)) continue; for (read = igt_coherency_mode; read->name; read++) { if (!read->get) continue; if (!read->valid(&ctx)) continue; for_each_prime_number_from(count, 1, ncachelines) { ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(ctx.obj)) { err = PTR_ERR(ctx.obj); goto out_pm; } i915_random_reorder(offsets, ncachelines, &prng); for (n = 0; n < count; n++) values[n] = prandom_u32_state(&prng); for (n = 0; n < count; n++) { err = over->set(&ctx, offsets[n], ~values[n]); if (err) { pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n", n, count, over->name, err); goto put_object; } } for (n = 0; n < count; n++) { err = write->set(&ctx, offsets[n], values[n]); if (err) { pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n", n, count, write->name, err); goto put_object; } } for (n = 0; n < count; n++) { u32 found; err = read->get(&ctx, offsets[n], &found); if (err) { pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n", n, count, read->name, err); goto put_object; } if (found != values[n]) { pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n", n, count, over->name, write->name, values[n], read->name, found, ~values[n], offsets[n]); err = -EINVAL; goto put_object; } } i915_gem_object_put(ctx.obj); } } } } out_pm: intel_engine_pm_put(ctx.engine); out_free: kfree(offsets); return err; put_object: i915_gem_object_put(ctx.obj); goto out_pm; } int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_coherency), }; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
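igt_gem_coherency in the file above drives every writer/reader pairing through a table of set/get callbacks (igt_coherency_mode), so an unflushed write by one access path shows up as a mismatch when read back through another. The sketch below is a minimal, self-contained illustration of that table-driven cross-product, assuming a plain in-memory buffer; the mode names and accessors are illustrative stand-ins, not the driver's cpu/gtt/wc/gpu paths.

/*
 * Accessor-table sketch (not kernel code): each "mode" supplies set/get
 * callbacks and the test loops over all writer x reader combinations.
 */
#include <stdio.h>
#include <stdint.h>

#define NWORDS 8

static uint32_t buffer[NWORDS];

static int buf_set(unsigned long offset, uint32_t v)
{
	buffer[offset] = v;
	return 0;
}

static int buf_get(unsigned long offset, uint32_t *v)
{
	*v = buffer[offset];
	return 0;
}

struct coherency_mode {
	const char *name;
	int (*set)(unsigned long offset, uint32_t v);
	int (*get)(unsigned long offset, uint32_t *v);
};

static const struct coherency_mode modes[] = {
	{ "direct", buf_set, buf_get },
	{ "alias",  buf_set, buf_get },	/* a real mode would use a different mapping */
	{ },
};

int main(void)
{
	const struct coherency_mode *write, *read;
	unsigned long n;
	uint32_t found;

	for (write = modes; write->name; write++) {
		if (!write->set)
			continue;
		for (read = modes; read->name; read++) {
			if (!read->get)
				continue;
			for (n = 0; n < NWORDS; n++) {
				write->set(n, 0xc0ffee00u + n);
				read->get(n, &found);
				if (found != 0xc0ffee00u + n) {
					fprintf(stderr, "%s vs %s mismatch at %lu\n",
						write->name, read->name, n);
					return 1;
				}
			}
		}
	}
	printf("all writer/reader combinations agree\n");
	return 0;
}

The kernel test additionally writes a stale value first (the "over" pass) and randomizes cacheline order, so that missing clflushes or unsynchronised GPU writes are not masked by the buffer happening to hold the expected data already.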
/* * SPDX-License-Identifier: MIT * * Copyright © 2017 Intel Corporation */ #include <linux/prime_numbers.h> #include <linux/string_helpers.h> #include <linux/swap.h> #include "i915_selftest.h" #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gem_region.h" #include "gt/intel_gt.h" #include "igt_gem_utils.h" #include "mock_context.h" #include "selftests/mock_drm.h" #include "selftests/mock_gem_device.h" #include "selftests/mock_region.h" #include "selftests/i915_random.h" static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915, struct file *file) { struct i915_gem_context *ctx = live_context(i915, file); struct i915_address_space *vm; if (IS_ERR(ctx)) return ctx; vm = ctx->vm; if (vm) WRITE_ONCE(vm->scrub_64K, true); return ctx; } static const unsigned int page_sizes[] = { I915_GTT_PAGE_SIZE_2M, I915_GTT_PAGE_SIZE_64K, I915_GTT_PAGE_SIZE_4K, }; static unsigned int get_largest_page_size(struct drm_i915_private *i915, u64 rem) { int i; for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { unsigned int page_size = page_sizes[i]; if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size) return page_size; } return 0; } static void huge_pages_free_pages(struct sg_table *st) { struct scatterlist *sg; for (sg = st->sgl; sg; sg = __sg_next(sg)) { if (sg_page(sg)) __free_pages(sg_page(sg), get_order(sg->length)); } sg_free_table(st); kfree(st); } static int get_huge_pages(struct drm_i915_gem_object *obj) { #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) unsigned int page_mask = obj->mm.page_mask; struct sg_table *st; struct scatterlist *sg; unsigned int sg_page_sizes; u64 rem; /* restricted by sg_alloc_table */ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int)) return -E2BIG; st = kmalloc(sizeof(*st), GFP); if (!st) return -ENOMEM; if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { kfree(st); return -ENOMEM; } rem = obj->base.size; sg = st->sgl; st->nents = 0; sg_page_sizes = 0; /* * Our goal here is simple, we want to greedily fill the object from * largest to smallest page-size, while ensuring that we use *every* * page-size as per the given page-mask. 
*/ do { unsigned int bit = ilog2(page_mask); unsigned int page_size = BIT(bit); int order = get_order(page_size); do { struct page *page; GEM_BUG_ON(order > MAX_ORDER); page = alloc_pages(GFP | __GFP_ZERO, order); if (!page) goto err; sg_set_page(sg, page, page_size, 0); sg_page_sizes |= page_size; st->nents++; rem -= page_size; if (!rem) { sg_mark_end(sg); break; } sg = __sg_next(sg); } while ((rem - ((page_size-1) & page_mask)) >= page_size); page_mask &= (page_size-1); } while (page_mask); if (i915_gem_gtt_prepare_pages(obj, st)) goto err; GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); __i915_gem_object_set_pages(obj, st); return 0; err: sg_set_page(sg, NULL, 0, 0); sg_mark_end(sg); huge_pages_free_pages(st); return -ENOMEM; } static void put_huge_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { i915_gem_gtt_finish_pages(obj, pages); huge_pages_free_pages(pages); obj->mm.dirty = false; __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops huge_page_ops = { .name = "huge-gem", .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = get_huge_pages, .put_pages = put_huge_pages, }; static struct drm_i915_gem_object * huge_pages_object(struct drm_i915_private *i915, u64 size, unsigned int page_mask) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask)))); if (size >> PAGE_SHIFT > INT_MAX) return ERR_PTR(-E2BIG); if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; i915_gem_object_set_volatile(obj); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; cache_level = HAS_LLC(i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); obj->mm.page_mask = page_mask; return obj; } static int fake_get_huge_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); const u64 max_len = rounddown_pow_of_two(UINT_MAX); struct sg_table *st; struct scatterlist *sg; u64 rem; /* restricted by sg_alloc_table */ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int)) return -E2BIG; st = kmalloc(sizeof(*st), GFP); if (!st) return -ENOMEM; if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { kfree(st); return -ENOMEM; } /* Use optimal page sized chunks to fill in the sg table */ rem = obj->base.size; sg = st->sgl; st->nents = 0; do { unsigned int page_size = get_largest_page_size(i915, rem); unsigned int len = min(page_size * div_u64(rem, page_size), max_len); GEM_BUG_ON(!page_size); sg->offset = 0; sg->length = len; sg_dma_len(sg) = len; sg_dma_address(sg) = page_size; st->nents++; rem -= len; if (!rem) { sg_mark_end(sg); break; } sg = sg_next(sg); } while (1); i915_sg_trim(st); __i915_gem_object_set_pages(obj, st); return 0; } static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; unsigned int page_size; st = kmalloc(sizeof(*st), GFP); if (!st) return -ENOMEM; if (sg_alloc_table(st, 1, GFP)) { kfree(st); return -ENOMEM; } sg = st->sgl; st->nents = 1; page_size = get_largest_page_size(i915, obj->base.size); GEM_BUG_ON(!page_size); sg->offset = 0; sg->length = obj->base.size; sg_dma_len(sg) = obj->base.size; sg_dma_address(sg) = page_size; __i915_gem_object_set_pages(obj, st); return 0; #undef GFP } static void fake_free_huge_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { sg_free_table(pages); kfree(pages); } static void fake_put_huge_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { fake_free_huge_pages(obj, pages); obj->mm.dirty = false; } static const struct drm_i915_gem_object_ops fake_ops = { .name = "fake-gem", .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = fake_get_huge_pages, .put_pages = fake_put_huge_pages, }; static const struct drm_i915_gem_object_ops fake_ops_single = { .name = "fake-gem", .flags = I915_GEM_OBJECT_IS_SHRINKABLE, .get_pages = fake_get_huge_pages_single, .put_pages = fake_put_huge_pages, }; static struct drm_i915_gem_object * fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); if (size >> PAGE_SHIFT > UINT_MAX) return ERR_PTR(-E2BIG); if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); if (single) i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0); else i915_gem_object_init(obj, &fake_ops, &lock_class, 0); i915_gem_object_set_volatile(obj); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE); return obj; } static int igt_check_page_sizes(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj = vma->obj; int err; /* We have to wait for the async bind to complete before our asserts */ err = 
i915_vma_sync(vma); if (err) return err; if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { pr_err("unsupported page_sizes.sg=%u, supported=%u\n", vma->page_sizes.sg & ~supported, supported); err = -EINVAL; } if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) { pr_err("unsupported page_sizes.gtt=%u, supported=%u\n", vma->resource->page_sizes_gtt & ~supported, supported); err = -EINVAL; } if (vma->page_sizes.phys != obj->mm.page_sizes.phys) { pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n", vma->page_sizes.phys, obj->mm.page_sizes.phys); err = -EINVAL; } if (vma->page_sizes.sg != obj->mm.page_sizes.sg) { pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n", vma->page_sizes.sg, obj->mm.page_sizes.sg); err = -EINVAL; } /* * The dma-api is like a box of chocolates when it comes to the * alignment of dma addresses, however for LMEM we have total control * and so can guarantee alignment, likewise when we allocate our blocks * they should appear in descending order, and if we know that we align * to the largest page size for the GTT address, we should be able to * assert that if we see 2M physical pages then we should also get 2M * GTT pages. If we don't then something might be wrong in our * construction of the backing pages. * * Maintaining alignment is required to utilise huge pages in the ppGGT. */ if (i915_gem_object_is_lmem(obj) && IS_ALIGNED(i915_vma_offset(vma), SZ_2M) && vma->page_sizes.sg & SZ_2M && vma->resource->page_sizes_gtt < SZ_2M) { pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n", vma->page_sizes.sg, vma->resource->page_sizes_gtt); err = -EINVAL; } return err; } static int igt_mock_exhaust_device_supported_pages(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; struct i915_vma *vma; int i, j, single; int err; /* * Sanity check creating objects with every valid page support * combination for our mock device. 
*/ for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) { unsigned int combination = SZ_4K; /* Required for ppGTT */ for (j = 0; j < ARRAY_SIZE(page_sizes); j++) { if (i & BIT(j)) combination |= page_sizes[j]; } RUNTIME_INFO(i915)->page_sizes = combination; for (single = 0; single <= 1; ++single) { obj = fake_huge_pages_object(i915, combination, !!single); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_device; } if (obj->base.size != combination) { pr_err("obj->base.size=%zu, expected=%u\n", obj->base.size, combination); err = -EINVAL; goto out_put; } vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; } err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) goto out_put; err = igt_check_page_sizes(vma); if (vma->page_sizes.sg != combination) { pr_err("page_sizes.sg=%u, expected=%u\n", vma->page_sizes.sg, combination); err = -EINVAL; } i915_vma_unpin(vma); i915_gem_object_put(obj); if (err) goto out_device; } } goto out_device; out_put: i915_gem_object_put(obj); out_device: RUNTIME_INFO(i915)->page_sizes = saved_mask; return err; } static int igt_mock_memory_region_huge_pages(void *arg) { const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS }; struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct intel_memory_region *mem; struct drm_i915_gem_object *obj; struct i915_vma *vma; int bit; int err = 0; mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0); if (IS_ERR(mem)) { pr_err("%s failed to create memory region\n", __func__); return PTR_ERR(mem); } for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { unsigned int page_size = BIT(bit); resource_size_t phys; int i; for (i = 0; i < ARRAY_SIZE(flags); ++i) { obj = i915_gem_object_create_region(mem, page_size, page_size, flags[i]); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_region; } vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; } err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) goto out_put; err = igt_check_page_sizes(vma); if (err) goto out_unpin; phys = i915_gem_object_get_dma_address(obj, 0); if (!IS_ALIGNED(phys, page_size)) { pr_err("%s addr misaligned(%pa) page_size=%u\n", __func__, &phys, page_size); err = -EINVAL; goto out_unpin; } if (vma->resource->page_sizes_gtt != page_size) { pr_err("%s page_sizes.gtt=%u, expected=%u\n", __func__, vma->resource->page_sizes_gtt, page_size); err = -EINVAL; goto out_unpin; } i915_vma_unpin(vma); __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } } goto out_region; out_unpin: i915_vma_unpin(vma); out_put: i915_gem_object_put(obj); out_region: intel_memory_region_destroy(mem); return err; } static int igt_mock_ppgtt_misaligned_dma(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; int bit; int err; /* * Sanity check dma misalignment for huge pages -- the dma addresses we * insert into the paging structures need to always respect the page * size alignment. 
*/ bit = ilog2(I915_GTT_PAGE_SIZE_64K); for_each_set_bit_from(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { IGT_TIMEOUT(end_time); unsigned int page_size = BIT(bit); unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; unsigned int offset; unsigned int size = round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1; struct i915_vma *vma; obj = fake_huge_pages_object(i915, size, true); if (IS_ERR(obj)) return PTR_ERR(obj); if (obj->base.size != size) { pr_err("obj->base.size=%zu, expected=%u\n", obj->base.size, size); err = -EINVAL; goto out_put; } err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_put; /* Force the page size for this object */ obj->mm.page_sizes.sg = page_size; vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_unpin; } err = i915_vma_pin(vma, 0, 0, flags); if (err) goto out_unpin; err = igt_check_page_sizes(vma); if (vma->resource->page_sizes_gtt != page_size) { pr_err("page_sizes.gtt=%u, expected %u\n", vma->resource->page_sizes_gtt, page_size); err = -EINVAL; } i915_vma_unpin(vma); if (err) goto out_unpin; /* * Try all the other valid offsets until the next * boundary -- should always fall back to using 4K * pages. */ for (offset = 4096; offset < page_size; offset += 4096) { err = i915_vma_unbind_unlocked(vma); if (err) goto out_unpin; err = i915_vma_pin(vma, 0, 0, flags | offset); if (err) goto out_unpin; err = igt_check_page_sizes(vma); if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) { pr_err("page_sizes.gtt=%u, expected %llu\n", vma->resource->page_sizes_gtt, I915_GTT_PAGE_SIZE_4K); err = -EINVAL; } i915_vma_unpin(vma); if (err) goto out_unpin; if (igt_timeout(end_time, "%s timed out at offset %x with page-size %x\n", __func__, offset, page_size)) break; } i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); } return 0; out_unpin: i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); i915_gem_object_unlock(obj); out_put: i915_gem_object_put(obj); return err; } static void close_object_list(struct list_head *objects) { struct drm_i915_gem_object *obj, *on; list_for_each_entry_safe(obj, on, objects, st_link) { list_del(&obj->st_link); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); } } static int igt_ppgtt_huge_fill(void *arg) { struct drm_i915_private *i915 = arg; unsigned int supported = RUNTIME_INFO(i915)->page_sizes; bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long max_pages; unsigned long page_num; struct file *file; bool single = false; LIST_HEAD(objects); IGT_TIMEOUT(end_time); int err = -ENODEV; if (supported == I915_GTT_PAGE_SIZE_4K) return 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } vm = i915_gem_context_get_eb_vm(ctx); max_pages = vm->total >> PAGE_SHIFT; for_each_prime_number_from(page_num, 1, max_pages) { struct drm_i915_gem_object *obj; u64 size = page_num << PAGE_SHIFT; struct i915_vma *vma; unsigned int expected_gtt = 0; int i; obj = fake_huge_pages_object(i915, size, single); if (IS_ERR(obj)) { err = PTR_ERR(obj); break; } if (obj->base.size != size) { pr_err("obj->base.size=%zd, expected=%llu\n", obj->base.size, size); i915_gem_object_put(obj); err = -EINVAL; break; } err = 
i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); break; } list_add(&obj->st_link, &objects); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); break; } /* vma start must be aligned to BIT(21) to allow 2M PTEs */ err = i915_vma_pin(vma, 0, BIT(21), PIN_USER); if (err) break; err = igt_check_page_sizes(vma); if (err) { i915_vma_unpin(vma); break; } /* * Figure out the expected gtt page size knowing that we go from * largest to smallest page size sg chunks, and that we align to * the largest page size. */ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { unsigned int page_size = page_sizes[i]; if (HAS_PAGE_SIZES(i915, page_size) && size >= page_size) { expected_gtt |= page_size; size &= page_size-1; } } GEM_BUG_ON(!expected_gtt); GEM_BUG_ON(size); if (!has_pte64 && (obj->base.size < I915_GTT_PAGE_SIZE_2M || expected_gtt & I915_GTT_PAGE_SIZE_2M)) expected_gtt &= ~I915_GTT_PAGE_SIZE_64K; i915_vma_unpin(vma); if (!has_pte64 && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { if (!IS_ALIGNED(vma->node.start, I915_GTT_PAGE_SIZE_2M)) { pr_err("node.start(%llx) not aligned to 2M\n", vma->node.start); err = -EINVAL; break; } if (!IS_ALIGNED(vma->node.size, I915_GTT_PAGE_SIZE_2M)) { pr_err("node.size(%llx) not aligned to 2M\n", vma->node.size); err = -EINVAL; break; } } if (vma->resource->page_sizes_gtt != expected_gtt) { pr_err("gtt=%#x, expected=%#x, size=0x%zx, single=%s\n", vma->resource->page_sizes_gtt, expected_gtt, obj->base.size, str_yes_no(!!single)); err = -EINVAL; break; } if (igt_timeout(end_time, "%s timed out at size %zd\n", __func__, obj->base.size)) break; single = !single; } close_object_list(&objects); if (err == -ENOMEM || err == -ENOSPC) err = 0; i915_vm_put(vm); out: fput(file); return err; } static int igt_ppgtt_64K(void *arg) { struct drm_i915_private *i915 = arg; bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); struct drm_i915_gem_object *obj; struct i915_address_space *vm; struct i915_gem_context *ctx; struct file *file; const struct object_info { unsigned int size; unsigned int gtt; unsigned int offset; } objects[] = { /* Cases with forced padding/alignment */ { .size = SZ_64K, .gtt = I915_GTT_PAGE_SIZE_64K, .offset = 0, }, { .size = SZ_64K + SZ_4K, .gtt = I915_GTT_PAGE_SIZE_4K, .offset = 0, }, { .size = SZ_64K - SZ_4K, .gtt = I915_GTT_PAGE_SIZE_4K, .offset = 0, }, { .size = SZ_2M, .gtt = I915_GTT_PAGE_SIZE_64K, .offset = 0, }, { .size = SZ_2M - SZ_4K, .gtt = I915_GTT_PAGE_SIZE_4K, .offset = 0, }, { .size = SZ_2M + SZ_4K, .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K, .offset = 0, }, { .size = SZ_2M + SZ_64K, .gtt = I915_GTT_PAGE_SIZE_64K, .offset = 0, }, { .size = SZ_2M - SZ_64K, .gtt = I915_GTT_PAGE_SIZE_64K, .offset = 0, }, /* Try without any forced padding/alignment */ { .size = SZ_64K, .offset = SZ_2M, .gtt = I915_GTT_PAGE_SIZE_4K, }, { .size = SZ_128K, .offset = SZ_2M - SZ_64K, .gtt = I915_GTT_PAGE_SIZE_4K, }, }; struct i915_vma *vma; int i, single; int err; /* * Sanity check some of the trickiness with 64K pages -- either we can * safely mark the whole page-table(2M block) as 64K, or we have to * always fallback to 4K. 
*/ if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K)) return 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } vm = i915_gem_context_get_eb_vm(ctx); for (i = 0; i < ARRAY_SIZE(objects); ++i) { unsigned int size = objects[i].size; unsigned int expected_gtt = objects[i].gtt; unsigned int offset = objects[i].offset; unsigned int flags = PIN_USER; /* * For modern GTT models, the requirements for marking a page-table * as 64K have been relaxed. Account for this. */ if (has_pte64) { expected_gtt = 0; if (size >= SZ_64K) expected_gtt |= I915_GTT_PAGE_SIZE_64K; if (size & (SZ_64K - 1)) expected_gtt |= I915_GTT_PAGE_SIZE_4K; } for (single = 0; single <= 1; single++) { obj = fake_huge_pages_object(i915, size, !!single); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_vm; } err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_object_put; /* * Disable 2M pages -- We only want to use 64K/4K pages * for this test. */ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M; vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_object_unpin; } if (offset) flags |= PIN_OFFSET_FIXED | offset; err = i915_vma_pin(vma, 0, 0, flags); if (err) goto out_object_unpin; err = igt_check_page_sizes(vma); if (err) goto out_vma_unpin; if (!has_pte64 && !offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { if (!IS_ALIGNED(vma->node.start, I915_GTT_PAGE_SIZE_2M)) { pr_err("node.start(%llx) not aligned to 2M\n", vma->node.start); err = -EINVAL; goto out_vma_unpin; } if (!IS_ALIGNED(vma->node.size, I915_GTT_PAGE_SIZE_2M)) { pr_err("node.size(%llx) not aligned to 2M\n", vma->node.size); err = -EINVAL; goto out_vma_unpin; } } if (vma->resource->page_sizes_gtt != expected_gtt) { pr_err("gtt=%#x, expected=%#x, i=%d, single=%s offset=%#x size=%#x\n", vma->resource->page_sizes_gtt, expected_gtt, i, str_yes_no(!!single), offset, size); err = -EINVAL; goto out_vma_unpin; } i915_vma_unpin(vma); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); i915_gem_drain_freed_objects(i915); } } goto out_vm; out_vma_unpin: i915_vma_unpin(vma); out_object_unpin: i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); i915_gem_object_unlock(obj); out_object_put: i915_gem_object_put(obj); out_vm: i915_vm_put(vm); out: fput(file); return err; } static int gpu_write(struct intel_context *ce, struct i915_vma *vma, u32 dw, u32 val) { int err; i915_gem_object_lock(vma->obj, NULL); err = i915_gem_object_set_to_gtt_domain(vma->obj, true); i915_gem_object_unlock(vma->obj); if (err) return err; return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32), vma->size >> PAGE_SHIFT, val); } static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) { unsigned int needs_flush; unsigned long n; int err; i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) goto err_unlock; for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); if (needs_flush & CLFLUSH_BEFORE) drm_clflush_virt_range(ptr, PAGE_SIZE); if (ptr[dword] != val) { pr_err("n=%lu ptr[%u]=%u, val=%u\n", n, dword, ptr[dword], val); kunmap_atomic(ptr); err = -EINVAL; break; } kunmap_atomic(ptr); } i915_gem_object_finish_access(obj); err_unlock: i915_gem_object_unlock(obj); return err; } static int __cpu_check_vmap(struct drm_i915_gem_object 
*obj, u32 dword, u32 val) { unsigned long n = obj->base.size >> PAGE_SHIFT; u32 *ptr; int err; err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) return err; ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(ptr)) return PTR_ERR(ptr); ptr += dword; while (n--) { if (*ptr != val) { pr_err("base[%u]=%08x, val=%08x\n", dword, *ptr, val); err = -EINVAL; break; } ptr += PAGE_SIZE / sizeof(*ptr); } i915_gem_object_unpin_map(obj); return err; } static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) { if (i915_gem_object_has_struct_page(obj)) return __cpu_check_shmem(obj, dword, val); else return __cpu_check_vmap(obj, dword, val); } static int __igt_write_huge(struct intel_context *ce, struct drm_i915_gem_object *obj, u64 size, u64 offset, u32 dword, u32 val) { unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); err = i915_vma_pin(vma, size, 0, flags | offset); if (err) { /* * The ggtt may have some pages reserved so * refrain from erroring out. */ if (err == -ENOSPC && i915_is_ggtt(ce->vm)) err = 0; return err; } err = igt_check_page_sizes(vma); if (err) goto out_vma_unpin; err = gpu_write(ce, vma, dword, val); if (err) { pr_err("gpu-write failed at offset=%llx\n", offset); goto out_vma_unpin; } err = cpu_check(obj, dword, val); if (err) { pr_err("cpu-check failed at offset=%llx\n", offset); goto out_vma_unpin; } out_vma_unpin: i915_vma_unpin(vma); return err; } static int igt_write_huge(struct drm_i915_private *i915, struct drm_i915_gem_object *obj) { struct i915_gem_engines *engines; struct i915_gem_engines_iter it; struct intel_context *ce; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); unsigned int max_page_size; unsigned int count; struct i915_gem_context *ctx; struct file *file; u64 max; u64 num; u64 size; int *order; int i, n; int err = 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); size = obj->base.size; if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K && !HAS_64K_PAGES(i915)) size = round_up(size, I915_GTT_PAGE_SIZE_2M); n = 0; count = 0; max = U64_MAX; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { count++; if (!intel_engine_can_store_dword(ce->engine)) continue; max = min(max, ce->vm->total); n++; } i915_gem_context_unlock_engines(ctx); if (!n) goto out; /* * To keep things interesting when alternating between engines in our * randomized order, lets also make feeding to the same engine a few * times in succession a possibility by enlarging the permutation array. */ order = i915_random_order(count * count, &prng); if (!order) { err = -ENOMEM; goto out; } max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); max = div_u64(max - size, max_page_size); /* * Try various offsets in an ascending/descending fashion until we * timeout -- we want to avoid issues hidden by effectively always using * offset = 0. 
*/ i = 0; engines = i915_gem_context_lock_engines(ctx); for_each_prime_number_from(num, 0, max) { u64 offset_low = num * max_page_size; u64 offset_high = (max - num) * max_page_size; u32 dword = offset_in_page(num) / 4; struct intel_context *ce; ce = engines->engines[order[i] % engines->num_engines]; i = (i + 1) % (count * count); if (!ce || !intel_engine_can_store_dword(ce->engine)) continue; /* * In order to utilize 64K pages we need to both pad the vma * size and ensure the vma offset is at the start of the pt * boundary, however to improve coverage we opt for testing both * aligned and unaligned offsets. * * With PS64 this is no longer the case, but to ensure we * sometimes get the compact layout for smaller objects, apply * the round_up anyway. */ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) offset_low = round_down(offset_low, I915_GTT_PAGE_SIZE_2M); err = __igt_write_huge(ce, obj, size, offset_low, dword, num + 1); if (err) break; err = __igt_write_huge(ce, obj, size, offset_high, dword, num + 1); if (err) break; if (igt_timeout(end_time, "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n", __func__, ce->engine->name, offset_low, offset_high, max_page_size)) break; } i915_gem_context_unlock_engines(ctx); kfree(order); out: fput(file); return err; } typedef struct drm_i915_gem_object * (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags); static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) { return i915->mm.gemfs && has_transparent_hugepage(); } static struct drm_i915_gem_object * igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags) { if (!igt_can_allocate_thp(i915)) { pr_info("%s missing THP support, skipping\n", __func__); return ERR_PTR(-ENODEV); } return i915_gem_object_create_shmem(i915, size); } static struct drm_i915_gem_object * igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags) { return i915_gem_object_create_internal(i915, size); } static struct drm_i915_gem_object * igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags) { return huge_pages_object(i915, size, size); } static struct drm_i915_gem_object * igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags) { return i915_gem_object_create_lmem(i915, size, flags); } static u32 igt_random_size(struct rnd_state *prng, u32 min_page_size, u32 max_page_size) { u64 mask; u32 size; GEM_BUG_ON(!is_power_of_2(min_page_size)); GEM_BUG_ON(!is_power_of_2(max_page_size)); GEM_BUG_ON(min_page_size < PAGE_SIZE); GEM_BUG_ON(min_page_size > max_page_size); mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK; size = prandom_u32_state(prng) & mask; if (size < min_page_size) size |= min_page_size; return size; } static int igt_ppgtt_smoke_huge(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; I915_RND_STATE(prng); struct { igt_create_fn fn; u32 min; u32 max; } backends[] = { { igt_create_internal, SZ_64K, SZ_2M, }, { igt_create_shmem, SZ_64K, SZ_32M, }, { igt_create_local, SZ_64K, SZ_1G, }, }; int err; int i; /* * Sanity check that the HW uses huge pages correctly through our * various backends -- ensure that our writes land in the right place. 
*/ for (i = 0; i < ARRAY_SIZE(backends); ++i) { u32 min = backends[i].min; u32 max = backends[i].max; u32 size = max; try_again: size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); obj = backends[i].fn(i915, size, 0); if (IS_ERR(obj)) { err = PTR_ERR(obj); if (err == -E2BIG) { size >>= 1; goto try_again; } else if (err == -ENODEV) { err = 0; continue; } return err; } err = i915_gem_object_pin_pages_unlocked(obj); if (err) { if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) { i915_gem_object_put(obj); size >>= 1; goto try_again; } goto out_put; } if (obj->mm.page_sizes.phys < min) { pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n", __func__, size, i); err = -ENOMEM; goto out_unpin; } err = igt_write_huge(i915, obj); if (err) { pr_err("%s write-huge failed with size=%u, i=%d\n", __func__, size, i); } out_unpin: i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); out_put: i915_gem_object_put(obj); if (err == -ENOMEM || err == -ENXIO) err = 0; if (err) break; cond_resched(); } return err; } static int igt_ppgtt_sanity_check(void *arg) { struct drm_i915_private *i915 = arg; unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct { igt_create_fn fn; unsigned int flags; } backends[] = { { igt_create_system, 0, }, { igt_create_local, 0, }, { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, }, }; struct { u32 size; u32 pages; } combos[] = { { SZ_64K, SZ_64K }, { SZ_2M, SZ_2M }, { SZ_2M, SZ_64K }, { SZ_2M - SZ_64K, SZ_64K }, { SZ_2M - SZ_4K, SZ_64K | SZ_4K }, { SZ_2M + SZ_4K, SZ_64K | SZ_4K }, { SZ_2M + SZ_4K, SZ_2M | SZ_4K }, { SZ_2M + SZ_64K, SZ_2M | SZ_64K }, { SZ_2M + SZ_64K, SZ_64K }, }; int i, j; int err; if (supported == I915_GTT_PAGE_SIZE_4K) return 0; /* * Sanity check that the HW behaves with a limited set of combinations. * We already have a bunch of randomised testing, which should give us * a decent amount of variation between runs, however we should keep * this to limit the chances of introducing a temporary regression, by * testing the most obvious cases that might make something blow up. */ for (i = 0; i < ARRAY_SIZE(backends); ++i) { for (j = 0; j < ARRAY_SIZE(combos); ++j) { struct drm_i915_gem_object *obj; u32 size = combos[j].size; u32 pages = combos[j].pages; obj = backends[i].fn(i915, size, backends[i].flags); if (IS_ERR(obj)) { err = PTR_ERR(obj); if (err == -ENODEV) { pr_info("Device lacks local memory, skipping\n"); err = 0; break; } return err; } err = i915_gem_object_pin_pages_unlocked(obj); if (err) { i915_gem_object_put(obj); goto out; } GEM_BUG_ON(pages > obj->base.size); pages = pages & supported; if (pages) obj->mm.page_sizes.sg = pages; err = igt_write_huge(i915, obj); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); if (err) { pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n", __func__, size, pages, i, j); goto out; } } cond_resched(); } out: if (err == -ENOMEM) err = 0; return err; } static int igt_ppgtt_compact(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; int err; /* * Simple test to catch issues with compact 64K pages -- since the pt is * compacted to 256B that gives us 32 entries per pt, however since the * backing page for the pt is 4K, any extra entries we might incorrectly * write out should be ignored by the HW. 
If ever hit such a case this * test should catch it since some of our writes would land in scratch. */ if (!HAS_64K_PAGES(i915)) { pr_info("device lacks compact 64K page support, skipping\n"); return 0; } if (!HAS_LMEM(i915)) { pr_info("device lacks LMEM support, skipping\n"); return 0; } /* We want the range to cover multiple page-table boundaries. */ obj = i915_gem_object_create_lmem(i915, SZ_4M, 0); if (IS_ERR(obj)) return PTR_ERR(obj); err = i915_gem_object_pin_pages_unlocked(obj); if (err) goto out_put; if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) { pr_info("LMEM compact unable to allocate huge-page(s)\n"); goto out_unpin; } /* * Disable 2M GTT pages by forcing the page-size to 64K for the GTT * insertion. */ obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K; err = igt_write_huge(i915, obj); if (err) pr_err("LMEM compact write-huge failed\n"); out_unpin: i915_gem_object_unpin_pages(obj); out_put: i915_gem_object_put(obj); if (err == -ENOMEM) err = 0; return err; } static int igt_ppgtt_mixed(void *arg) { struct drm_i915_private *i915 = arg; const unsigned long flags = PIN_OFFSET_FIXED | PIN_USER; struct drm_i915_gem_object *obj, *on; struct i915_gem_engines *engines; struct i915_gem_engines_iter it; struct i915_address_space *vm; struct i915_gem_context *ctx; struct intel_context *ce; struct file *file; I915_RND_STATE(prng); LIST_HEAD(objects); struct intel_memory_region *mr; struct i915_vma *vma; unsigned int count; u32 i, addr; int *order; int n, err; /* * Sanity check mixing 4K and 64K pages within the same page-table via * the new PS64 TLB hint. */ if (!HAS_64K_PAGES(i915)) { pr_info("device lacks PS64, skipping\n"); return 0; } file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } vm = i915_gem_context_get_eb_vm(ctx); i = 0; addr = 0; do { u32 sz; sz = i915_prandom_u32_max_state(SZ_4M, &prng); sz = max_t(u32, sz, SZ_4K); mr = i915->mm.regions[INTEL_REGION_LMEM_0]; if (i & 1) mr = i915->mm.regions[INTEL_REGION_SMEM]; obj = i915_gem_object_create_region(mr, sz, 0, 0); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_vm; } list_add_tail(&obj->st_link, &objects); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_put; } addr = round_up(addr, mr->min_page_size); err = i915_vma_pin(vma, 0, 0, addr | flags); if (err) goto err_put; if (mr->type == INTEL_MEMORY_LOCAL && (vma->resource->page_sizes_gtt & I915_GTT_PAGE_SIZE_4K)) { err = -EINVAL; goto err_put; } addr += obj->base.size; i++; } while (addr <= SZ_16M); n = 0; count = 0; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { count++; if (!intel_engine_can_store_dword(ce->engine)) continue; n++; } i915_gem_context_unlock_engines(ctx); if (!n) goto err_put; order = i915_random_order(count * count, &prng); if (!order) { err = -ENOMEM; goto err_put; } i = 0; addr = 0; engines = i915_gem_context_lock_engines(ctx); list_for_each_entry(obj, &objects, st_link) { u32 rnd = i915_prandom_u32_max_state(UINT_MAX, &prng); addr = round_up(addr, obj->mm.region->min_page_size); ce = engines->engines[order[i] % engines->num_engines]; i = (i + 1) % (count * count); if (!ce || !intel_engine_can_store_dword(ce->engine)) continue; err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, rnd); if (err) break; err = __igt_write_huge(ce, obj, obj->base.size, addr, offset_in_page(rnd) / sizeof(u32), rnd + 1); if (err) break; err = __igt_write_huge(ce, obj, obj->base.size, addr, (PAGE_SIZE / sizeof(u32)) - 1, rnd 
+ 2); if (err) break; addr += obj->base.size; cond_resched(); } i915_gem_context_unlock_engines(ctx); kfree(order); err_put: list_for_each_entry_safe(obj, on, &objects, st_link) { list_del(&obj->st_link); i915_gem_object_put(obj); } out_vm: i915_vm_put(vm); out: fput(file); return err; } static int igt_tmpfs_fallback(void *arg) { struct drm_i915_private *i915 = arg; struct i915_address_space *vm; struct i915_gem_context *ctx; struct vfsmount *gemfs = i915->mm.gemfs; struct drm_i915_gem_object *obj; struct i915_vma *vma; struct file *file; u32 *vaddr; int err = 0; file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } vm = i915_gem_context_get_eb_vm(ctx); /* * Make sure that we don't burst into a ball of flames upon falling back * to tmpfs, which we rely on if on the off-chance we encouter a failure * when setting up gemfs. */ i915->mm.gemfs = NULL; obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_restore; } vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto out_put; } *vaddr = 0xdeadbeaf; __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; } err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) goto out_put; err = igt_check_page_sizes(vma); i915_vma_unpin(vma); out_put: i915_gem_object_put(obj); out_restore: i915->mm.gemfs = gemfs; i915_vm_put(vm); out: fput(file); return err; } static int igt_shrink_thp(void *arg) { struct drm_i915_private *i915 = arg; struct i915_address_space *vm; struct i915_gem_context *ctx; struct drm_i915_gem_object *obj; struct i915_gem_engines_iter it; struct intel_context *ce; struct i915_vma *vma; struct file *file; unsigned int flags = PIN_USER; unsigned int n; intel_wakeref_t wf; bool should_swap; int err; if (!igt_can_allocate_thp(i915)) { pr_info("missing THP support, skipping\n"); return 0; } file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); ctx = hugepage_ctx(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } vm = i915_gem_context_get_eb_vm(ctx); /* * Sanity check shrinking huge-paged object -- make sure nothing blows * up. */ obj = i915_gem_object_create_shmem(i915, SZ_2M); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_vm; } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; } wf = intel_runtime_pm_get(&i915->runtime_pm); /* active shrink */ err = i915_vma_pin(vma, 0, 0, flags); if (err) goto out_wf; if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { pr_info("failed to allocate THP, finishing test early\n"); goto out_unpin; } err = igt_check_page_sizes(vma); if (err) goto out_unpin; n = 0; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (!intel_engine_can_store_dword(ce->engine)) continue; err = gpu_write(ce, vma, n++, 0xdeadbeaf); if (err) break; } i915_gem_context_unlock_engines(ctx); /* * Nuke everything *before* we unpin the pages so we can be reasonably * sure that when later checking get_nr_swap_pages() that some random * leftover object doesn't steal the remaining swap space. */ i915_gem_shrink(NULL, i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE); i915_vma_unpin(vma); if (err) goto out_wf; /* * Now that the pages are *unpinned* shrinking should invoke * shmem to truncate our pages, if we have available swap. 
*/ should_swap = get_nr_swap_pages() > 0; i915_gem_shrink(NULL, i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_ACTIVE | I915_SHRINK_WRITEBACK); if (should_swap == i915_gem_object_has_pages(obj)) { pr_err("unexpected pages mismatch, should_swap=%s\n", str_yes_no(should_swap)); err = -EINVAL; goto out_wf; } if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) { pr_err("unexpected residual page-size bits, should_swap=%s\n", str_yes_no(should_swap)); err = -EINVAL; goto out_wf; } err = i915_vma_pin(vma, 0, 0, flags); if (err) goto out_wf; while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); if (err) break; } out_unpin: i915_vma_unpin(vma); out_wf: intel_runtime_pm_put(&i915->runtime_pm, wf); out_put: i915_gem_object_put(obj); out_vm: i915_vm_put(vm); out: fput(file); return err; } int i915_gem_huge_page_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_mock_exhaust_device_supported_pages), SUBTEST(igt_mock_memory_region_huge_pages), SUBTEST(igt_mock_ppgtt_misaligned_dma), }; struct drm_i915_private *dev_priv; struct i915_ppgtt *ppgtt; int err; dev_priv = mock_gem_device(); if (!dev_priv) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; RUNTIME_INFO(dev_priv)->ppgtt_size = 48; ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; } if (!i915_vm_is_4lvl(&ppgtt->vm)) { pr_err("failed to create 48b PPGTT\n"); err = -EINVAL; goto out_put; } /* If we were ever hit this then it's time to mock the 64K scratch */ if (!i915_vm_has_scratch_64K(&ppgtt->vm)) { pr_err("PPGTT missing 64K scratch page\n"); err = -EINVAL; goto out_put; } err = i915_subtests(tests, ppgtt); out_put: i915_vm_put(&ppgtt->vm); out_unlock: mock_destroy_device(dev_priv); return err; } int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_shrink_thp), SUBTEST(igt_tmpfs_fallback), SUBTEST(igt_ppgtt_smoke_huge), SUBTEST(igt_ppgtt_sanity_check), SUBTEST(igt_ppgtt_compact), SUBTEST(igt_ppgtt_mixed), SUBTEST(igt_ppgtt_huge_fill), SUBTEST(igt_ppgtt_64K), }; if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); return 0; } if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); }
linux-master
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
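The huge-page fill selftest above derives its expected GTT page-size mask by walking the supported sizes from largest to smallest, taking a bit for each size that still fits and keeping only the remainder. Below is a minimal userspace sketch of that decomposition, assuming only the 2M/64K/4K sizes the selftest cares about; expected_mask() and the SZ_* macros here are illustrative stand-ins, not the kernel helpers.

/*
 * Minimal sketch of the page-size decomposition used by the selftest:
 * starting from the largest supported page size, each size that still
 * fits contributes one bit to the expected GTT mask and strips the
 * object size down to its remainder.
 */
#include <stdio.h>

#define SZ_4K   (1u << 12)
#define SZ_64K  (1u << 16)
#define SZ_2M   (1u << 21)

static unsigned int expected_mask(unsigned int supported, unsigned long size)
{
	static const unsigned int page_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
	unsigned int mask = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int page_size = page_sizes[i];

		if ((supported & page_size) && size >= page_size) {
			mask |= page_size;	/* this page size is used */
			size &= page_size - 1;	/* keep only the remainder */
		}
	}

	return mask;
}

int main(void)
{
	/* 2M + 64K + 4K object on a platform supporting all three sizes */
	printf("%#x\n", expected_mask(SZ_2M | SZ_64K | SZ_4K,
				      SZ_2M + SZ_64K + SZ_4K));
	return 0;
}

Running this prints a mask with all three page-size bits set, which is exactly the expected_gtt value the selftest then compares against vma->resource->page_sizes_gtt.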
/* * SPDX-License-Identifier: MIT * * Copyright © 2018 Intel Corporation */ #include "igt_gem_utils.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "i915_vma.h" #include "i915_drv.h" #include "i915_request.h" struct i915_request * igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { struct intel_context *ce; struct i915_request *rq; /* * Pinning the contexts may generate requests in order to acquire * GGTT space, so do this first before we reserve a seqno for * ourselves. */ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); if (IS_ERR(ce)) return ERR_CAST(ce); rq = intel_context_create_request(ce); intel_context_put(ce); return rq; } struct i915_vma * igt_emit_store_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 val) { struct drm_i915_gem_object *obj; const int ver = GRAPHICS_VER(vma->vm->i915); unsigned long n, size; u32 *cmd; int err; size = (4 * count + 1) * sizeof(u32); size = round_up(size, PAGE_SIZE); obj = i915_gem_object_create_internal(vma->vm->i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; } GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma)); offset += i915_vma_offset(vma); for (n = 0; n < count; n++) { if (ver >= 8) { *cmd++ = MI_STORE_DWORD_IMM_GEN4; *cmd++ = lower_32_bits(offset); *cmd++ = upper_32_bits(offset); *cmd++ = val; } else if (ver >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | (ver < 6 ? MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = val; } else { *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cmd++ = offset; *cmd++ = val; } offset += PAGE_SIZE; } *cmd = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(vma->vm->gt); vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; } err = i915_vma_pin(vma, 0, 0, PIN_USER); if (err) goto err; return vma; err: i915_gem_object_put(obj); return ERR_PTR(err); } int igt_gpu_fill_dw(struct intel_context *ce, struct i915_vma *vma, u64 offset, unsigned long count, u32 val) { struct i915_request *rq; struct i915_vma *batch; unsigned int flags; int err; GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); GEM_BUG_ON(!i915_vma_is_pinned(vma)); batch = igt_emit_store_dw(vma, offset, count, val); if (IS_ERR(batch)) return PTR_ERR(batch); rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; } err = igt_vma_move_to_active_unlocked(batch, rq, 0); if (err) goto skip_request; err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE); if (err) goto skip_request; flags = 0; if (GRAPHICS_VER(ce->vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, i915_vma_offset(batch), i915_vma_size(batch), flags); skip_request: if (err) i915_request_set_error_once(rq, err); i915_request_add(rq); err_batch: i915_vma_unpin_and_release(&batch, 0); return err; }
linux-master
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
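igt_emit_store_dw() above sizes its batch as (4 * count + 1) dwords, rounded up to a page, because each gen8+ store is four dwords plus one terminating MI_BATCH_BUFFER_END. The standalone sketch below shows only that dword layout; the opcode values are placeholders, not the real encodings from intel_gpu_commands.h.

/*
 * Sketch of the gen8+ batch layout: one 4-dword store per written dword,
 * followed by a single batch-buffer-end dword.
 */
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

#define FAKE_MI_STORE_DWORD_IMM		0x10000000u	/* placeholder opcode */
#define FAKE_MI_BATCH_BUFFER_END	0x05000000u	/* placeholder opcode */
#define FAKE_PAGE_SIZE			4096u

static size_t emit_store_dw(uint32_t *cmd, uint64_t gtt_offset,
			    unsigned long count, uint32_t val)
{
	const uint32_t *start = cmd;

	while (count--) {
		*cmd++ = FAKE_MI_STORE_DWORD_IMM;
		*cmd++ = (uint32_t)gtt_offset;		/* address, low 32 bits */
		*cmd++ = (uint32_t)(gtt_offset >> 32);	/* address, high 32 bits */
		*cmd++ = val;
		gtt_offset += FAKE_PAGE_SIZE;		/* selftests write one dword per page */
	}
	*cmd++ = FAKE_MI_BATCH_BUFFER_END;

	return cmd - start;				/* dwords emitted */
}

int main(void)
{
	unsigned long count = 8;
	uint32_t *batch = malloc((4 * count + 1) * sizeof(uint32_t));

	assert(batch);
	assert(emit_store_dw(batch, 0x100000, count, 0xdeadbeef) ==
	       4 * count + 1);
	free(batch);
	return 0;
}

The assertion in main() mirrors the allocation math in igt_emit_store_dw(): the emitted dword count always matches the 4 * count + 1 budget the object was sized for.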
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Unisoc Inc. */ #include <linux/component.h> #include <linux/delay.h> #include <linux/dma-buf.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include "sprd_drm.h" #include "sprd_dpu.h" #include "sprd_dsi.h" /* Global control registers */ #define REG_DPU_CTRL 0x04 #define REG_DPU_CFG0 0x08 #define REG_PANEL_SIZE 0x20 #define REG_BLEND_SIZE 0x24 #define REG_BG_COLOR 0x2C /* Layer0 control registers */ #define REG_LAY_BASE_ADDR0 0x30 #define REG_LAY_BASE_ADDR1 0x34 #define REG_LAY_BASE_ADDR2 0x38 #define REG_LAY_CTRL 0x40 #define REG_LAY_SIZE 0x44 #define REG_LAY_PITCH 0x48 #define REG_LAY_POS 0x4C #define REG_LAY_ALPHA 0x50 #define REG_LAY_CROP_START 0x5C /* Interrupt control registers */ #define REG_DPU_INT_EN 0x1E0 #define REG_DPU_INT_CLR 0x1E4 #define REG_DPU_INT_STS 0x1E8 /* DPI control registers */ #define REG_DPI_CTRL 0x1F0 #define REG_DPI_H_TIMING 0x1F4 #define REG_DPI_V_TIMING 0x1F8 /* MMU control registers */ #define REG_MMU_EN 0x800 #define REG_MMU_VPN_RANGE 0x80C #define REG_MMU_PPN1 0x83C #define REG_MMU_RANGE1 0x840 #define REG_MMU_PPN2 0x844 #define REG_MMU_RANGE2 0x848 /* Global control bits */ #define BIT_DPU_RUN BIT(0) #define BIT_DPU_STOP BIT(1) #define BIT_DPU_REG_UPDATE BIT(2) #define BIT_DPU_IF_EDPI BIT(0) /* Layer control bits */ #define BIT_DPU_LAY_EN BIT(0) #define BIT_DPU_LAY_LAYER_ALPHA (0x01 << 2) #define BIT_DPU_LAY_COMBO_ALPHA (0x02 << 2) #define BIT_DPU_LAY_FORMAT_YUV422_2PLANE (0x00 << 4) #define BIT_DPU_LAY_FORMAT_YUV420_2PLANE (0x01 << 4) #define BIT_DPU_LAY_FORMAT_YUV420_3PLANE (0x02 << 4) #define BIT_DPU_LAY_FORMAT_ARGB8888 (0x03 << 4) #define BIT_DPU_LAY_FORMAT_RGB565 (0x04 << 4) #define BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3 (0x00 << 8) #define BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0 (0x01 << 8) #define BIT_DPU_LAY_NO_SWITCH (0x00 << 10) #define BIT_DPU_LAY_RB_OR_UV_SWITCH (0x01 << 10) #define BIT_DPU_LAY_MODE_BLEND_NORMAL (0x00 << 16) #define BIT_DPU_LAY_MODE_BLEND_PREMULT (0x01 << 16) #define BIT_DPU_LAY_ROTATION_0 (0x00 << 20) #define BIT_DPU_LAY_ROTATION_90 (0x01 << 20) #define BIT_DPU_LAY_ROTATION_180 (0x02 << 20) #define BIT_DPU_LAY_ROTATION_270 (0x03 << 20) #define BIT_DPU_LAY_ROTATION_0_M (0x04 << 20) #define BIT_DPU_LAY_ROTATION_90_M (0x05 << 20) #define BIT_DPU_LAY_ROTATION_180_M (0x06 << 20) #define BIT_DPU_LAY_ROTATION_270_M (0x07 << 20) /* Interrupt control & status bits */ #define BIT_DPU_INT_DONE BIT(0) #define BIT_DPU_INT_TE BIT(1) #define BIT_DPU_INT_ERR BIT(2) #define BIT_DPU_INT_UPDATE_DONE BIT(4) #define BIT_DPU_INT_VSYNC BIT(5) /* DPI control bits */ #define BIT_DPU_EDPI_TE_EN BIT(8) #define BIT_DPU_EDPI_FROM_EXTERNAL_PAD BIT(10) #define BIT_DPU_DPI_HALT_EN BIT(16) static const u32 layer_fmts[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_NV16, DRM_FORMAT_NV61, DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, }; struct sprd_plane { struct drm_plane base; }; static int dpu_wait_stop_done(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; int rc; if 
(ctx->stopped) return 0; rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_stop, msecs_to_jiffies(500)); ctx->evt_stop = false; ctx->stopped = true; if (!rc) { drm_err(dpu->drm, "dpu wait for stop done time out!\n"); return -ETIMEDOUT; } return 0; } static int dpu_wait_update_done(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; int rc; ctx->evt_update = false; rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_update, msecs_to_jiffies(500)); if (!rc) { drm_err(dpu->drm, "dpu wait for reg update done time out!\n"); return -ETIMEDOUT; } return 0; } static u32 drm_format_to_dpu(struct drm_framebuffer *fb) { u32 format = 0; switch (fb->format->format) { case DRM_FORMAT_BGRA8888: /* BGRA8888 -> ARGB8888 */ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; format |= BIT_DPU_LAY_FORMAT_ARGB8888; break; case DRM_FORMAT_RGBX8888: case DRM_FORMAT_RGBA8888: /* RGBA8888 -> ABGR8888 */ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; fallthrough; case DRM_FORMAT_ABGR8888: /* RB switch */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; fallthrough; case DRM_FORMAT_ARGB8888: format |= BIT_DPU_LAY_FORMAT_ARGB8888; break; case DRM_FORMAT_XBGR8888: /* RB switch */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; fallthrough; case DRM_FORMAT_XRGB8888: format |= BIT_DPU_LAY_FORMAT_ARGB8888; break; case DRM_FORMAT_BGR565: /* RB switch */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; fallthrough; case DRM_FORMAT_RGB565: format |= BIT_DPU_LAY_FORMAT_RGB565; break; case DRM_FORMAT_NV12: /* 2-Lane: Yuv420 */ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; /* UV endian */ format |= BIT_DPU_LAY_NO_SWITCH; break; case DRM_FORMAT_NV21: /* 2-Lane: Yuv420 */ format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; /* UV endian */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; break; case DRM_FORMAT_NV16: /* 2-Lane: Yuv422 */ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; /* UV endian */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; break; case DRM_FORMAT_NV61: /* 2-Lane: Yuv422 */ format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; /* UV endian */ format |= BIT_DPU_LAY_NO_SWITCH; break; case DRM_FORMAT_YUV420: format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; /* UV endian */ format |= BIT_DPU_LAY_NO_SWITCH; break; case DRM_FORMAT_YVU420: format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; /* Y endian */ format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; /* UV endian */ format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; break; default: break; } return format; } static u32 drm_rotation_to_dpu(struct drm_plane_state *state) { u32 rotation = 0; switch (state->rotation) { default: case DRM_MODE_ROTATE_0: rotation = BIT_DPU_LAY_ROTATION_0; break; case DRM_MODE_ROTATE_90: rotation = BIT_DPU_LAY_ROTATION_90; break; case DRM_MODE_ROTATE_180: rotation = BIT_DPU_LAY_ROTATION_180; break; case DRM_MODE_ROTATE_270: rotation = BIT_DPU_LAY_ROTATION_270; break; case DRM_MODE_REFLECT_Y: rotation = BIT_DPU_LAY_ROTATION_180_M; break; case (DRM_MODE_REFLECT_Y | DRM_MODE_ROTATE_90): rotation = BIT_DPU_LAY_ROTATION_90_M; break; case DRM_MODE_REFLECT_X: rotation = BIT_DPU_LAY_ROTATION_0_M; break; case (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90): rotation = BIT_DPU_LAY_ROTATION_270_M; break; } return rotation; } static u32 drm_blend_to_dpu(struct drm_plane_state *state) { u32 blend = 0; switch (state->pixel_blend_mode) { case 
DRM_MODE_BLEND_COVERAGE: /* alpha mode select - combo alpha */ blend |= BIT_DPU_LAY_COMBO_ALPHA; /* Normal mode */ blend |= BIT_DPU_LAY_MODE_BLEND_NORMAL; break; case DRM_MODE_BLEND_PREMULTI: /* alpha mode select - combo alpha */ blend |= BIT_DPU_LAY_COMBO_ALPHA; /* Pre-mult mode */ blend |= BIT_DPU_LAY_MODE_BLEND_PREMULT; break; case DRM_MODE_BLEND_PIXEL_NONE: default: /* don't do blending, maybe RGBX */ /* alpha mode select - layer alpha */ blend |= BIT_DPU_LAY_LAYER_ALPHA; break; } return blend; } static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state) { struct dpu_context *ctx = &dpu->ctx; struct drm_gem_dma_object *dma_obj; struct drm_framebuffer *fb = state->fb; u32 addr, size, offset, pitch, blend, format, rotation; u32 src_x = state->src_x >> 16; u32 src_y = state->src_y >> 16; u32 src_w = state->src_w >> 16; u32 src_h = state->src_h >> 16; u32 dst_x = state->crtc_x; u32 dst_y = state->crtc_y; u32 alpha = state->alpha; u32 index = state->zpos; int i; offset = (dst_x & 0xffff) | (dst_y << 16); size = (src_w & 0xffff) | (src_h << 16); for (i = 0; i < fb->format->num_planes; i++) { dma_obj = drm_fb_dma_get_gem_obj(fb, i); addr = dma_obj->dma_addr + fb->offsets[i]; if (i == 0) layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index); else if (i == 1) layer_reg_wr(ctx, REG_LAY_BASE_ADDR1, addr, index); else layer_reg_wr(ctx, REG_LAY_BASE_ADDR2, addr, index); } if (fb->format->num_planes == 3) { /* UV pitch is 1/2 of Y pitch */ pitch = (fb->pitches[0] / fb->format->cpp[0]) | (fb->pitches[0] / fb->format->cpp[0] << 15); } else { pitch = fb->pitches[0] / fb->format->cpp[0]; } layer_reg_wr(ctx, REG_LAY_POS, offset, index); layer_reg_wr(ctx, REG_LAY_SIZE, size, index); layer_reg_wr(ctx, REG_LAY_CROP_START, src_y << 16 | src_x, index); layer_reg_wr(ctx, REG_LAY_ALPHA, alpha, index); layer_reg_wr(ctx, REG_LAY_PITCH, pitch, index); format = drm_format_to_dpu(fb); blend = drm_blend_to_dpu(state); rotation = drm_rotation_to_dpu(state); layer_reg_wr(ctx, REG_LAY_CTRL, BIT_DPU_LAY_EN | format | blend | rotation, index); } static void sprd_dpu_flip(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; /* * Make sure the dpu is in stop status. DPU has no shadow * registers in EDPI mode. So the config registers can only be * updated in the rising edge of DPU_RUN bit. 
*/ if (ctx->if_type == SPRD_DPU_IF_EDPI) dpu_wait_stop_done(dpu); /* update trigger and wait */ if (ctx->if_type == SPRD_DPU_IF_DPI) { if (!ctx->stopped) { dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_REG_UPDATE); dpu_wait_update_done(dpu); } dpu_reg_set(ctx, REG_DPU_INT_EN, BIT_DPU_INT_ERR); } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); ctx->stopped = false; } } static void sprd_dpu_init(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; u32 int_mask = 0; writel(0x00, ctx->base + REG_BG_COLOR); writel(0x00, ctx->base + REG_MMU_EN); writel(0x00, ctx->base + REG_MMU_PPN1); writel(0xffff, ctx->base + REG_MMU_RANGE1); writel(0x00, ctx->base + REG_MMU_PPN2); writel(0xffff, ctx->base + REG_MMU_RANGE2); writel(0x1ffff, ctx->base + REG_MMU_VPN_RANGE); if (ctx->if_type == SPRD_DPU_IF_DPI) { /* use dpi as interface */ dpu_reg_clr(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); /* disable Halt function for SPRD DSI */ dpu_reg_clr(ctx, REG_DPI_CTRL, BIT_DPU_DPI_HALT_EN); /* select te from external pad */ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); /* enable dpu update done INT */ int_mask |= BIT_DPU_INT_UPDATE_DONE; /* enable dpu done INT */ int_mask |= BIT_DPU_INT_DONE; /* enable dpu dpi vsync */ int_mask |= BIT_DPU_INT_VSYNC; /* enable dpu TE INT */ int_mask |= BIT_DPU_INT_TE; /* enable underflow err INT */ int_mask |= BIT_DPU_INT_ERR; } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { /* use edpi as interface */ dpu_reg_set(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); /* use external te */ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); /* enable te */ dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_TE_EN); /* enable stop done INT */ int_mask |= BIT_DPU_INT_DONE; /* enable TE INT */ int_mask |= BIT_DPU_INT_TE; } writel(int_mask, ctx->base + REG_DPU_INT_EN); } static void sprd_dpu_fini(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; writel(0x00, ctx->base + REG_DPU_INT_EN); writel(0xff, ctx->base + REG_DPU_INT_CLR); } static void sprd_dpi_init(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; u32 reg_val; u32 size; size = (ctx->vm.vactive << 16) | ctx->vm.hactive; writel(size, ctx->base + REG_PANEL_SIZE); writel(size, ctx->base + REG_BLEND_SIZE); if (ctx->if_type == SPRD_DPU_IF_DPI) { /* set dpi timing */ reg_val = ctx->vm.hsync_len << 0 | ctx->vm.hback_porch << 8 | ctx->vm.hfront_porch << 20; writel(reg_val, ctx->base + REG_DPI_H_TIMING); reg_val = ctx->vm.vsync_len << 0 | ctx->vm.vback_porch << 8 | ctx->vm.vfront_porch << 20; writel(reg_val, ctx->base + REG_DPI_V_TIMING); } } void sprd_dpu_run(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); ctx->stopped = false; } void sprd_dpu_stop(struct sprd_dpu *dpu) { struct dpu_context *ctx = &dpu->ctx; if (ctx->if_type == SPRD_DPU_IF_DPI) dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_STOP); dpu_wait_stop_done(dpu); } static int sprd_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc_state *crtc_state; u32 fmt; if (!plane_state->fb || !plane_state->crtc) return 0; fmt = drm_format_to_dpu(plane_state->fb); if (!fmt) return -EINVAL; crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); return drm_atomic_helper_check_plane_state(plane_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, true, true); } static void 
sprd_plane_atomic_update(struct drm_plane *drm_plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, drm_plane); struct sprd_dpu *dpu = to_sprd_crtc(new_state->crtc); /* start configure dpu layers */ sprd_dpu_layer(dpu, new_state); } static void sprd_plane_atomic_disable(struct drm_plane *drm_plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, drm_plane); struct sprd_dpu *dpu = to_sprd_crtc(old_state->crtc); layer_reg_wr(&dpu->ctx, REG_LAY_CTRL, 0x00, old_state->zpos); } static void sprd_plane_create_properties(struct sprd_plane *plane, int index) { unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE); /* create rotation property */ drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK); /* create alpha property */ drm_plane_create_alpha_property(&plane->base); /* create blend mode property */ drm_plane_create_blend_mode_property(&plane->base, supported_modes); /* create zpos property */ drm_plane_create_zpos_immutable_property(&plane->base, index); } static const struct drm_plane_helper_funcs sprd_plane_helper_funcs = { .atomic_check = sprd_plane_atomic_check, .atomic_update = sprd_plane_atomic_update, .atomic_disable = sprd_plane_atomic_disable, }; static const struct drm_plane_funcs sprd_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; static struct sprd_plane *sprd_planes_init(struct drm_device *drm) { struct sprd_plane *plane, *primary; enum drm_plane_type plane_type; int i; for (i = 0; i < 6; i++) { plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; plane = drmm_universal_plane_alloc(drm, struct sprd_plane, base, 1, &sprd_plane_funcs, layer_fmts, ARRAY_SIZE(layer_fmts), NULL, plane_type, NULL); if (IS_ERR(plane)) { drm_err(drm, "failed to init drm plane: %d\n", i); return plane; } drm_plane_helper_add(&plane->base, &sprd_plane_helper_funcs); sprd_plane_create_properties(plane, i); if (i == 0) primary = plane; } return primary; } static void sprd_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct drm_encoder *encoder; struct sprd_dsi *dsi; drm_display_mode_to_videomode(mode, &dpu->ctx.vm); drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { dsi = encoder_to_dsi(encoder); if (dsi->slave->mode_flags & MIPI_DSI_MODE_VIDEO) dpu->ctx.if_type = SPRD_DPU_IF_DPI; else dpu->ctx.if_type = SPRD_DPU_IF_EDPI; } sprd_dpi_init(dpu); } static void sprd_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); sprd_dpu_init(dpu); drm_crtc_vblank_on(&dpu->base); } static void sprd_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); struct drm_device *drm = dpu->base.dev; drm_crtc_vblank_off(&dpu->base); sprd_dpu_fini(dpu); spin_lock_irq(&drm->event_lock); if (crtc->state->event) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&drm->event_lock); } static void sprd_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); struct drm_device *drm = dpu->base.dev; sprd_dpu_flip(dpu); spin_lock_irq(&drm->event_lock); if (crtc->state->event) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&drm->event_lock); } static int sprd_crtc_enable_vblank(struct drm_crtc *crtc) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); dpu_reg_set(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); return 0; } static void sprd_crtc_disable_vblank(struct drm_crtc *crtc) { struct sprd_dpu *dpu = to_sprd_crtc(crtc); dpu_reg_clr(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); } static const struct drm_crtc_helper_funcs sprd_crtc_helper_funcs = { .mode_set_nofb = sprd_crtc_mode_set_nofb, .atomic_flush = sprd_crtc_atomic_flush, .atomic_enable = sprd_crtc_atomic_enable, .atomic_disable = sprd_crtc_atomic_disable, }; static const struct drm_crtc_funcs sprd_crtc_funcs = { .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, .enable_vblank = sprd_crtc_enable_vblank, .disable_vblank = sprd_crtc_disable_vblank, }; static struct sprd_dpu *sprd_crtc_init(struct drm_device *drm, struct drm_plane *primary, struct device *dev) { struct device_node *port; struct sprd_dpu *dpu; dpu = drmm_crtc_alloc_with_planes(drm, struct sprd_dpu, base, primary, NULL, &sprd_crtc_funcs, NULL); if (IS_ERR(dpu)) { drm_err(drm, "failed to init crtc\n"); return dpu; } drm_crtc_helper_add(&dpu->base, &sprd_crtc_helper_funcs); /* * set crtc port so that drm_of_find_possible_crtcs call works */ port = of_graph_get_port_by_id(dev->of_node, 0); if (!port) { drm_err(drm, "failed to found crtc output port for %s\n", dev->of_node->full_name); return 
ERR_PTR(-EINVAL); } dpu->base.port = port; of_node_put(port); return dpu; } static irqreturn_t sprd_dpu_isr(int irq, void *data) { struct sprd_dpu *dpu = data; struct dpu_context *ctx = &dpu->ctx; u32 reg_val, int_mask = 0; reg_val = readl(ctx->base + REG_DPU_INT_STS); /* disable err interrupt */ if (reg_val & BIT_DPU_INT_ERR) { int_mask |= BIT_DPU_INT_ERR; drm_warn(dpu->drm, "Warning: dpu underflow!\n"); } /* dpu update done isr */ if (reg_val & BIT_DPU_INT_UPDATE_DONE) { ctx->evt_update = true; wake_up_interruptible_all(&ctx->wait_queue); } /* dpu stop done isr */ if (reg_val & BIT_DPU_INT_DONE) { ctx->evt_stop = true; wake_up_interruptible_all(&ctx->wait_queue); } if (reg_val & BIT_DPU_INT_VSYNC) drm_crtc_handle_vblank(&dpu->base); writel(reg_val, ctx->base + REG_DPU_INT_CLR); dpu_reg_clr(ctx, REG_DPU_INT_EN, int_mask); return IRQ_HANDLED; } static int sprd_dpu_context_init(struct sprd_dpu *dpu, struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dpu_context *ctx = &dpu->ctx; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "failed to get I/O resource\n"); return -EINVAL; } ctx->base = devm_ioremap(dev, res->start, resource_size(res)); if (!ctx->base) { dev_err(dev, "failed to map dpu registers\n"); return -EFAULT; } ctx->irq = platform_get_irq(pdev, 0); if (ctx->irq < 0) return ctx->irq; /* disable and clear interrupts before register dpu IRQ. */ writel(0x00, ctx->base + REG_DPU_INT_EN); writel(0xff, ctx->base + REG_DPU_INT_CLR); ret = devm_request_irq(dev, ctx->irq, sprd_dpu_isr, IRQF_TRIGGER_NONE, "DPU", dpu); if (ret) { dev_err(dev, "failed to register dpu irq handler\n"); return ret; } init_waitqueue_head(&ctx->wait_queue); return 0; } static int sprd_dpu_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct sprd_dpu *dpu; struct sprd_plane *plane; int ret; plane = sprd_planes_init(drm); if (IS_ERR(plane)) return PTR_ERR(plane); dpu = sprd_crtc_init(drm, &plane->base, dev); if (IS_ERR(dpu)) return PTR_ERR(dpu); dpu->drm = drm; dev_set_drvdata(dev, dpu); ret = sprd_dpu_context_init(dpu, dev); if (ret) return ret; return 0; } static const struct component_ops dpu_component_ops = { .bind = sprd_dpu_bind, }; static const struct of_device_id dpu_match_table[] = { { .compatible = "sprd,sharkl3-dpu" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, dpu_match_table); static int sprd_dpu_probe(struct platform_device *pdev) { return component_add(&pdev->dev, &dpu_component_ops); } static int sprd_dpu_remove(struct platform_device *pdev) { component_del(&pdev->dev, &dpu_component_ops); return 0; } struct platform_driver sprd_dpu_driver = { .probe = sprd_dpu_probe, .remove = sprd_dpu_remove, .driver = { .name = "sprd-dpu-drv", .of_match_table = dpu_match_table, }, }; MODULE_AUTHOR("Leon He <[email protected]>"); MODULE_AUTHOR("Kevin Tang <[email protected]>"); MODULE_DESCRIPTION("Unisoc Display Controller Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/sprd/sprd_dpu.c
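sprd_dpu_layer() above packs the layer position and size as a 16-bit x/y pair per 32-bit register and programs the pitch in pixels rather than bytes. The small sketch below mirrors that packing under those assumptions; pack_xy() and pitch_in_pixels() are illustrative helpers, not part of the driver.

/*
 * REG_LAY_POS and REG_LAY_SIZE style packing: x/width in the low 16 bits,
 * y/height in the high 16 bits; pitch expressed in pixels.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t pack_xy(uint32_t x, uint32_t y)
{
	return (x & 0xffff) | (y << 16);	/* low half: x, high half: y */
}

static uint32_t pitch_in_pixels(uint32_t pitch_bytes, uint32_t cpp)
{
	return pitch_bytes / cpp;		/* e.g. XRGB8888 has cpp == 4 */
}

int main(void)
{
	/* 1080x720 layer placed at (16, 32), XRGB8888 with a 4352-byte stride */
	assert(pack_xy(16, 32) == ((32u << 16) | 16u));
	assert(pack_xy(1080, 720) == ((720u << 16) | 1080u));
	assert(pitch_in_pixels(4352, 4) == 1088);
	return 0;
}

Keeping position, size and pitch in these packed pixel units is what lets the driver write each layer with a handful of layer_reg_wr() calls instead of per-field register accesses.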
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Unisoc Inc. */ #include <linux/component.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include "sprd_drm.h" #include "sprd_dpu.h" #include "sprd_dsi.h" #define SOFT_RESET 0x04 #define MASK_PROTOCOL_INT 0x0C #define MASK_INTERNAL_INT 0x14 #define DSI_MODE_CFG 0x18 #define VIRTUAL_CHANNEL_ID 0x1C #define GEN_RX_VCID GENMASK(1, 0) #define VIDEO_PKT_VCID GENMASK(3, 2) #define DPI_VIDEO_FORMAT 0x20 #define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0) #define LOOSELY18_EN BIT(6) #define VIDEO_PKT_CONFIG 0x24 #define VIDEO_PKT_SIZE GENMASK(15, 0) #define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16) #define VIDEO_LINE_HBLK_TIME 0x28 #define VIDEO_LINE_HBP_TIME GENMASK(15, 0) #define VIDEO_LINE_HSA_TIME GENMASK(31, 16) #define VIDEO_LINE_TIME 0x2C #define VIDEO_VBLK_LINES 0x30 #define VFP_LINES GENMASK(9, 0) #define VBP_LINES GENMASK(19, 10) #define VSA_LINES GENMASK(29, 20) #define VIDEO_VACTIVE_LINES 0x34 #define VID_MODE_CFG 0x38 #define VID_MODE_TYPE GENMASK(1, 0) #define LP_VSA_EN BIT(8) #define LP_VBP_EN BIT(9) #define LP_VFP_EN BIT(10) #define LP_VACT_EN BIT(11) #define LP_HBP_EN BIT(12) #define LP_HFP_EN BIT(13) #define FRAME_BTA_ACK_EN BIT(14) #define TIMEOUT_CNT_CLK_CONFIG 0x40 #define HTX_TO_CONFIG 0x44 #define LRX_H_TO_CONFIG 0x48 #define TX_ESC_CLK_CONFIG 0x5C #define CMD_MODE_CFG 0x68 #define TEAR_FX_EN BIT(0) #define GEN_HDR 0x6C #define GEN_DT GENMASK(5, 0) #define GEN_VC GENMASK(7, 6) #define GEN_PLD_DATA 0x70 #define PHY_CLK_LANE_LP_CTRL 0x74 #define PHY_CLKLANE_TX_REQ_HS BIT(0) #define AUTO_CLKLANE_CTRL_EN BIT(1) #define PHY_INTERFACE_CTRL 0x78 #define RF_PHY_SHUTDOWN BIT(0) #define RF_PHY_RESET_N BIT(1) #define RF_PHY_CLK_EN BIT(2) #define CMD_MODE_STATUS 0x98 #define GEN_CMD_RDATA_FIFO_EMPTY BIT(1) #define GEN_CMD_WDATA_FIFO_EMPTY BIT(3) #define GEN_CMD_CMD_FIFO_EMPTY BIT(5) #define GEN_CMD_RDCMD_DONE BIT(7) #define PHY_STATUS 0x9C #define PHY_LOCK BIT(1) #define PHY_MIN_STOP_TIME 0xA0 #define PHY_LANE_NUM_CONFIG 0xA4 #define PHY_CLKLANE_TIME_CONFIG 0xA8 #define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0) #define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16) #define PHY_DATALANE_TIME_CONFIG 0xAC #define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0) #define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16) #define MAX_READ_TIME 0xB0 #define RX_PKT_CHECK_CONFIG 0xB4 #define RX_PKT_ECC_EN BIT(0) #define RX_PKT_CRC_EN BIT(1) #define TA_EN 0xB8 #define EOTP_EN 0xBC #define TX_EOTP_EN BIT(0) #define RX_EOTP_EN BIT(1) #define VIDEO_NULLPKT_SIZE 0xC0 #define DCS_WM_PKT_SIZE 0xC4 #define VIDEO_SIG_DELAY_CONFIG 0xD0 #define VIDEO_SIG_DELAY GENMASK(23, 0) #define PHY_TST_CTRL0 0xF0 #define PHY_TESTCLR BIT(0) #define PHY_TESTCLK BIT(1) #define PHY_TST_CTRL1 0xF4 #define PHY_TESTDIN GENMASK(7, 0) #define PHY_TESTDOUT GENMASK(15, 8) #define PHY_TESTEN BIT(16) #define host_to_dsi(host) \ container_of(host, struct sprd_dsi, host) static inline u32 dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask, u32 shift) { return (readl(ctx->base + offset) & mask) >> shift; } static inline void dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask, u32 shift, u32 val) { u32 ret; ret = readl(ctx->base + offset); ret &= ~mask; ret |= (val << shift) & mask; writel(ret, ctx->base + offset); } static inline void dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask, u32 val) { u32 
ret = readl(ctx->base + offset); writel((ret & ~mask) | (val & mask), ctx->base + offset); } static int regmap_tst_io_write(void *context, u32 reg, u32 val) { struct sprd_dsi *dsi = context; struct dsi_context *ctx = &dsi->ctx; if (val > 0xff || reg > 0xff) return -EINVAL; drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); return 0; } static int regmap_tst_io_read(void *context, u32 reg, u32 *val) { struct sprd_dsi *dsi = context; struct dsi_context *ctx = &dsi->ctx; int ret; if (reg > 0xff) return -EINVAL; dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); udelay(1); ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8); if (ret < 0) return ret; *val = ret; drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val); return 0; } static struct regmap_bus regmap_tst_io = { .reg_write = regmap_tst_io_write, .reg_read = regmap_tst_io_read, }; static const struct regmap_config byte_config = { .reg_bits = 8, .val_bits = 8, }; static int dphy_wait_pll_locked(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int i; for (i = 0; i < 50000; i++) { if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1)) return 0; udelay(3); } drm_err(dsi->drm, "dphy pll can not be locked\n"); return -ETIMEDOUT; } static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx) { int i; for (i = 0; i < 5000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3)) return 0; udelay(1); } return -ETIMEDOUT; } static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx) { int i; for (i = 0; i < 5000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5)) return 0; udelay(1); } return -ETIMEDOUT; } static int dsi_wait_rd_resp_completed(struct dsi_context *ctx) { int i; for (i = 0; i < 10000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7)) return 0; udelay(10); } return -ETIMEDOUT; } static u16 calc_bytes_per_pixel_x100(int coding) { u16 bpp_x100; switch (coding) { case COLOR_CODE_16BIT_CONFIG1: case COLOR_CODE_16BIT_CONFIG2: case COLOR_CODE_16BIT_CONFIG3: bpp_x100 = 200; break; case COLOR_CODE_18BIT_CONFIG1: case COLOR_CODE_18BIT_CONFIG2: bpp_x100 = 225; break; case COLOR_CODE_24BIT: bpp_x100 = 300; break; case COLOR_CODE_COMPRESSTION: bpp_x100 = 100; break; case COLOR_CODE_20BIT_YCC422_LOOSELY: bpp_x100 = 250; break; case COLOR_CODE_24BIT_YCC422: bpp_x100 = 300; break; case COLOR_CODE_16BIT_YCC422: bpp_x100 = 200; break; case COLOR_CODE_30BIT: bpp_x100 = 375; break; case COLOR_CODE_36BIT: bpp_x100 = 450; break; case COLOR_CODE_12BIT_YCC420: bpp_x100 = 150; break; default: DRM_ERROR("invalid color coding"); bpp_x100 = 0; break; } return bpp_x100; } static u8 calc_video_size_step(int coding) { u8 video_size_step; switch (coding) { case COLOR_CODE_16BIT_CONFIG1: case COLOR_CODE_16BIT_CONFIG2: case COLOR_CODE_16BIT_CONFIG3: case COLOR_CODE_18BIT_CONFIG1: case COLOR_CODE_18BIT_CONFIG2: case COLOR_CODE_24BIT: 
case COLOR_CODE_COMPRESSTION: return video_size_step = 1; case COLOR_CODE_20BIT_YCC422_LOOSELY: case COLOR_CODE_24BIT_YCC422: case COLOR_CODE_16BIT_YCC422: case COLOR_CODE_30BIT: case COLOR_CODE_36BIT: case COLOR_CODE_12BIT_YCC420: return video_size_step = 2; default: DRM_ERROR("invalid color coding"); return 0; } } static u16 round_video_size(int coding, u16 video_size) { switch (coding) { case COLOR_CODE_16BIT_YCC422: case COLOR_CODE_24BIT_YCC422: case COLOR_CODE_20BIT_YCC422_LOOSELY: case COLOR_CODE_12BIT_YCC420: /* round up active H pixels to a multiple of 2 */ if ((video_size % 2) != 0) video_size += 1; break; default: break; } return video_size; } #define SPRD_MIPI_DSI_FMT_DSC 0xff static u32 fmt_to_coding(u32 fmt) { switch (fmt) { case MIPI_DSI_FMT_RGB565: return COLOR_CODE_16BIT_CONFIG1; case MIPI_DSI_FMT_RGB666: case MIPI_DSI_FMT_RGB666_PACKED: return COLOR_CODE_18BIT_CONFIG1; case MIPI_DSI_FMT_RGB888: return COLOR_CODE_24BIT; case SPRD_MIPI_DSI_FMT_DSC: return COLOR_CODE_COMPRESSTION; default: DRM_ERROR("Unsupported format (%d)\n", fmt); return COLOR_CODE_24BIT; } } #define ns_to_cycle(ns, byte_clk) \ DIV_ROUND_UP((ns) * (byte_clk), 1000000) static void sprd_dsi_init(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); u32 byte_clk = dsi->slave->hs_rate / 8; u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs; u16 max_rd_time; int div; writel(0, ctx->base + SOFT_RESET); writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + DSI_MODE_CFG); dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0); dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0); dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN); dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN); writel(1, ctx->base + TA_EN); dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0); dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0); div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate); writel(div, ctx->base + TX_ESC_CLK_CONFIG); max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk); writel(max_rd_time, ctx->base + MAX_READ_TIME); data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk); data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk); clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk); clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk); dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp); dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs); dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp); dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs); writel(1, ctx->base + SOFT_RESET); } /* * Free up resources and shutdown host controller and PHY */ static void sprd_dsi_fini(struct dsi_context *ctx) { writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); writel(0, ctx->base + SOFT_RESET); } /* * If not in burst mode, it will compute the video and null packet sizes * according to necessity. * Configure timers for data lanes and/or clock lane to return to LP when * bandwidth is not filled by data. 
*/ static int sprd_dsi_dpi_video(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); struct videomode *vm = &ctx->vm; u32 byte_clk = dsi->slave->hs_rate / 8; u16 bpp_x100; u16 video_size; u32 ratio_x1000; u16 null_pkt_size = 0; u8 video_size_step; u32 hs_to; u32 total_bytes; u32 bytes_per_chunk; u32 chunks = 0; u32 bytes_left = 0; u32 chunk_overhead; const u8 pkt_header = 6; u8 coding; int div; u16 hline; u16 byte_cycle; coding = fmt_to_coding(dsi->slave->format); video_size = round_video_size(coding, vm->hactive); bpp_x100 = calc_bytes_per_pixel_x100(coding); video_size_step = calc_video_size_step(coding); ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000); hline = vm->hactive + vm->hsync_len + vm->hfront_porch + vm->hback_porch; writel(0, ctx->base + SOFT_RESET); dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 15, ctx->frame_ack_en); dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding); dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode); byte_cycle = 95 * hline * ratio_x1000 / 100000; dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle); byte_cycle = hline * ratio_x1000 / 1000; writel(byte_cycle, ctx->base + VIDEO_LINE_TIME); byte_cycle = vm->hsync_len * ratio_x1000 / 1000; dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle); byte_cycle = vm->hback_porch * ratio_x1000 / 1000; dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle); writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len); dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN); hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100; for (div = 0x80; (div < hs_to) && (div > 2); div--) { if ((hs_to % div) == 0) { writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG); writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG); writel(hs_to / div, ctx->base + HTX_TO_CONFIG); break; } } if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) { dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size); writel(0, ctx->base + VIDEO_NULLPKT_SIZE); dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0); } else { /* non burst transmission */ null_pkt_size = 0; /* bytes to be sent - first as one chunk */ bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header; /* hline total bytes from the DPI interface */ total_bytes = (vm->hactive + vm->hfront_porch) * ratio_x1000 / dsi->slave->lanes / 1000; /* check if the pixels actually fit on the DSI link */ if (total_bytes < bytes_per_chunk) { drm_err(dsi->drm, "current resolution can not be set\n"); return -EINVAL; } chunk_overhead = total_bytes - bytes_per_chunk; /* overhead higher than 1 -> enable multi packets */ if (chunk_overhead > 1) { /* multi packets */ for (video_size = video_size_step; video_size < vm->hactive; video_size += video_size_step) { if (vm->hactive * 1000 / video_size % 1000) continue; chunks = vm->hactive / video_size; bytes_per_chunk = bpp_x100 * video_size / 100 + pkt_header; if (total_bytes >= (bytes_per_chunk * chunks)) { bytes_left = total_bytes - bytes_per_chunk * chunks; break; } } /* prevent overflow (unsigned - unsigned) */ if (bytes_left > (pkt_header * chunks)) { null_pkt_size = (bytes_left - pkt_header * chunks) / chunks; /* 
avoid register overflow */ if (null_pkt_size > 1023) null_pkt_size = 1023; } } else { /* single packet */ chunks = 1; /* must be a multiple of 4 except 18 loosely */ for (video_size = vm->hactive; (video_size % video_size_step) != 0; video_size++) ; } dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size); writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE); dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks); } writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT); writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + SOFT_RESET); return 0; } static void sprd_dsi_edpi_video(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); const u32 fifo_depth = 1096; const u32 word_length = 4; u32 hactive = ctx->vm.hactive; u32 bpp_x100; u32 max_fifo_len; u8 coding; coding = fmt_to_coding(dsi->slave->format); bpp_x100 = calc_bytes_per_pixel_x100(coding); max_fifo_len = word_length * fifo_depth * 100 / bpp_x100; writel(0, ctx->base + SOFT_RESET); dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding); dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en); if (max_fifo_len > hactive) writel(hactive, ctx->base + DCS_WM_PKT_SIZE); else writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE); writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT); writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + SOFT_RESET); } /* * Send a packet on the generic interface, * this function has an active delay to wait for the buffer to clear. * The delay is limited to: * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT x register access time * the controller restricts the sending of. * * This function will not be able to send Null and Blanking packets due to * controller restriction */ static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type, const u8 *param, u16 len) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); u8 wc_lsbyte, wc_msbyte; u32 payload; int i, j, ret; if (vc > 3) return -EINVAL; /* 1st: for long packet, must config payload first */ ret = dsi_wait_tx_payload_fifo_empty(ctx); if (ret) { drm_err(dsi->drm, "tx payload fifo is not empty\n"); return ret; } if (len > 2) { for (i = 0, j = 0; i < len; i += j) { payload = 0; for (j = 0; (j < 4) && ((j + i) < (len)); j++) payload |= param[i + j] << (j * 8); writel(payload, ctx->base + GEN_PLD_DATA); } wc_lsbyte = len & 0xff; wc_msbyte = len >> 8; } else { wc_lsbyte = (len > 0) ? param[0] : 0; wc_msbyte = (len > 1) ? param[1] : 0; } /* 2nd: then set packet header */ ret = dsi_wait_tx_cmd_fifo_empty(ctx); if (ret) { drm_err(dsi->drm, "tx cmd fifo is not empty\n"); return ret; } writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16), ctx->base + GEN_HDR); return 0; } /* * Send READ packet to peripheral using the generic interface, * this will force command mode and stop video mode (because of BTA). 
* * This function has an active delay to wait for the buffer to clear, * the delay is limited to 2 x DSIH_FIFO_ACTIVE_WAIT * (waiting for command buffer, and waiting for receiving) * @note this function will enable BTA */ static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type, u8 msb_byte, u8 lsb_byte, u8 *buffer, u8 bytes_to_read) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int i, ret; int count = 0; u32 temp; if (vc > 3) return -EINVAL; /* 1st: send read command to peripheral */ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5); if (!ret) return -EIO; writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16), ctx->base + GEN_HDR); /* 2nd: wait peripheral response completed */ ret = dsi_wait_rd_resp_completed(ctx); if (ret) { drm_err(dsi->drm, "wait read response time out\n"); return ret; } /* 3rd: get data from rx payload fifo */ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); if (ret) { drm_err(dsi->drm, "rx payload fifo empty\n"); return -EIO; } for (i = 0; i < 100; i++) { temp = readl(ctx->base + GEN_PLD_DATA); if (count < bytes_to_read) buffer[count++] = temp & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 8) & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 16) & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 24) & 0xff; ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); if (ret) return count; } return 0; } static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode) { if (mode == DSI_MODE_CMD) writel(1, ctx->base + DSI_MODE_CFG); else writel(0, ctx->base + DSI_MODE_CFG); } static void sprd_dsi_state_reset(struct dsi_context *ctx) { writel(0, ctx->base + SOFT_RESET); udelay(100); writel(1, ctx->base + SOFT_RESET); } static int sprd_dphy_init(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int ret; dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); dphy_pll_config(ctx); dphy_timing_config(ctx); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N); writel(0x1C, ctx->base + PHY_MIN_STOP_TIME); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG); ret = dphy_wait_pll_locked(ctx); if (ret) { drm_err(dsi->drm, "dphy initial failed\n"); return ret; } return 0; } static void sprd_dphy_fini(struct dsi_context *ctx) { dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N); } static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm); } static void sprd_dsi_encoder_enable(struct drm_encoder *encoder) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); struct dsi_context *ctx = &dsi->ctx; if (ctx->enabled) { drm_warn(dsi->drm, "dsi is initialized\n"); return; } sprd_dsi_init(ctx); if (ctx->work_mode == DSI_MODE_VIDEO) sprd_dsi_dpi_video(ctx); 
else sprd_dsi_edpi_video(ctx); sprd_dphy_init(ctx); sprd_dsi_set_work_mode(ctx, ctx->work_mode); sprd_dsi_state_reset(ctx); if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN, AUTO_CLKLANE_CTRL_EN); } else { dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS, PHY_CLKLANE_TX_REQ_HS); dphy_wait_pll_locked(ctx); } sprd_dpu_run(dpu); ctx->enabled = true; } static void sprd_dsi_encoder_disable(struct drm_encoder *encoder) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); struct dsi_context *ctx = &dsi->ctx; if (!ctx->enabled) { drm_warn(dsi->drm, "dsi isn't initialized\n"); return; } sprd_dpu_stop(dpu); sprd_dphy_fini(ctx); sprd_dsi_fini(ctx); ctx->enabled = false; } static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = { .mode_set = sprd_dsi_encoder_mode_set, .enable = sprd_dsi_encoder_enable, .disable = sprd_dsi_encoder_disable }; static const struct drm_encoder_funcs sprd_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static int sprd_dsi_encoder_init(struct sprd_dsi *dsi, struct device *dev) { struct drm_encoder *encoder = &dsi->encoder; u32 crtc_mask; int ret; crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node); if (!crtc_mask) { drm_err(dsi->drm, "failed to find crtc mask\n"); return -EINVAL; } drm_dbg(dsi->drm, "find possible crtcs: 0x%08x\n", crtc_mask); encoder->possible_crtcs = crtc_mask; ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs, DRM_MODE_ENCODER_DSI, NULL); if (ret) { drm_err(dsi->drm, "failed to init dsi encoder\n"); return ret; } drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs); return 0; } static int sprd_dsi_bridge_init(struct sprd_dsi *dsi, struct device *dev) { int ret; dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); if (IS_ERR(dsi->panel_bridge)) return PTR_ERR(dsi->panel_bridge); ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0); if (ret) return ret; return 0; } static int sprd_dsi_context_init(struct sprd_dsi *dsi, struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dsi_context *ctx = &dsi->ctx; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "failed to get I/O resource\n"); return -EINVAL; } ctx->base = devm_ioremap(dev, res->start, resource_size(res)); if (!ctx->base) { drm_err(dsi->drm, "failed to map dsi host registers\n"); return -ENXIO; } ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config); if (IS_ERR(ctx->regmap)) { drm_err(dsi->drm, "dphy regmap init failed\n"); return PTR_ERR(ctx->regmap); } ctx->data_hs2lp = 120; ctx->data_lp2hs = 500; ctx->clk_hs2lp = 4; ctx->clk_lp2hs = 15; ctx->max_rd_time = 6000; ctx->int0_mask = 0xffffffff; ctx->int1_mask = 0xffffffff; ctx->enabled = true; return 0; } static int sprd_dsi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct sprd_dsi *dsi = dev_get_drvdata(dev); int ret; dsi->drm = drm; ret = sprd_dsi_encoder_init(dsi, dev); if (ret) return ret; ret = sprd_dsi_bridge_init(dsi, dev); if (ret) return ret; ret = sprd_dsi_context_init(dsi, dev); if (ret) return ret; return 0; } static void sprd_dsi_unbind(struct device *dev, struct device *master, void *data) { struct sprd_dsi *dsi = dev_get_drvdata(dev); drm_of_panel_bridge_remove(dev->of_node, 1, 0); 
drm_encoder_cleanup(&dsi->encoder); } static const struct component_ops dsi_component_ops = { .bind = sprd_dsi_bind, .unbind = sprd_dsi_unbind, }; static int sprd_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *slave) { struct sprd_dsi *dsi = host_to_dsi(host); struct dsi_context *ctx = &dsi->ctx; dsi->slave = slave; if (slave->mode_flags & MIPI_DSI_MODE_VIDEO) ctx->work_mode = DSI_MODE_VIDEO; else ctx->work_mode = DSI_MODE_CMD; if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES; else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES; else ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS; return component_add(host->dev, &dsi_component_ops); } static int sprd_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *slave) { component_del(host->dev, &dsi_component_ops); return 0; } static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct sprd_dsi *dsi = host_to_dsi(host); const u8 *tx_buf = msg->tx_buf; if (msg->rx_buf && msg->rx_len) { u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0; u8 msb = (msg->tx_len > 1) ? tx_buf[1] : 0; return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type, msb, lsb, msg->rx_buf, msg->rx_len); } if (msg->tx_buf && msg->tx_len) return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type, tx_buf, msg->tx_len); return 0; } static const struct mipi_dsi_host_ops sprd_dsi_host_ops = { .attach = sprd_dsi_host_attach, .detach = sprd_dsi_host_detach, .transfer = sprd_dsi_host_transfer, }; static const struct of_device_id dsi_match_table[] = { { .compatible = "sprd,sharkl3-dsi-host" }, { /* sentinel */ }, }; static int sprd_dsi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sprd_dsi *dsi; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; dev_set_drvdata(dev, dsi); dsi->host.ops = &sprd_dsi_host_ops; dsi->host.dev = dev; return mipi_dsi_host_register(&dsi->host); } static int sprd_dsi_remove(struct platform_device *pdev) { struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev); mipi_dsi_host_unregister(&dsi->host); return 0; } struct platform_driver sprd_dsi_driver = { .probe = sprd_dsi_probe, .remove = sprd_dsi_remove, .driver = { .name = "sprd-dsi-drv", .of_match_table = dsi_match_table, }, }; MODULE_AUTHOR("Leon He <[email protected]>"); MODULE_AUTHOR("Kevin Tang <[email protected]>"); MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/sprd/sprd_dsi.c
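The sprd_dsi_dpi_video() path in the record above converts DPI pixel-clock timings into byte-clock cycles before programming the line-time and blanking registers. Below is a minimal, self-contained sketch of that arithmetic, not driver code: the lane rate and 1080p-style timings are assumed example values, and the kHz/Hz units are inferred from the surrounding code.

/*
 * Illustration only: the unit conversions sprd_dsi_dpi_video() performs
 * when translating DPI pixel-clock timings into byte-clock cycles.
 * The numbers below (800 Mbps lane rate, 1080p-like timings) are
 * hypothetical; the driver appears to keep hs_rate in kHz and the
 * videomode pixelclock in Hz.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hs_rate_khz = 800000;	/* assumed lane HS rate, kHz */
	unsigned int byte_clk = hs_rate_khz / 8;	/* byte clock, kHz */
	unsigned long pixelclock_hz = 148500000;	/* assumed DPI pixel clock */
	unsigned int hactive = 1920, hsync = 44, hfp = 88, hbp = 148;
	unsigned int hline = hactive + hsync + hfp + hbp;

	/* byte clocks per pixel clock, scaled by 1000 as in the driver */
	unsigned int ratio_x1000 = byte_clk * 1000 / (pixelclock_hz / 1000);

	/* whole line time and horizontal blanking, in byte-clock cycles */
	unsigned int line_cycles = hline * ratio_x1000 / 1000;
	unsigned int hsa_cycles = hsync * ratio_x1000 / 1000;
	unsigned int hbp_cycles = hbp * ratio_x1000 / 1000;

	printf("ratio_x1000=%u line=%u hsa=%u hbp=%u\n",
	       ratio_x1000, line_cycles, hsa_cycles, hbp_cycles);
	return 0;
}

Scaling the byte/pixel clock ratio by 1000 keeps the whole computation in integer math at the cost of a small truncation error per line.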
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Unisoc Inc. */ #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "sprd_drm.h" #define DRIVER_NAME "sprd" #define DRIVER_DESC "Spreadtrum SoCs' DRM Driver" #define DRIVER_DATE "20200201" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 static const struct drm_mode_config_helper_funcs sprd_drm_mode_config_helper = { .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; static const struct drm_mode_config_funcs sprd_drm_mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static void sprd_drm_mode_config_init(struct drm_device *drm) { drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = 8192; drm->mode_config.max_height = 8192; drm->mode_config.funcs = &sprd_drm_mode_config_funcs; drm->mode_config.helper_private = &sprd_drm_mode_config_helper; } DEFINE_DRM_GEM_DMA_FOPS(sprd_drm_fops); static struct drm_driver sprd_drm_drv = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &sprd_drm_fops, /* GEM Operations */ DRM_GEM_DMA_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, }; static int sprd_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm; struct sprd_drm *sprd; int ret; sprd = devm_drm_dev_alloc(dev, &sprd_drm_drv, struct sprd_drm, drm); if (IS_ERR(sprd)) return PTR_ERR(sprd); drm = &sprd->drm; platform_set_drvdata(pdev, drm); ret = drmm_mode_config_init(drm); if (ret) return ret; sprd_drm_mode_config_init(drm); /* bind and init sub drivers */ ret = component_bind_all(drm->dev, drm); if (ret) { drm_err(drm, "failed to bind all component.\n"); return ret; } /* vblank init */ ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret) { drm_err(drm, "failed to initialize vblank.\n"); goto err_unbind_all; } /* reset all the states of crtc/plane/encoder/connector */ drm_mode_config_reset(drm); /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm); ret = drm_dev_register(drm, 0); if (ret < 0) goto err_kms_helper_poll_fini; return 0; err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm); err_unbind_all: component_unbind_all(drm->dev, drm); return ret; } static void sprd_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); component_unbind_all(drm->dev, drm); } static const struct component_master_ops drm_component_ops = { .bind = sprd_drm_bind, .unbind = sprd_drm_unbind, }; static int sprd_drm_probe(struct platform_device *pdev) { return drm_of_component_probe(&pdev->dev, component_compare_of, &drm_component_ops); } static int sprd_drm_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &drm_component_ops); return 0; } static void sprd_drm_shutdown(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); if (!drm) { dev_warn(&pdev->dev, "drm device is not available, no shutdown\n"); return; } drm_atomic_helper_shutdown(drm); 
} static const struct of_device_id drm_match_table[] = { { .compatible = "sprd,display-subsystem", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, drm_match_table); static struct platform_driver sprd_drm_driver = { .probe = sprd_drm_probe, .remove = sprd_drm_remove, .shutdown = sprd_drm_shutdown, .driver = { .name = "sprd-drm-drv", .of_match_table = drm_match_table, }, }; static struct platform_driver *sprd_drm_drivers[] = { &sprd_drm_driver, &sprd_dpu_driver, &sprd_dsi_driver, }; static int __init sprd_drm_init(void) { if (drm_firmware_drivers_only()) return -ENODEV; return platform_register_drivers(sprd_drm_drivers, ARRAY_SIZE(sprd_drm_drivers)); } static void __exit sprd_drm_exit(void) { platform_unregister_drivers(sprd_drm_drivers, ARRAY_SIZE(sprd_drm_drivers)); } module_init(sprd_drm_init); module_exit(sprd_drm_exit); MODULE_AUTHOR("Leon He <[email protected]>"); MODULE_AUTHOR("Kevin Tang <[email protected]>"); MODULE_DESCRIPTION("Unisoc DRM KMS Master Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/sprd/sprd_drm.c
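sprd_drm_bind() in the record above binds the sub-components, initializes vblank support and registers the DRM device, and on a later failure it unwinds the earlier steps in reverse order through its error labels. The following is a hedged, standalone illustration of that goto-unwind idiom; the resources A/B/C are hypothetical stand-ins, not kernel APIs.

/*
 * Illustration only: init A, B, C in order; if C fails, undo B then A,
 * mirroring the err_kms_helper_poll_fini / err_unbind_all labels in
 * sprd_drm_bind().
 */
#include <stdio.h>

static int setup_a(void) { puts("A up");   return 0; }
static void undo_a(void) { puts("A down"); }
static int setup_b(void) { puts("B up");   return 0; }
static void undo_b(void) { puts("B down"); }
static int setup_c(void) { puts("C up");   return -1; /* simulate failure */ }

static int bind_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto err_undo_a;

	ret = setup_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	return bind_all() ? 1 : 0;
}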
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Unisoc Inc. */ #include <asm/div64.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/regmap.h> #include <linux/string.h> #include "sprd_dsi.h" #define L 0 #define H 1 #define CLK 0 #define DATA 1 #define INFINITY 0xffffffff #define MIN_OUTPUT_FREQ (100) #define AVERAGE(a, b) (min(a, b) + abs((b) - (a)) / 2) /* sharkle */ #define VCO_BAND_LOW 750 #define VCO_BAND_MID 1100 #define VCO_BAND_HIGH 1500 #define PHY_REF_CLK 26000 static int dphy_calc_pll_param(struct dphy_pll *pll) { const u32 khz = 1000; const u32 mhz = 1000000; const unsigned long long factor = 100; unsigned long long tmp; int i; pll->potential_fvco = pll->freq / khz; pll->ref_clk = PHY_REF_CLK / khz; for (i = 0; i < 4; ++i) { if (pll->potential_fvco >= VCO_BAND_LOW && pll->potential_fvco <= VCO_BAND_HIGH) { pll->fvco = pll->potential_fvco; pll->out_sel = BIT(i); break; } pll->potential_fvco <<= 1; } if (pll->fvco == 0) return -EINVAL; if (pll->fvco >= VCO_BAND_LOW && pll->fvco <= VCO_BAND_MID) { /* vco band control */ pll->vco_band = 0x0; /* low pass filter control */ pll->lpf_sel = 1; } else if (pll->fvco > VCO_BAND_MID && pll->fvco <= VCO_BAND_HIGH) { pll->vco_band = 0x1; pll->lpf_sel = 0; } else { return -EINVAL; } pll->nint = pll->fvco / pll->ref_clk; tmp = pll->fvco * factor * mhz; do_div(tmp, pll->ref_clk); tmp = tmp - pll->nint * factor * mhz; tmp *= BIT(20); do_div(tmp, 100000000); pll->kint = (u32)tmp; pll->refin = 3; /* pre-divider bypass */ pll->sdm_en = true; /* use fraction N PLL */ pll->fdk_s = 0x1; /* fraction */ pll->cp_s = 0x0; pll->det_delay = 0x1; return 0; } static void dphy_set_pll_reg(struct dphy_pll *pll, struct regmap *regmap) { u8 reg_val[9] = {0}; int i; u8 reg_addr[] = { 0x03, 0x04, 0x06, 0x08, 0x09, 0x0a, 0x0b, 0x0e, 0x0f }; reg_val[0] = 1 | (1 << 1) | (pll->lpf_sel << 2); reg_val[1] = pll->div | (1 << 3) | (pll->cp_s << 5) | (pll->fdk_s << 7); reg_val[2] = pll->nint; reg_val[3] = pll->vco_band | (pll->sdm_en << 1) | (pll->refin << 2); reg_val[4] = pll->kint >> 12; reg_val[5] = pll->kint >> 4; reg_val[6] = pll->out_sel | ((pll->kint << 4) & 0xf); reg_val[7] = 1 << 4; reg_val[8] = pll->det_delay; for (i = 0; i < sizeof(reg_addr); ++i) { regmap_write(regmap, reg_addr[i], reg_val[i]); DRM_DEBUG("%02x: %02x\n", reg_addr[i], reg_val[i]); } } int dphy_pll_config(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); struct regmap *regmap = ctx->regmap; struct dphy_pll *pll = &ctx->pll; int ret; pll->freq = dsi->slave->hs_rate; /* FREQ = 26M * (NINT + KINT / 2^20) / out_sel */ ret = dphy_calc_pll_param(pll); if (ret) { drm_err(dsi->drm, "failed to calculate dphy pll parameters\n"); return ret; } dphy_set_pll_reg(pll, regmap); return 0; } static void dphy_set_timing_reg(struct regmap *regmap, int type, u8 val[]) { switch (type) { case REQUEST_TIME: regmap_write(regmap, 0x31, val[CLK]); regmap_write(regmap, 0x41, val[DATA]); regmap_write(regmap, 0x51, val[DATA]); regmap_write(regmap, 0x61, val[DATA]); regmap_write(regmap, 0x71, val[DATA]); regmap_write(regmap, 0x90, val[CLK]); regmap_write(regmap, 0xa0, val[DATA]); regmap_write(regmap, 0xb0, val[DATA]); regmap_write(regmap, 0xc0, val[DATA]); regmap_write(regmap, 0xd0, val[DATA]); break; case PREPARE_TIME: regmap_write(regmap, 0x32, val[CLK]); regmap_write(regmap, 0x42, val[DATA]); regmap_write(regmap, 0x52, val[DATA]); regmap_write(regmap, 0x62, val[DATA]); regmap_write(regmap, 0x72, val[DATA]); regmap_write(regmap, 
0x91, val[CLK]); regmap_write(regmap, 0xa1, val[DATA]); regmap_write(regmap, 0xb1, val[DATA]); regmap_write(regmap, 0xc1, val[DATA]); regmap_write(regmap, 0xd1, val[DATA]); break; case ZERO_TIME: regmap_write(regmap, 0x33, val[CLK]); regmap_write(regmap, 0x43, val[DATA]); regmap_write(regmap, 0x53, val[DATA]); regmap_write(regmap, 0x63, val[DATA]); regmap_write(regmap, 0x73, val[DATA]); regmap_write(regmap, 0x92, val[CLK]); regmap_write(regmap, 0xa2, val[DATA]); regmap_write(regmap, 0xb2, val[DATA]); regmap_write(regmap, 0xc2, val[DATA]); regmap_write(regmap, 0xd2, val[DATA]); break; case TRAIL_TIME: regmap_write(regmap, 0x34, val[CLK]); regmap_write(regmap, 0x44, val[DATA]); regmap_write(regmap, 0x54, val[DATA]); regmap_write(regmap, 0x64, val[DATA]); regmap_write(regmap, 0x74, val[DATA]); regmap_write(regmap, 0x93, val[CLK]); regmap_write(regmap, 0xa3, val[DATA]); regmap_write(regmap, 0xb3, val[DATA]); regmap_write(regmap, 0xc3, val[DATA]); regmap_write(regmap, 0xd3, val[DATA]); break; case EXIT_TIME: regmap_write(regmap, 0x36, val[CLK]); regmap_write(regmap, 0x46, val[DATA]); regmap_write(regmap, 0x56, val[DATA]); regmap_write(regmap, 0x66, val[DATA]); regmap_write(regmap, 0x76, val[DATA]); regmap_write(regmap, 0x95, val[CLK]); regmap_write(regmap, 0xA5, val[DATA]); regmap_write(regmap, 0xB5, val[DATA]); regmap_write(regmap, 0xc5, val[DATA]); regmap_write(regmap, 0xd5, val[DATA]); break; case CLKPOST_TIME: regmap_write(regmap, 0x35, val[CLK]); regmap_write(regmap, 0x94, val[CLK]); break; /* the following just use default value */ case SETTLE_TIME: fallthrough; case TA_GET: fallthrough; case TA_GO: fallthrough; case TA_SURE: fallthrough; default: break; } } void dphy_timing_config(struct dsi_context *ctx) { struct regmap *regmap = ctx->regmap; struct dphy_pll *pll = &ctx->pll; const u32 factor = 2; const u32 scale = 100; u32 t_ui, t_byteck, t_half_byteck; u32 range[2], constant; u8 val[2]; u32 tmp = 0; /* t_ui: 1 ui, byteck: 8 ui, half byteck: 4 ui */ t_ui = 1000 * scale / (pll->freq / 1000); t_byteck = t_ui << 3; t_half_byteck = t_ui << 2; constant = t_ui << 1; /* REQUEST_TIME: HS T-LPX: LP-01 * For T-LPX, mipi spec defined min value is 50ns, * but maybe it shouldn't be too small, because BTA, * LP-10, LP-00, LP-01, all of this is related to T-LPX. 
*/ range[L] = 50 * scale; range[H] = INFINITY; val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2; val[DATA] = val[CLK]; dphy_set_timing_reg(regmap, REQUEST_TIME, val); /* PREPARE_TIME: HS sequence: LP-00 */ range[L] = 38 * scale; range[H] = 95 * scale; tmp = AVERAGE(range[L], range[H]); val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; range[L] = 40 * scale + 4 * t_ui; range[H] = 85 * scale + 6 * t_ui; tmp |= AVERAGE(range[L], range[H]) << 16; val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; dphy_set_timing_reg(regmap, PREPARE_TIME, val); /* ZERO_TIME: HS-ZERO */ range[L] = 300 * scale; range[H] = INFINITY; val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff) - 525 * t_byteck / 100, t_byteck) - 2; range[L] = 145 * scale + 10 * t_ui; val[DATA] = DIV_ROUND_UP(range[L] * factor + ((tmp >> 16) & 0xffff) - 525 * t_byteck / 100, t_byteck) - 2; dphy_set_timing_reg(regmap, ZERO_TIME, val); /* TRAIL_TIME: HS-TRAIL */ range[L] = 60 * scale; range[H] = INFINITY; val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck); range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui); val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2; dphy_set_timing_reg(regmap, TRAIL_TIME, val); /* EXIT_TIME: */ range[L] = 100 * scale; range[H] = INFINITY; val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; val[DATA] = val[CLK]; dphy_set_timing_reg(regmap, EXIT_TIME, val); /* CLKPOST_TIME: */ range[L] = 60 * scale + 52 * t_ui; range[H] = INFINITY; val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; val[DATA] = val[CLK]; dphy_set_timing_reg(regmap, CLKPOST_TIME, val); /* SETTLE_TIME: * This time is used for receiver. So for transmitter, * it can be ignored. */ /* TA_GO: * transmitter drives bridge state(LP-00) before releasing control, * reg 0x1f default value: 0x04, which is good. */ /* TA_SURE: * After LP-10 state and before bridge state(LP-00), * reg 0x20 default value: 0x01, which is good. */ /* TA_GET: * receiver drives Bridge state(LP-00) before releasing control * reg 0x21 default value: 0x03, which is good. */ }
linux-master
drivers/gpu/drm/sprd/megacores_pll.c
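The Megacores PLL record above programs a fractional-N divider, FREQ = 26 MHz x (NINT + KINT / 2^20), with the VCO doubled into its 750-1500 MHz band and an output divider selected to compensate. Here is a small sketch of that arithmetic under assumed inputs: the 900 Mbps target is hypothetical, and kint is computed by a mathematically equivalent shortcut rather than the driver's x100 scaling.

/*
 * Illustration only: fractional-N parameter calculation in the spirit of
 * dphy_calc_pll_param(), with a 26 MHz reference and a 750..1500 MHz VCO.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ref_mhz = 26;		/* 26 MHz reference, as in the driver */
	uint64_t target_mhz = 900;	/* assumed HS bit rate in MHz */
	uint64_t fvco = target_mhz;
	unsigned int out_sel = 1;

	/* double until the VCO lands in its band (at most three doublings) */
	while (fvco < 750 && out_sel < 8) {
		fvco <<= 1;
		out_sel <<= 1;
	}

	uint64_t nint = fvco / ref_mhz;
	/* fractional part scaled to 2^20, mirroring the kint computation */
	uint64_t kint = ((fvco % ref_mhz) << 20) / ref_mhz;

	printf("fvco=%llu MHz out_sel=%u nint=%llu kint=%llu\n",
	       (unsigned long long)fvco, out_sel,
	       (unsigned long long)nint, (unsigned long long)kint);

	/* sanity check: reconstruct the output frequency */
	double out = 26.0 * (nint + kint / 1048576.0) / out_sel;
	printf("reconstructed %.3f MHz\n", out);
	return 0;
}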
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include <linux/highmem.h> #ifdef CONFIG_64BIT #define VMW_PPN_SIZE 8 #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0 #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT64_1 #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT64_2 #else #define VMW_PPN_SIZE 4 #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT_0 #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT_1 #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT_2 #endif /* * struct vmw_mob - Structure containing page table and metadata for a * Guest Memory OBject. * * @num_pages Number of pages that make up the page table. * @pt_level The indirection level of the page table. 0-2. * @pt_root_page DMA address of the level 0 page of the page table. */ struct vmw_mob { struct vmw_bo *pt_bo; unsigned long num_pages; unsigned pt_level; dma_addr_t pt_root_page; uint32_t id; }; /* * struct vmw_otable - Guest Memory OBject table metadata * * @size: Size of the table (page-aligned). * @page_table: Pointer to a struct vmw_mob holding the page table. 
*/ static const struct vmw_otable pre_dx_tables[] = { {VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true}, {VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true}, {VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true}, {VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true}, {VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry), NULL, true} }; static const struct vmw_otable dx_tables[] = { {VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true}, {VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true}, {VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true}, {VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true}, {VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry), NULL, true}, {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, }; static int vmw_mob_pt_populate(struct vmw_private *dev_priv, struct vmw_mob *mob); static void vmw_mob_pt_setup(struct vmw_mob *mob, struct vmw_piter data_iter, unsigned long num_data_pages); static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo) { int ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); ttm_bo_unpin(bo); ttm_bo_unreserve(bo); } /* * vmw_setup_otable_base - Issue an object table base setup command to * the device * * @dev_priv: Pointer to a device private structure * @type: Type of object table base * @offset Start of table offset into dev_priv::otable_bo * @otable Pointer to otable metadata; * * This function returns -ENOMEM if it fails to reserve fifo space, * and may block waiting for fifo space. */ static int vmw_setup_otable_base(struct vmw_private *dev_priv, SVGAOTableType type, struct ttm_buffer_object *otable_bo, unsigned long offset, struct vmw_otable *otable) { struct { SVGA3dCmdHeader header; SVGA3dCmdSetOTableBase64 body; } *cmd; struct vmw_mob *mob; const struct vmw_sg_table *vsgt; struct vmw_piter iter; int ret; BUG_ON(otable->page_table != NULL); vsgt = vmw_bo_sg_table(otable_bo); vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); WARN_ON(!vmw_piter_next(&iter)); mob = vmw_mob_create(otable->size >> PAGE_SHIFT); if (unlikely(mob == NULL)) { DRM_ERROR("Failed creating OTable page table.\n"); return -ENOMEM; } if (otable->size <= PAGE_SIZE) { mob->pt_level = VMW_MOBFMT_PTDEPTH_0; mob->pt_root_page = vmw_piter_dma_addr(&iter); } else { ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) goto out_no_populate; vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1; } cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { ret = -ENOMEM; goto out_no_fifo; } memset(cmd, 0, sizeof(*cmd)); cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; cmd->header.size = sizeof(cmd->body); cmd->body.type = type; cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT; cmd->body.sizeInBytes = otable->size; cmd->body.validSizeInBytes = 0; cmd->body.ptDepth = mob->pt_level; /* * The device doesn't support this, But the otable size is * determined at compile-time, so this BUG shouldn't trigger * randomly. 
*/ BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); vmw_cmd_commit(dev_priv, sizeof(*cmd)); otable->page_table = mob; return 0; out_no_fifo: out_no_populate: vmw_mob_destroy(mob); return ret; } /* * vmw_takedown_otable_base - Issue an object table base takedown command * to the device * * @dev_priv: Pointer to a device private structure * @type: Type of object table base * */ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, SVGAOTableType type, struct vmw_otable *otable) { struct { SVGA3dCmdHeader header; SVGA3dCmdSetOTableBase body; } *cmd; struct ttm_buffer_object *bo; if (otable->page_table == NULL) return; bo = &otable->page_table->pt_bo->tbo; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return; memset(cmd, 0, sizeof(*cmd)); cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; cmd->header.size = sizeof(cmd->body); cmd->body.type = type; cmd->body.baseAddress = 0; cmd->body.sizeInBytes = 0; cmd->body.validSizeInBytes = 0; cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; vmw_cmd_commit(dev_priv, sizeof(*cmd)); if (bo) { int ret; ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); } vmw_mob_destroy(otable->page_table); otable->page_table = NULL; } static int vmw_otable_batch_setup(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) { unsigned long offset; unsigned long bo_size; struct vmw_otable *otables = batch->otables; SVGAOTableType i; int ret; bo_size = 0; for (i = 0; i < batch->num_otables; ++i) { if (!otables[i].enabled) continue; otables[i].size = PFN_ALIGN(otables[i].size); bo_size += otables[i].size; } ret = vmw_bo_create_and_populate(dev_priv, bo_size, VMW_BO_DOMAIN_WAITABLE_SYS, &batch->otable_bo); if (unlikely(ret != 0)) return ret; offset = 0; for (i = 0; i < batch->num_otables; ++i) { if (!batch->otables[i].enabled) continue; ret = vmw_setup_otable_base(dev_priv, i, &batch->otable_bo->tbo, offset, &otables[i]); if (unlikely(ret != 0)) goto out_no_setup; offset += otables[i].size; } return 0; out_no_setup: for (i = 0; i < batch->num_otables; ++i) { if (batch->otables[i].enabled) vmw_takedown_otable_base(dev_priv, i, &batch->otables[i]); } vmw_bo_unpin_unlocked(&batch->otable_bo->tbo); ttm_bo_put(&batch->otable_bo->tbo); batch->otable_bo = NULL; return ret; } /* * vmw_otables_setup - Set up guest backed memory object tables * * @dev_priv: Pointer to a device private structure * * Takes care of the device guest backed surface * initialization, by setting up the guest backed memory object tables. * Returns 0 on success and various error codes on failure. A successful return * means the object tables can be taken down using the vmw_otables_takedown * function. 
*/ int vmw_otables_setup(struct vmw_private *dev_priv) { struct vmw_otable **otables = &dev_priv->otable_batch.otables; int ret; if (has_sm4_context(dev_priv)) { *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); } else { *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), GFP_KERNEL); if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); } ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); if (unlikely(ret != 0)) goto out_setup; return 0; out_setup: kfree(*otables); return ret; } static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) { SVGAOTableType i; struct ttm_buffer_object *bo = &batch->otable_bo->tbo; int ret; for (i = 0; i < batch->num_otables; ++i) if (batch->otables[i].enabled) vmw_takedown_otable_base(dev_priv, i, &batch->otables[i]); ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); vmw_bo_fence_single(bo, NULL); ttm_bo_unpin(bo); ttm_bo_unreserve(bo); vmw_bo_unreference(&batch->otable_bo); } /* * vmw_otables_takedown - Take down guest backed memory object tables * * @dev_priv: Pointer to a device private structure * * Take down the Guest Memory Object tables. */ void vmw_otables_takedown(struct vmw_private *dev_priv) { vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); kfree(dev_priv->otable_batch.otables); } /* * vmw_mob_calculate_pt_pages - Calculate the number of page table pages * needed for a guest backed memory object. * * @data_pages: Number of data pages in the memory object buffer. */ static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) { unsigned long data_size = data_pages * PAGE_SIZE; unsigned long tot_size = 0; while (likely(data_size > PAGE_SIZE)) { data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); data_size *= VMW_PPN_SIZE; tot_size += PFN_ALIGN(data_size); } return tot_size >> PAGE_SHIFT; } /* * vmw_mob_create - Create a mob, but don't populate it. * * @data_pages: Number of data pages of the underlying buffer object. */ struct vmw_mob *vmw_mob_create(unsigned long data_pages) { struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); if (unlikely(!mob)) return NULL; mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); return mob; } /* * vmw_mob_pt_populate - Populate the mob pagetable * * @mob: Pointer to the mob the pagetable of which we want to * populate. * * This function allocates memory to be used for the pagetable. * Returns ENOMEM if memory resources aren't sufficient and may * cause TTM buffer objects to be swapped out. */ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, struct vmw_mob *mob) { BUG_ON(mob->pt_bo != NULL); return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, VMW_BO_DOMAIN_WAITABLE_SYS, &mob->pt_bo); } /** * vmw_mob_assign_ppn - Assign a value to a page table entry * * @addr: Pointer to pointer to page table entry. * @val: The page table entry * * Assigns a value to a page table entry pointed to by *@addr and increments * *@addr according to the page table entry size. */ #if (VMW_PPN_SIZE == 8) static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val) { *((u64 *) *addr) = val >> PAGE_SHIFT; *addr += 2; } #else static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val) { *(*addr)++ = val >> PAGE_SHIFT; } #endif /* * vmw_mob_build_pt - Build a pagetable * * @data_addr: Array of DMA addresses to the underlying buffer * object's data pages. 
* @num_data_pages: Number of buffer object data pages. * @pt_pages: Array of page pointers to the page table pages. * * Returns the number of page table pages actually used. * Uses atomic kmaps of highmem pages to avoid TLB thrashing. */ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, unsigned long num_data_pages, struct vmw_piter *pt_iter) { unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); unsigned long pt_page; u32 *addr, *save_addr; unsigned long i; struct page *page; for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { page = vmw_piter_page(pt_iter); save_addr = addr = kmap_atomic(page); for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { vmw_mob_assign_ppn(&addr, vmw_piter_dma_addr(data_iter)); if (unlikely(--num_data_pages == 0)) break; WARN_ON(!vmw_piter_next(data_iter)); } kunmap_atomic(save_addr); vmw_piter_next(pt_iter); } return num_pt_pages; } /* * vmw_mob_build_pt - Set up a multilevel mob pagetable * * @mob: Pointer to a mob whose page table needs setting up. * @data_addr Array of DMA addresses to the buffer object's data * pages. * @num_data_pages: Number of buffer object data pages. * * Uses tail recursion to set up a multilevel mob page table. */ static void vmw_mob_pt_setup(struct vmw_mob *mob, struct vmw_piter data_iter, unsigned long num_data_pages) { unsigned long num_pt_pages = 0; struct ttm_buffer_object *bo = &mob->pt_bo->tbo; struct vmw_piter save_pt_iter = {0}; struct vmw_piter pt_iter; const struct vmw_sg_table *vsgt; int ret; BUG_ON(num_data_pages == 0); ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); vsgt = vmw_bo_sg_table(bo); vmw_piter_start(&pt_iter, vsgt, 0); BUG_ON(!vmw_piter_next(&pt_iter)); mob->pt_level = 0; while (likely(num_data_pages > 1)) { ++mob->pt_level; BUG_ON(mob->pt_level > 2); save_pt_iter = pt_iter; num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, &pt_iter); data_iter = save_pt_iter; num_data_pages = num_pt_pages; } mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); ttm_bo_unreserve(bo); } /* * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. * * @mob: Pointer to a mob to destroy. */ void vmw_mob_destroy(struct vmw_mob *mob) { if (mob->pt_bo) { vmw_bo_unpin_unlocked(&mob->pt_bo->tbo); vmw_bo_unreference(&mob->pt_bo); } kfree(mob); } /* * vmw_mob_unbind - Hide a mob from the device. * * @dev_priv: Pointer to a device private. * @mob_id: Device id of the mob to unbind. */ void vmw_mob_unbind(struct vmw_private *dev_priv, struct vmw_mob *mob) { struct { SVGA3dCmdHeader header; SVGA3dCmdDestroyGBMob body; } *cmd; int ret; struct ttm_buffer_object *bo = &mob->pt_bo->tbo; if (bo) { ret = ttm_bo_reserve(bo, false, true, NULL); /* * Noone else should be using this buffer. */ BUG_ON(ret != 0); } cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (cmd) { cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; cmd->header.size = sizeof(cmd->body); cmd->body.mobid = mob->id; vmw_cmd_commit(dev_priv, sizeof(*cmd)); } if (bo) { vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); } vmw_fifo_resource_dec(dev_priv); } /* * vmw_mob_bind - Make a mob visible to the device after first * populating it if necessary. * * @dev_priv: Pointer to a device private. * @mob: Pointer to the mob we're making visible. * @data_addr: Array of DMA addresses to the data pages of the underlying * buffer object. * @num_data_pages: Number of data pages of the underlying buffer * object. 
* @mob_id: Device id of the mob to bind * * This function is intended to be interfaced with the ttm_tt backend * code. */ int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, const struct vmw_sg_table *vsgt, unsigned long num_data_pages, int32_t mob_id) { int ret; bool pt_set_up = false; struct vmw_piter data_iter; struct { SVGA3dCmdHeader header; SVGA3dCmdDefineGBMob64 body; } *cmd; mob->id = mob_id; vmw_piter_start(&data_iter, vsgt, 0); if (unlikely(!vmw_piter_next(&data_iter))) return 0; if (likely(num_data_pages == 1)) { mob->pt_level = VMW_MOBFMT_PTDEPTH_0; mob->pt_root_page = vmw_piter_dma_addr(&data_iter); } else if (unlikely(mob->pt_bo == NULL)) { ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) return ret; vmw_mob_pt_setup(mob, data_iter, num_data_pages); pt_set_up = true; mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1; } vmw_fifo_resource_inc(dev_priv); cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) goto out_no_cmd_space; cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64; cmd->header.size = sizeof(cmd->body); cmd->body.mobid = mob_id; cmd->body.ptDepth = mob->pt_level; cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; out_no_cmd_space: vmw_fifo_resource_dec(dev_priv); if (pt_set_up) { vmw_bo_unpin_unlocked(&mob->pt_bo->tbo); vmw_bo_unreference(&mob->pt_bo); } return -ENOMEM; }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
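vmw_mob_calculate_pt_pages() in the record above sizes a multilevel MOB page table by repeatedly reducing the object size to the number of entries needed at the next level up. Below is a standalone sketch of that sizing loop, assuming 64-bit (8-byte) PPN entries and a hypothetical 1 GiB object.

/*
 * Illustration only: page-table sizing in the spirit of
 * vmw_mob_calculate_pt_pages().  With 4 KiB pages and 8-byte entries,
 * one page-table page addresses 512 data pages; levels are added until
 * a single root page suffices.
 */
#include <stdio.h>

#define PAGE_SIZE_ 4096UL
#define PPN_SIZE_  8UL	/* VMW_PPN_SIZE on 64-bit builds */

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned long data_pages = (1UL << 30) / PAGE_SIZE_;	/* 1 GiB object */
	unsigned long data_size = data_pages * PAGE_SIZE_;
	unsigned long tot_size = 0;
	int level = 0;

	while (data_size > PAGE_SIZE_) {
		/* bytes of entries needed at this level, one per page below it */
		data_size = div_round_up(data_size, PAGE_SIZE_) * PPN_SIZE_;
		/* round the level up to whole pages */
		tot_size += div_round_up(data_size, PAGE_SIZE_) * PAGE_SIZE_;
		level++;
	}

	printf("%lu data pages -> %lu page-table pages over %d level(s)\n",
	       data_pages, tot_size / PAGE_SIZE_, level);
	return 0;
}

For the 1 GiB example this prints 513 page-table pages over two levels: 512 level-1 pages plus one level-2 root, which is why the root's DMA address is all the SET_OTABLE/DEFINE_GB_MOB commands need.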
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_drv.h" #include "vmwgfx_devcaps.h" #include "vmwgfx_kms.h" #include <drm/vmwgfx_drm.h> #include <linux/pci.h> int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_getparam_arg *param = (struct drm_vmw_getparam_arg *)data; struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); switch (param->param) { case DRM_VMW_PARAM_NUM_STREAMS: param->value = vmw_overlay_num_overlays(dev_priv); break; case DRM_VMW_PARAM_NUM_FREE_STREAMS: param->value = vmw_overlay_num_free_overlays(dev_priv); break; case DRM_VMW_PARAM_3D: param->value = vmw_supports_3d(dev_priv) ? 1 : 0; break; case DRM_VMW_PARAM_HW_CAPS: param->value = dev_priv->capabilities; break; case DRM_VMW_PARAM_HW_CAPS2: param->value = dev_priv->capabilities2; break; case DRM_VMW_PARAM_FIFO_CAPS: param->value = vmw_fifo_caps(dev_priv); break; case DRM_VMW_PARAM_MAX_FB_SIZE: param->value = dev_priv->max_primary_mem; break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) param->value = SVGA3D_HWVERSION_WS8_B1; else param->value = vmw_fifo_mem_read( dev_priv, ((vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 
SVGA_FIFO_3D_HWVERSION_REVISED : SVGA_FIFO_3D_HWVERSION)); break; } case DRM_VMW_PARAM_MAX_SURF_MEMORY: if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && !vmw_fp->gb_aware) param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; else param->value = dev_priv->memory_size; break; case DRM_VMW_PARAM_3D_CAPS_SIZE: param->value = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware); break; case DRM_VMW_PARAM_MAX_MOB_MEMORY: vmw_fp->gb_aware = true; param->value = dev_priv->max_mob_pages * PAGE_SIZE; break; case DRM_VMW_PARAM_MAX_MOB_SIZE: param->value = dev_priv->max_mob_size; break; case DRM_VMW_PARAM_SCREEN_TARGET: param->value = (dev_priv->active_display_unit == vmw_du_screen_target); break; case DRM_VMW_PARAM_DX: param->value = has_sm4_context(dev_priv); break; case DRM_VMW_PARAM_SM4_1: param->value = has_sm4_1_context(dev_priv); break; case DRM_VMW_PARAM_SM5: param->value = has_sm5_context(dev_priv); break; case DRM_VMW_PARAM_GL43: param->value = has_gl43_context(dev_priv); break; case DRM_VMW_PARAM_DEVICE_ID: param->value = to_pci_dev(dev_priv->drm.dev)->device; break; default: return -EINVAL; } return 0; } int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_get_3d_cap_arg *arg = (struct drm_vmw_get_3d_cap_arg *) data; struct vmw_private *dev_priv = vmw_priv(dev); uint32_t size; void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); void *bounce = NULL; int ret; struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } size = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware); if (unlikely(size == 0)) { DRM_ERROR("Failed to figure out the devcaps size (no 3D).\n"); return -ENOMEM; } if (arg->max_size < size) size = arg->max_size; bounce = vzalloc(size); if (unlikely(bounce == NULL)) { DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); return -ENOMEM; } ret = vmw_devcaps_copy(dev_priv, vmw_fp->gb_aware, bounce, size); if (unlikely (ret != 0)) goto out_err; ret = copy_to_user(buffer, bounce, size); if (ret) ret = -EFAULT; out_err: vfree(bounce); if (unlikely(ret != 0)) DRM_ERROR("Failed to report 3D caps info.\n"); return ret; } int vmw_present_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_present_arg *arg = (struct drm_vmw_present_arg *)data; struct vmw_surface *surface; struct drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect *clips = NULL; struct drm_framebuffer *fb; struct vmw_framebuffer *vfb; struct vmw_resource *res; uint32_t num_clips; int ret; num_clips = arg->num_clips; clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; if (clips_ptr == NULL) { VMW_DEBUG_USER("Variable clips_ptr must be specified.\n"); ret = -EINVAL; goto out_clips; } clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; goto out_clips; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { DRM_ERROR("Failed to copy clip rects from userspace.\n"); ret = -EFAULT; goto out_no_copy; } drm_modeset_lock_all(dev); fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id); if (!fb) { VMW_DEBUG_USER("Invalid framebuffer id.\n"); ret = -ENOENT; goto out_no_fb; } vfb = vmw_framebuffer_to_vfb(fb); ret = 
vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid, user_surface_converter, &res); if (ret) goto out_no_surface; surface = vmw_res_to_srf(res); ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, arg->sid, arg->dest_x, arg->dest_y, clips, num_clips); /* vmw_user_surface_lookup takes one ref so does new_fb */ vmw_surface_unreference(&surface); out_no_surface: drm_framebuffer_put(fb); out_no_fb: drm_modeset_unlock_all(dev); out_no_copy: kfree(clips); out_clips: return ret; } int vmw_present_readback_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_present_readback_arg *arg = (struct drm_vmw_present_readback_arg *)data; struct drm_vmw_fence_rep __user *user_fence_rep = (struct drm_vmw_fence_rep __user *) (unsigned long)arg->fence_rep; struct drm_vmw_rect __user *clips_ptr; struct drm_vmw_rect *clips = NULL; struct drm_framebuffer *fb; struct vmw_framebuffer *vfb; uint32_t num_clips; int ret; num_clips = arg->num_clips; clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; if (unlikely(num_clips == 0)) return 0; if (clips_ptr == NULL) { VMW_DEBUG_USER("Argument clips_ptr must be specified.\n"); ret = -EINVAL; goto out_clips; } clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; goto out_clips; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { DRM_ERROR("Failed to copy clip rects from userspace.\n"); ret = -EFAULT; goto out_no_copy; } drm_modeset_lock_all(dev); fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id); if (!fb) { VMW_DEBUG_USER("Invalid framebuffer id.\n"); ret = -ENOENT; goto out_no_fb; } vfb = vmw_framebuffer_to_vfb(fb); if (!vfb->bo) { VMW_DEBUG_USER("Framebuffer not buffer backed.\n"); ret = -EINVAL; goto out_no_ttm_lock; } ret = vmw_kms_readback(dev_priv, file_priv, vfb, user_fence_rep, clips, num_clips); out_no_ttm_lock: drm_framebuffer_put(fb); out_no_fb: drm_modeset_unlock_all(dev); out_no_copy: kfree(clips); out_clips: return ret; }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
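vmw_get_cap_3d_ioctl() in the record above sizes the capability blob, clamps it to the caller's max_size, fills a vzalloc'd bounce buffer and only then copies it out to userspace. The sketch below is a loose userspace analogue of that clamp-and-bounce pattern; the structures and helpers are invented stand-ins, not the vmwgfx UAPI.

/*
 * Illustration only: size, clamp, fill a scratch buffer, then copy out,
 * in the spirit of vmw_get_cap_3d_ioctl().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* pretend device capability blob */
static const uint32_t fake_caps[] = { 0x10, 0x20, 0x30, 0x40, 0x50 };

static int report_caps(void *user_buf, size_t max_size, size_t *copied)
{
	size_t size = sizeof(fake_caps);
	void *bounce;

	if (max_size == 0)
		return -1;		/* mirrors the -EINVAL check */

	if (max_size < size)
		size = max_size;	/* clamp to what the caller can take */

	bounce = calloc(1, size);	/* stands in for vzalloc() */
	if (!bounce)
		return -1;

	memcpy(bounce, fake_caps, size);	/* stands in for the devcaps copy */
	memcpy(user_buf, bounce, size);		/* stands in for copy_to_user() */
	free(bounce);

	*copied = size;
	return 0;
}

int main(void)
{
	uint32_t buf[3];
	size_t copied = 0;

	if (report_caps(buf, sizeof(buf), &copied) == 0)
		printf("copied %zu bytes, first cap 0x%x\n", copied, buf[0]);
	return 0;
}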
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_bo.h" #include "vmwgfx_kms.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #define vmw_crtc_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.crtc) #define vmw_encoder_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.encoder) #define vmw_connector_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.connector) /** * struct vmw_kms_sou_surface_dirty - Closure structure for * blit surface to screen command. * @base: The base type we derive from. Used by vmw_kms_helper_dirty(). * @left: Left side of bounding box. * @right: Right side of bounding box. * @top: Top side of bounding box. * @bottom: Bottom side of bounding box. * @dst_x: Difference between source clip rects and framebuffer coordinates. * @dst_y: Difference between source clip rects and framebuffer coordinates. * @sid: Surface id of surface to copy from. */ struct vmw_kms_sou_surface_dirty { struct vmw_kms_dirty base; s32 left, right, top, bottom; s32 dst_x, dst_y; u32 sid; }; /* * SVGA commands that are used by this code. Please see the device headers * for explanation. */ struct vmw_kms_sou_readback_blit { uint32 header; SVGAFifoCmdBlitScreenToGMRFB body; }; struct vmw_kms_sou_bo_blit { uint32 header; SVGAFifoCmdBlitGMRFBToScreen body; }; struct vmw_kms_sou_dirty_cmd { SVGA3dCmdHeader header; SVGA3dCmdBlitSurfaceToScreen body; }; struct vmw_kms_sou_define_gmrfb { uint32_t header; SVGAFifoCmdDefineGMRFB body; }; /* * Display unit using screen objects. */ struct vmw_screen_object_unit { struct vmw_display_unit base; unsigned long buffer_size; /**< Size of allocated buffer */ struct vmw_bo *buffer; /**< Backing store buffer */ bool defined; }; static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) { vmw_du_cleanup(&sou->base); kfree(sou); } /* * Screen Object Display Unit CRTC functions */ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) { vmw_sou_destroy(vmw_crtc_to_sou(crtc)); } /* * Send the fifo command to create a screen. 
*/ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, struct vmw_screen_object_unit *sou, int x, int y, struct drm_display_mode *mode) { size_t fifo_size; struct { struct { uint32_t cmdType; } header; SVGAScreenObject obj; } *cmd; BUG_ON(!sou->buffer); fifo_size = sizeof(*cmd); cmd = VMW_CMD_RESERVE(dev_priv, fifo_size); if (unlikely(cmd == NULL)) return -ENOMEM; memset(cmd, 0, fifo_size); cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN; cmd->obj.structSize = sizeof(SVGAScreenObject); cmd->obj.id = sou->base.unit; cmd->obj.flags = SVGA_SCREEN_HAS_ROOT | (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); cmd->obj.size.width = mode->hdisplay; cmd->obj.size.height = mode->vdisplay; cmd->obj.root.x = x; cmd->obj.root.y = y; sou->base.set_gui_x = cmd->obj.root.x; sou->base.set_gui_y = cmd->obj.root.y; /* Ok to assume that buffer is pinned in vram */ vmw_bo_get_guest_ptr(&sou->buffer->tbo, &cmd->obj.backingStore.ptr); cmd->obj.backingStore.pitch = mode->hdisplay * 4; vmw_cmd_commit(dev_priv, fifo_size); sou->defined = true; return 0; } /* * Send the fifo command to destroy a screen. */ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, struct vmw_screen_object_unit *sou) { size_t fifo_size; int ret; struct { struct { uint32_t cmdType; } header; SVGAFifoCmdDestroyScreen body; } *cmd; /* no need to do anything */ if (unlikely(!sou->defined)) return 0; fifo_size = sizeof(*cmd); cmd = VMW_CMD_RESERVE(dev_priv, fifo_size); if (unlikely(cmd == NULL)) return -ENOMEM; memset(cmd, 0, fifo_size); cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN; cmd->body.screenId = sou->base.unit; vmw_cmd_commit(dev_priv, fifo_size); /* Force sync */ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); if (unlikely(ret != 0)) DRM_ERROR("Failed to sync with HW"); else sou->defined = false; return ret; } /** * vmw_sou_crtc_mode_set_nofb - Create new screen * * @crtc: CRTC associated with the new screen * * This function creates/destroys a screen. This function cannot fail, so if * somehow we run into a failure, just do the best we can to get out. */ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct vmw_private *dev_priv; struct vmw_screen_object_unit *sou; struct vmw_framebuffer *vfb; struct drm_framebuffer *fb; struct drm_plane_state *ps; struct vmw_plane_state *vps; int ret; sou = vmw_crtc_to_sou(crtc); dev_priv = vmw_priv(crtc->dev); ps = crtc->primary->state; fb = ps->fb; vps = vmw_plane_state_to_vps(ps); vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL; if (sou->defined) { ret = vmw_sou_fifo_destroy(dev_priv, sou); if (ret) { DRM_ERROR("Failed to destroy Screen Object\n"); return; } } if (vfb) { struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; int x, y; sou->buffer = vps->bo; sou->buffer_size = vps->bo_size; conn_state = sou->base.connector.state; vmw_conn_state = vmw_connector_state_to_vcs(conn_state); x = vmw_conn_state->gui_x; y = vmw_conn_state->gui_y; ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode); if (ret) DRM_ERROR("Failed to define Screen Object %dx%d\n", crtc->x, crtc->y); } else { sou->buffer = NULL; sou->buffer_size = 0; } } /** * vmw_sou_crtc_helper_prepare - Noop * * @crtc: CRTC associated with the new screen * * Prepares the CRTC for a mode set, but we don't need to do anything here. */ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) { } /** * vmw_sou_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen * @state: Unused * * This is called after a mode set has been completed. 
*/ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { } /** * vmw_sou_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off * @state: Unused */ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct vmw_private *dev_priv; struct vmw_screen_object_unit *sou; int ret; if (!crtc) { DRM_ERROR("CRTC is NULL\n"); return; } sou = vmw_crtc_to_sou(crtc); dev_priv = vmw_priv(crtc->dev); if (sou->defined) { ret = vmw_sou_fifo_destroy(dev_priv, sou); if (ret) DRM_ERROR("Failed to destroy Screen Object\n"); } } static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { .gamma_set = vmw_du_crtc_gamma_set, .destroy = vmw_sou_crtc_destroy, .reset = vmw_du_crtc_reset, .atomic_duplicate_state = vmw_du_crtc_duplicate_state, .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, }; /* * Screen Object Display Unit encoder functions */ static void vmw_sou_encoder_destroy(struct drm_encoder *encoder) { vmw_sou_destroy(vmw_encoder_to_sou(encoder)); } static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = { .destroy = vmw_sou_encoder_destroy, }; /* * Screen Object Display Unit connector functions */ static void vmw_sou_connector_destroy(struct drm_connector *connector) { vmw_sou_destroy(vmw_connector_to_sou(connector)); } static const struct drm_connector_funcs vmw_sou_connector_funcs = { .dpms = vmw_du_connector_dpms, .detect = vmw_du_connector_detect, .fill_modes = vmw_du_connector_fill_modes, .destroy = vmw_sou_connector_destroy, .reset = vmw_du_connector_reset, .atomic_duplicate_state = vmw_du_connector_duplicate_state, .atomic_destroy_state = vmw_du_connector_destroy_state, }; static const struct drm_connector_helper_funcs vmw_sou_connector_helper_funcs = { }; /* * Screen Object Display Plane Functions */ /** * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer * * @plane: display plane * @old_state: Contains the FB to clean up * * Unpins the display surface * * Returns 0 on success */ static void vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); struct drm_crtc *crtc = plane->state->crtc ? plane->state->crtc : old_state->crtc; if (vps->bo) vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false); vmw_bo_unreference(&vps->bo); vps->bo_size = 0; vmw_du_plane_cleanup_fb(plane, old_state); } /** * vmw_sou_primary_plane_prepare_fb - allocate backing buffer * * @plane: display plane * @new_state: info on the new plane state, including the FB * * The SOU backing buffer is our equivalent of the display plane. 
* * Returns 0 on success */ static int vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct drm_framebuffer *new_fb = new_state->fb; struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_private *dev_priv; int ret; struct vmw_bo_params bo_params = { .domain = VMW_BO_DOMAIN_VRAM, .busy_domain = VMW_BO_DOMAIN_VRAM, .bo_type = ttm_bo_type_device, .pin = true }; if (!new_fb) { vmw_bo_unreference(&vps->bo); vps->bo_size = 0; return 0; } bo_params.size = new_state->crtc_w * new_state->crtc_h * 4; dev_priv = vmw_priv(crtc->dev); if (vps->bo) { if (vps->bo_size == bo_params.size) { /* * Note that this might temporarily up the pin-count * to 2, until cleanup_fb() is called. */ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); } vmw_bo_unreference(&vps->bo); vps->bo_size = 0; } vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo); vmw_overlay_resume_all(dev_priv); if (ret) return ret; vps->bo_size = bo_params.size; /* * TTM already thinks the buffer is pinned, but make sure the * pin_count is upped. */ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); } static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update, uint32_t num_hits) { return sizeof(struct vmw_kms_sou_define_gmrfb) + sizeof(struct vmw_kms_sou_bo_blit) * num_hits; } static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update, void *cmd) { struct vmw_framebuffer_bo *vfbbo = container_of(update->vfb, typeof(*vfbbo), base); struct vmw_kms_sou_define_gmrfb *gmr = cmd; int depth = update->vfb->base.format->depth; /* Emulate RGBA support, contrary to svga_reg.h this is not * supported by hosts. This is only a problem if we are reading * this value later and expecting what we uploaded back. */ if (depth == 32) depth = 24; gmr->header = SVGA_CMD_DEFINE_GMRFB; gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8; gmr->body.format.colorDepth = depth; gmr->body.format.reserved = 0; gmr->body.bytesPerLine = update->vfb->base.pitches[0]; vmw_bo_get_guest_ptr(&vfbbo->buffer->tbo, &gmr->body.ptr); return sizeof(*gmr); } static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *clip, uint32_t fb_x, uint32_t fb_y) { struct vmw_kms_sou_bo_blit *blit = cmd; blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; blit->body.destScreenId = update->du->unit; blit->body.srcOrigin.x = fb_x; blit->body.srcOrigin.y = fb_y; blit->body.destRect.left = clip->x1; blit->body.destRect.top = clip->y1; blit->body.destRect.right = clip->x2; blit->body.destRect.bottom = clip->y2; return sizeof(*blit); } static uint32_t vmw_stud_bo_post_clip(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *bb) { return 0; } /** * vmw_sou_plane_update_bo - Update display unit for bo backed fb. * @dev_priv: Device private. * @plane: Plane state. * @old_state: Old plane state. * @vfb: Framebuffer which is blitted to display unit. * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj. * The returned fence pointer may be NULL in which case the device * has already synchronized. * * Return: 0 on success or a negative error code on failure. 
*/ static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv, struct drm_plane *plane, struct drm_plane_state *old_state, struct vmw_framebuffer *vfb, struct vmw_fence_obj **out_fence) { struct vmw_du_update_plane_buffer bo_update; memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer)); bo_update.base.plane = plane; bo_update.base.old_state = old_state; bo_update.base.dev_priv = dev_priv; bo_update.base.du = vmw_crtc_to_du(plane->state->crtc); bo_update.base.vfb = vfb; bo_update.base.out_fence = out_fence; bo_update.base.mutex = NULL; bo_update.base.intr = true; bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size; bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb; bo_update.base.clip = vmw_sou_bo_populate_clip; bo_update.base.post_clip = vmw_stud_bo_post_clip; return vmw_du_helper_plane_update(&bo_update.base); } static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update, uint32_t num_hits) { return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) * num_hits; } static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update, void *cmd) { struct vmw_du_update_plane_surface *srf_update; srf_update = container_of(update, typeof(*srf_update), base); /* * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that * its bounding box is filled before iterating over all the clips. So * store the FIFO start address and revisit to fill the details. */ srf_update->cmd_start = cmd; return 0; } static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update, void *cmd, uint32_t num_hits) { struct vmw_kms_sou_dirty_cmd *blit = cmd; struct vmw_framebuffer_surface *vfbs; vfbs = container_of(update->vfb, typeof(*vfbs), base); blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN; blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) * num_hits; blit->body.srcImage.sid = vfbs->surface->res.id; blit->body.destScreenId = update->du->unit; /* Update the source and destination bounding box later in post_clip */ blit->body.srcRect.left = 0; blit->body.srcRect.top = 0; blit->body.srcRect.right = 0; blit->body.srcRect.bottom = 0; blit->body.destRect.left = 0; blit->body.destRect.top = 0; blit->body.destRect.right = 0; blit->body.destRect.bottom = 0; return sizeof(*blit); } static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *clip, uint32_t src_x, uint32_t src_y) { SVGASignedRect *rect = cmd; /* * rects are relative to dest bounding box rect on screen object, so * translate to it later in post_clip */ rect->left = clip->x1; rect->top = clip->y1; rect->right = clip->x2; rect->bottom = clip->y2; return sizeof(*rect); } static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *bb) { struct vmw_du_update_plane_surface *srf_update; struct drm_plane_state *state = update->plane->state; struct drm_rect src_bb; struct vmw_kms_sou_dirty_cmd *blit; SVGASignedRect *rect; uint32_t num_hits; int translate_src_x; int translate_src_y; int i; srf_update = container_of(update, typeof(*srf_update), base); blit = srf_update->cmd_start; rect = (SVGASignedRect *)&blit[1]; num_hits = (blit->header.size - sizeof(blit->body))/ sizeof(SVGASignedRect); src_bb = *bb; /* To translate bb back to fb src coord */ translate_src_x = (state->src_x >> 16) - state->crtc_x; translate_src_y = (state->src_y >> 16) - state->crtc_y; drm_rect_translate(&src_bb, translate_src_x, translate_src_y); blit->body.srcRect.left = src_bb.x1; blit->body.srcRect.top = 
src_bb.y1; blit->body.srcRect.right = src_bb.x2; blit->body.srcRect.bottom = src_bb.y2; blit->body.destRect.left = bb->x1; blit->body.destRect.top = bb->y1; blit->body.destRect.right = bb->x2; blit->body.destRect.bottom = bb->y2; /* rects are relative to dest bb rect */ for (i = 0; i < num_hits; i++) { rect->left -= bb->x1; rect->top -= bb->y1; rect->right -= bb->x1; rect->bottom -= bb->y1; rect++; } return 0; } /** * vmw_sou_plane_update_surface - Update display unit for surface backed fb. * @dev_priv: Device private. * @plane: Plane state. * @old_state: Old plane state. * @vfb: Framebuffer which is blitted to display unit * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj. * The returned fence pointer may be NULL in which case the device * has already synchronized. * * Return: 0 on success or a negative error code on failure. */ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv, struct drm_plane *plane, struct drm_plane_state *old_state, struct vmw_framebuffer *vfb, struct vmw_fence_obj **out_fence) { struct vmw_du_update_plane_surface srf_update; memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface)); srf_update.base.plane = plane; srf_update.base.old_state = old_state; srf_update.base.dev_priv = dev_priv; srf_update.base.du = vmw_crtc_to_du(plane->state->crtc); srf_update.base.vfb = vfb; srf_update.base.out_fence = out_fence; srf_update.base.mutex = &dev_priv->cmdbuf_mutex; srf_update.base.intr = true; srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size; srf_update.base.post_prepare = vmw_sou_surface_post_prepare; srf_update.base.pre_clip = vmw_sou_surface_pre_clip; srf_update.base.clip = vmw_sou_surface_clip_rect; srf_update.base.post_clip = vmw_sou_surface_post_clip; return vmw_du_helper_plane_update(&srf_update.base); } static void vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc *crtc = new_state->crtc; struct vmw_fence_obj *fence = NULL; int ret; /* In case of device error, maintain consistent atomic state */ if (crtc && new_state->fb) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_state->fb); if (vfb->bo) ret = vmw_sou_plane_update_bo(dev_priv, plane, old_state, vfb, &fence); else ret = vmw_sou_plane_update_surface(dev_priv, plane, old_state, vfb, &fence); if (ret != 0) DRM_ERROR("Failed to update screen.\n"); } else { /* Do nothing when fb and crtc is NULL (blank crtc) */ return; } if (fence) vmw_fence_obj_unreference(&fence); } static const struct drm_plane_funcs vmw_sou_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = vmw_du_primary_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, }; static const struct drm_plane_funcs vmw_sou_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = vmw_du_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, }; /* * Atomic Helpers */ static const struct drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = { .atomic_check = 
vmw_du_cursor_plane_atomic_check, .atomic_update = vmw_du_cursor_plane_atomic_update, .prepare_fb = vmw_du_cursor_plane_prepare_fb, .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, }; static const struct drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = { .atomic_check = vmw_du_primary_plane_atomic_check, .atomic_update = vmw_sou_primary_plane_atomic_update, .prepare_fb = vmw_sou_primary_plane_prepare_fb, .cleanup_fb = vmw_sou_primary_plane_cleanup_fb, }; static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { .prepare = vmw_sou_crtc_helper_prepare, .mode_set_nofb = vmw_sou_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, .atomic_enable = vmw_sou_crtc_atomic_enable, .atomic_disable = vmw_sou_crtc_atomic_disable, }; static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) { struct vmw_screen_object_unit *sou; struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *primary; struct vmw_cursor_plane *cursor; struct drm_crtc *crtc; int ret; sou = kzalloc(sizeof(*sou), GFP_KERNEL); if (!sou) return -ENOMEM; sou->base.unit = unit; crtc = &sou->base.crtc; encoder = &sou->base.encoder; connector = &sou->base.connector; primary = &sou->base.primary; cursor = &sou->base.cursor; sou->base.pref_active = (unit == 0); sou->base.pref_width = dev_priv->initial_width; sou->base.pref_height = dev_priv->initial_height; sou->base.pref_mode = NULL; /* * Remove this after enabling atomic because property values can * only exist in a state object */ sou->base.is_implicit = false; /* Initialize primary plane */ ret = drm_universal_plane_init(dev, primary, 0, &vmw_sou_plane_funcs, vmw_primary_plane_formats, ARRAY_SIZE(vmw_primary_plane_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { DRM_ERROR("Failed to initialize primary plane"); goto err_free; } drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs); drm_plane_enable_fb_damage_clips(primary); /* Initialize cursor plane */ ret = drm_universal_plane_init(dev, &cursor->base, 0, &vmw_sou_cursor_funcs, vmw_cursor_plane_formats, ARRAY_SIZE(vmw_cursor_plane_formats), NULL, DRM_PLANE_TYPE_CURSOR, NULL); if (ret) { DRM_ERROR("Failed to initialize cursor plane"); drm_plane_cleanup(&sou->base.primary); goto err_free; } drm_plane_helper_add(&cursor->base, &vmw_sou_cursor_plane_helper_funcs); ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { DRM_ERROR("Failed to initialize connector\n"); goto err_free; } drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs); connector->status = vmw_du_connector_detect(connector, true); ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL); if (ret) { DRM_ERROR("Failed to initialize encoder\n"); goto err_free_connector; } (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; ret = drm_connector_register(connector); if (ret) { DRM_ERROR("Failed to register connector\n"); goto err_free_encoder; } ret = drm_crtc_init_with_planes(dev, crtc, primary, &cursor->base, &vmw_screen_object_crtc_funcs, NULL); if (ret) { DRM_ERROR("Failed to initialize CRTC\n"); goto err_free_unregister; } drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(crtc, 256); drm_object_attach_property(&connector->base, 
dev_priv->hotplug_mode_update_property, 1); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); return 0; err_free_unregister: drm_connector_unregister(connector); err_free_encoder: drm_encoder_cleanup(encoder); err_free_connector: drm_connector_cleanup(connector); err_free: kfree(sou); return ret; } int vmw_kms_sou_init_display(struct vmw_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; int i; /* Screen objects won't work if GMR's aren't available */ if (!dev_priv->has_gmr) return -ENOSYS; if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { return -ENOSYS; } for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) vmw_sou_init(dev_priv, i); dev_priv->active_display_unit = vmw_du_screen_object; drm_mode_config_reset(dev); return 0; } static int do_bo_define_gmrfb(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer) { struct vmw_bo *buf = container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; int depth = framebuffer->base.format->depth; struct { uint32_t header; SVGAFifoCmdDefineGMRFB body; } *cmd; /* Emulate RGBA support, contrary to svga_reg.h this is not * supported by hosts. This is only a problem if we are reading * this value later and expecting what we uploaded back. */ if (depth == 32) depth = 24; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; cmd->header = SVGA_CMD_DEFINE_GMRFB; cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8; cmd->body.format.colorDepth = depth; cmd->body.format.reserved = 0; cmd->body.bytesPerLine = framebuffer->base.pitches[0]; /* Buffer is reserved in vram or GMR */ vmw_bo_get_guest_ptr(&buf->tbo, &cmd->body.ptr); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_sou_surface_fifo_commit - Callback to fill in and submit a * blit surface to screen command. * * @dirty: The closure structure. * * Fills in the missing fields in the command, and translates the cliprects * to match the destination bounding box encoded. */ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty) { struct vmw_kms_sou_surface_dirty *sdirty = container_of(dirty, typeof(*sdirty), base); struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd; s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x; s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y; size_t region_size = dirty->num_hits * sizeof(SVGASignedRect); SVGASignedRect *blit = (SVGASignedRect *) &cmd[1]; int i; if (!dirty->num_hits) { vmw_cmd_commit(dirty->dev_priv, 0); return; } cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN; cmd->header.size = sizeof(cmd->body) + region_size; /* * Use the destination bounding box to specify destination - and * source bounding regions. */ cmd->body.destRect.left = sdirty->left; cmd->body.destRect.right = sdirty->right; cmd->body.destRect.top = sdirty->top; cmd->body.destRect.bottom = sdirty->bottom; cmd->body.srcRect.left = sdirty->left + trans_x; cmd->body.srcRect.right = sdirty->right + trans_x; cmd->body.srcRect.top = sdirty->top + trans_y; cmd->body.srcRect.bottom = sdirty->bottom + trans_y; cmd->body.srcImage.sid = sdirty->sid; cmd->body.destScreenId = dirty->unit->unit; /* Blits are relative to the destination rect. Translate. 
*/ for (i = 0; i < dirty->num_hits; ++i, ++blit) { blit->left -= sdirty->left; blit->right -= sdirty->left; blit->top -= sdirty->top; blit->bottom -= sdirty->top; } vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd)); sdirty->left = sdirty->top = S32_MAX; sdirty->right = sdirty->bottom = S32_MIN; } /** * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect. * * @dirty: The closure structure * * Encodes a SVGASignedRect cliprect and updates the bounding box of the * BLIT_SURFACE_TO_SCREEN command. */ static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty) { struct vmw_kms_sou_surface_dirty *sdirty = container_of(dirty, typeof(*sdirty), base); struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd; SVGASignedRect *blit = (SVGASignedRect *) &cmd[1]; /* Destination rect. */ blit += dirty->num_hits; blit->left = dirty->unit_x1; blit->top = dirty->unit_y1; blit->right = dirty->unit_x2; blit->bottom = dirty->unit_y2; /* Destination bounding box */ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1); sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1); sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2); sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2); dirty->num_hits++; } /** * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer * * @dev_priv: Pointer to the device private structure. * @framebuffer: Pointer to the surface-buffer backed framebuffer. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. * @srf: Pointer to surface to blit from. If NULL, the surface attached * to @framebuffer will be used. * @dest_x: X coordinate offset to align @srf with framebuffer coordinates. * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates. * @num_clips: Number of clip rects in @clips. * @inc: Increment to use when looping over @clips. * @out_fence: If non-NULL, will return a ref-counted pointer to a * struct vmw_fence_obj. The returned fence pointer may be NULL in which * case the device has already synchronized. * @crtc: If crtc is passed, perform surface dirty on that crtc only. * * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. 
*/ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, struct drm_vmw_rect *vclips, struct vmw_resource *srf, s32 dest_x, s32 dest_y, unsigned num_clips, int inc, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc) { struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_kms_sou_surface_dirty sdirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; if (!srf) srf = &vfbs->surface->res; ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE, NULL, NULL); if (ret) return ret; ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true); if (ret) goto out_unref; sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit; sdirty.base.clip = vmw_sou_surface_clip; sdirty.base.dev_priv = dev_priv; sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) * num_clips; sdirty.base.crtc = crtc; sdirty.sid = srf->id; sdirty.left = sdirty.top = S32_MAX; sdirty.right = sdirty.bottom = S32_MIN; sdirty.dst_x = dest_x; sdirty.dst_y = dest_y; ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, NULL); return ret; out_unref: vmw_validation_unref_lists(&val_ctx); return ret; } /** * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips. * * @dirty: The closure structure. * * Commits a previously built command buffer of readback clips. */ static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty) { if (!dirty->num_hits) { vmw_cmd_commit(dirty->dev_priv, 0); return; } vmw_cmd_commit(dirty->dev_priv, sizeof(struct vmw_kms_sou_bo_blit) * dirty->num_hits); } /** * vmw_sou_bo_clip - Callback to encode a readback cliprect. * * @dirty: The closure structure * * Encodes a BLIT_GMRFB_TO_SCREEN cliprect. */ static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty) { struct vmw_kms_sou_bo_blit *blit = dirty->cmd; blit += dirty->num_hits; blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; blit->body.destScreenId = dirty->unit->unit; blit->body.srcOrigin.x = dirty->fb_x; blit->body.srcOrigin.y = dirty->fb_y; blit->body.destRect.left = dirty->unit_x1; blit->body.destRect.top = dirty->unit_y1; blit->body.destRect.right = dirty->unit_x2; blit->body.destRect.bottom = dirty->unit_y2; dirty->num_hits++; } /** * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer * * @dev_priv: Pointer to the device private structure. * @framebuffer: Pointer to the buffer-object backed framebuffer. * @clips: Array of clip rects. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. * @num_clips: Number of clip rects in @clips. * @increment: Increment to use when looping over @clips. * @interruptible: Whether to perform waits interruptible if possible. * @out_fence: If non-NULL, will return a ref-counted pointer to a * struct vmw_fence_obj. The returned fence pointer may be NULL in which * case the device has already synchronized. * @crtc: If crtc is passed, perform bo dirty on that crtc only. * * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. 
*/ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, struct drm_vmw_rect *vclips, unsigned num_clips, int increment, bool interruptible, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc) { struct vmw_bo *buf = container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; ret = vmw_validation_prepare(&val_ctx, NULL, interruptible); if (ret) goto out_unref; ret = do_bo_define_gmrfb(dev_priv, framebuffer); if (unlikely(ret != 0)) goto out_revert; dirty.crtc = crtc; dirty.fifo_commit = vmw_sou_bo_fifo_commit; dirty.clip = vmw_sou_bo_clip; dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) * num_clips; ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 0, 0, num_clips, increment, &dirty); vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, NULL); return ret; out_revert: vmw_validation_revert(&val_ctx); out_unref: vmw_validation_unref_lists(&val_ctx); return ret; } /** * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips. * * @dirty: The closure structure. * * Commits a previously built command buffer of readback clips. */ static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty) { if (!dirty->num_hits) { vmw_cmd_commit(dirty->dev_priv, 0); return; } vmw_cmd_commit(dirty->dev_priv, sizeof(struct vmw_kms_sou_readback_blit) * dirty->num_hits); } /** * vmw_sou_readback_clip - Callback to encode a readback cliprect. * * @dirty: The closure structure * * Encodes a BLIT_SCREEN_TO_GMRFB cliprect. */ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty) { struct vmw_kms_sou_readback_blit *blit = dirty->cmd; blit += dirty->num_hits; blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB; blit->body.srcScreenId = dirty->unit->unit; blit->body.destOrigin.x = dirty->fb_x; blit->body.destOrigin.y = dirty->fb_y; blit->body.srcRect.left = dirty->unit_x1; blit->body.srcRect.top = dirty->unit_y1; blit->body.srcRect.right = dirty->unit_x2; blit->body.srcRect.bottom = dirty->unit_y2; dirty->num_hits++; } /** * vmw_kms_sou_readback - Perform a readback from the screen object system to * a buffer-object backed framebuffer. * * @dev_priv: Pointer to the device private structure. * @file_priv: Pointer to a struct drm_file identifying the caller. * Must be set to NULL if @user_fence_rep is NULL. * @vfb: Pointer to the buffer-object backed framebuffer. * @user_fence_rep: User-space provided structure for fence information. * Must be set to non-NULL if @file_priv is non-NULL. * @vclips: Array of clip rects. * @num_clips: Number of clip rects in @vclips. * @crtc: If crtc is passed, readback on that crtc only. * * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. 
*/ int vmw_kms_sou_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, struct drm_vmw_fence_rep __user *user_fence_rep, struct drm_vmw_rect *vclips, uint32_t num_clips, struct drm_crtc *crtc) { struct vmw_bo *buf = container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; ret = vmw_validation_prepare(&val_ctx, NULL, true); if (ret) goto out_unref; ret = do_bo_define_gmrfb(dev_priv, vfb); if (unlikely(ret != 0)) goto out_revert; dirty.crtc = crtc; dirty.fifo_commit = vmw_sou_readback_fifo_commit; dirty.clip = vmw_sou_readback_clip; dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) * num_clips; ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips, 0, 0, num_clips, 1, &dirty); vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL, user_fence_rep); return ret; out_revert: vmw_validation_revert(&val_ctx); out_unref: vmw_validation_unref_lists(&val_ctx); return ret; }
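The surface-backed dirty path above (vmw_sou_surface_clip() and vmw_sou_surface_fifo_commit()) first grows a destination bounding box over all clip rects, then re-encodes each rect relative to that box and derives the source rect by translating the box. A minimal userspace sketch of that arithmetic follows; the struct rect type, the bbox_add() helper and the sample clip values are illustrative stand-ins, not driver code.

#include <limits.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Grow the destination bounding box, the way vmw_sou_surface_clip()
 * grows sdirty->{left,top,right,bottom} over the incoming clips. */
static void bbox_add(struct rect *bb, const struct rect *clip)
{
	if (clip->x1 < bb->x1) bb->x1 = clip->x1;
	if (clip->y1 < bb->y1) bb->y1 = clip->y1;
	if (clip->x2 > bb->x2) bb->x2 = clip->x2;
	if (clip->y2 > bb->y2) bb->y2 = clip->y2;
}

int main(void)
{
	struct rect clips[] = { { 10, 10, 50, 40 }, { 100, 20, 160, 90 } };
	struct rect bb = { INT_MAX, INT_MAX, INT_MIN, INT_MIN };
	size_t i, n = sizeof(clips) / sizeof(clips[0]);

	for (i = 0; i < n; i++)
		bbox_add(&bb, &clips[i]);

	/* The encoded rects are made relative to the bounding box,
	 * mirroring the translation loop in the fifo_commit callback. */
	for (i = 0; i < n; i++)
		printf("clip %zu rel: %d,%d -> %d,%d\n", i,
		       clips[i].x1 - bb.x1, clips[i].y1 - bb.y1,
		       clips[i].x2 - bb.x1, clips[i].y2 - bb.y1);

	printf("dest bb: %d,%d -> %d,%d\n", bb.x1, bb.y1, bb.x2, bb.y2);
	return 0;
}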
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
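The buffer-object update path in the file above sizes its FIFO reservation as one GMRFB define plus one screen blit per clip rect, and vmw_sou_fifo_create() programs the backing store with a packed 32bpp pitch of hdisplay * 4. A small sketch of just that bookkeeping; the fake_define_gmrfb and fake_bo_blit structs are placeholders standing in for the real SVGA command layouts.

#include <stdint.h>
#include <stdio.h>

struct fake_define_gmrfb { uint32_t header; uint32_t body[5]; };
struct fake_bo_blit      { uint32_t header; uint32_t body[6]; };

/* Same shape as vmw_sou_bo_fifo_size(): one GMRFB define plus one
 * BLIT_GMRFB_TO_SCREEN per clip rect that produced a hit. */
static size_t bo_fifo_size(uint32_t num_hits)
{
	return sizeof(struct fake_define_gmrfb) +
	       sizeof(struct fake_bo_blit) * num_hits;
}

int main(void)
{
	uint32_t hdisplay = 1280;

	/* The screen object backing store is packed 32bpp, so the pitch
	 * used when defining the screen is simply hdisplay * 4. */
	printf("backing store pitch = %u bytes\n", hdisplay * 4u);
	printf("fifo bytes for 3 clips = %zu\n", bo_fifo_size(3));
	return 0;
}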
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2017 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_drv.h" #include <linux/highmem.h> /* * Template that implements find_first_diff() for a generic * unsigned integer type. @size and return value are in bytes. */ #define VMW_FIND_FIRST_DIFF(_type) \ static size_t vmw_find_first_diff_ ## _type \ (const _type * dst, const _type * src, size_t size)\ { \ size_t i; \ \ for (i = 0; i < size; i += sizeof(_type)) { \ if (*dst++ != *src++) \ break; \ } \ \ return i; \ } /* * Template that implements find_last_diff() for a generic * unsigned integer type. Pointers point to the item following the * *end* of the area to be examined. @size and return value are in * bytes. */ #define VMW_FIND_LAST_DIFF(_type) \ static ssize_t vmw_find_last_diff_ ## _type( \ const _type * dst, const _type * src, size_t size) \ { \ while (size) { \ if (*--dst != *--src) \ break; \ \ size -= sizeof(_type); \ } \ return size; \ } /* * Instantiate find diff functions for relevant unsigned integer sizes, * assuming that wider integers are faster (including aligning) up to the * architecture native width, which is assumed to be 32 bit unless * CONFIG_64BIT is defined. */ VMW_FIND_FIRST_DIFF(u8); VMW_FIND_LAST_DIFF(u8); VMW_FIND_FIRST_DIFF(u16); VMW_FIND_LAST_DIFF(u16); VMW_FIND_FIRST_DIFF(u32); VMW_FIND_LAST_DIFF(u32); #ifdef CONFIG_64BIT VMW_FIND_FIRST_DIFF(u64); VMW_FIND_LAST_DIFF(u64); #endif /* We use size aligned copies. This computes (addr - align(addr)) */ #define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1)) /* * Template to compute find_first_diff() for a certain integer type * including a head copy for alignment, and adjustment of parameters * for tail find or increased resolution find using an unsigned integer find * of smaller width. If finding is complete, and resolution is sufficient, * the macro executes a return statement. Otherwise it falls through. 
*/ #define VMW_TRY_FIND_FIRST_DIFF(_type) \ do { \ unsigned int spill = SPILL(dst, _type); \ size_t diff_offs; \ \ if (spill && spill == SPILL(src, _type) && \ sizeof(_type) - spill <= size) { \ spill = sizeof(_type) - spill; \ diff_offs = vmw_find_first_diff_u8(dst, src, spill); \ if (diff_offs < spill) \ return round_down(offset + diff_offs, granularity); \ \ dst += spill; \ src += spill; \ size -= spill; \ offset += spill; \ spill = 0; \ } \ if (!spill && !SPILL(src, _type)) { \ size_t to_copy = size & ~(sizeof(_type) - 1); \ \ diff_offs = vmw_find_first_diff_ ## _type \ ((_type *) dst, (_type *) src, to_copy); \ if (diff_offs >= size || granularity == sizeof(_type)) \ return (offset + diff_offs); \ \ dst += diff_offs; \ src += diff_offs; \ size -= diff_offs; \ offset += diff_offs; \ } \ } while (0) \ /** * vmw_find_first_diff - find the first difference between dst and src * * @dst: The destination address * @src: The source address * @size: Number of bytes to compare * @granularity: The granularity needed for the return value in bytes. * return: The offset from find start where the first difference was * encountered in bytes. If no difference was found, the function returns * a value >= @size. */ static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size, size_t granularity) { size_t offset = 0; /* * Try finding with large integers if alignment allows, or we can * fix it. Fall through if we need better resolution or alignment * was bad. */ #ifdef CONFIG_64BIT VMW_TRY_FIND_FIRST_DIFF(u64); #endif VMW_TRY_FIND_FIRST_DIFF(u32); VMW_TRY_FIND_FIRST_DIFF(u16); return round_down(offset + vmw_find_first_diff_u8(dst, src, size), granularity); } /* * Template to compute find_last_diff() for a certain integer type * including a tail copy for alignment, and adjustment of parameters * for head find or increased resolution find using an unsigned integer find * of smaller width. If finding is complete, and resolution is sufficient, * the macro executes a return statement. Otherwise it falls through. */ #define VMW_TRY_FIND_LAST_DIFF(_type) \ do { \ unsigned int spill = SPILL(dst, _type); \ ssize_t location; \ ssize_t diff_offs; \ \ if (spill && spill <= size && spill == SPILL(src, _type)) { \ diff_offs = vmw_find_last_diff_u8(dst, src, spill); \ if (diff_offs) { \ location = size - spill + diff_offs - 1; \ return round_down(location, granularity); \ } \ \ dst -= spill; \ src -= spill; \ size -= spill; \ spill = 0; \ } \ if (!spill && !SPILL(src, _type)) { \ size_t to_copy = round_down(size, sizeof(_type)); \ \ diff_offs = vmw_find_last_diff_ ## _type \ ((_type *) dst, (_type *) src, to_copy); \ location = size - to_copy + diff_offs - sizeof(_type); \ if (location < 0 || granularity == sizeof(_type)) \ return location; \ \ dst -= to_copy - diff_offs; \ src -= to_copy - diff_offs; \ size -= to_copy - diff_offs; \ } \ } while (0) /** * vmw_find_last_diff - find the last difference between dst and src * * @dst: The destination address * @src: The source address * @size: Number of bytes to compare * @granularity: The granularity needed for the return value in bytes. * return: The offset from find start where the last difference was * encountered in bytes, or a negative value if no difference was found. 
*/ static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size, size_t granularity) { dst += size; src += size; #ifdef CONFIG_64BIT VMW_TRY_FIND_LAST_DIFF(u64); #endif VMW_TRY_FIND_LAST_DIFF(u32); VMW_TRY_FIND_LAST_DIFF(u16); return round_down(vmw_find_last_diff_u8(dst, src, size) - 1, granularity); } /** * vmw_memcpy - A wrapper around kernel memcpy with allowing to plug it into a * struct vmw_diff_cpy. * * @diff: The struct vmw_diff_cpy closure argument (unused). * @dest: The copy destination. * @src: The copy source. * @n: Number of bytes to copy. */ void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n) { memcpy(dest, src, n); } /** * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference * * @diff: The struct vmw_diff_cpy used to track the modified bounding box. * @diff_offs: The offset from @diff->line_offset where the difference was * found. */ static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs) { size_t offs = (diff_offs + diff->line_offset) / diff->cpp; struct drm_rect *rect = &diff->rect; rect->x1 = min_t(int, rect->x1, offs); rect->x2 = max_t(int, rect->x2, offs + 1); rect->y1 = min_t(int, rect->y1, diff->line); rect->y2 = max_t(int, rect->y2, diff->line + 1); } /** * vmw_diff_memcpy - memcpy that creates a bounding box of modified content. * * @diff: The struct vmw_diff_cpy used to track the modified bounding box. * @dest: The copy destination. * @src: The copy source. * @n: Number of bytes to copy. * * In order to correctly track the modified content, the field @diff->line must * be pre-loaded with the current line number, the field @diff->line_offset must * be pre-loaded with the line offset in bytes where the copy starts, and * finally the field @diff->cpp need to be preloaded with the number of bytes * per unit in the horizontal direction of the area we're examining. * Typically bytes per pixel. * This is needed to know the needed granularity of the difference computing * operations. A higher cpp generally leads to faster execution at the cost of * bounding box width precision. */ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n) { ssize_t csize, byte_len; if (WARN_ON_ONCE(round_down(n, diff->cpp) != n)) return; /* TODO: Possibly use a single vmw_find_first_diff per line? */ csize = vmw_find_first_diff(dest, src, n, diff->cpp); if (csize < n) { vmw_adjust_rect(diff, csize); byte_len = diff->cpp; /* * Starting from where first difference was found, find * location of last difference, and then copy. */ diff->line_offset += csize; dest += csize; src += csize; n -= csize; csize = vmw_find_last_diff(dest, src, n, diff->cpp); if (csize >= 0) { byte_len += csize; vmw_adjust_rect(diff, csize); } memcpy(dest, src, byte_len); } diff->line_offset += n; } /** * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line * * @mapped_dst: Already mapped destination page index in @dst_pages. * @dst_addr: Kernel virtual address of mapped destination page. * @dst_pages: Array of destination bo pages. * @dst_num_pages: Number of destination bo pages. * @dst_prot: Destination bo page protection. * @mapped_src: Already mapped source page index in @dst_pages. * @src_addr: Kernel virtual address of mapped source page. * @src_pages: Array of source bo pages. * @src_num_pages: Number of source bo pages. * @src_prot: Source bo page protection. * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine. 
*/ struct vmw_bo_blit_line_data { u32 mapped_dst; u8 *dst_addr; struct page **dst_pages; u32 dst_num_pages; pgprot_t dst_prot; u32 mapped_src; u8 *src_addr; struct page **src_pages; u32 src_num_pages; pgprot_t src_prot; struct vmw_diff_cpy *diff; }; /** * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another. * * @d: Blit data as described above. * @dst_offset: Destination copy start offset from start of bo. * @src_offset: Source copy start offset from start of bo. * @bytes_to_copy: Number of bytes to copy in this line. */ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, u32 dst_offset, u32 src_offset, u32 bytes_to_copy) { struct vmw_diff_cpy *diff = d->diff; while (bytes_to_copy) { u32 copy_size = bytes_to_copy; u32 dst_page = dst_offset >> PAGE_SHIFT; u32 src_page = src_offset >> PAGE_SHIFT; u32 dst_page_offset = dst_offset & ~PAGE_MASK; u32 src_page_offset = src_offset & ~PAGE_MASK; bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst; bool unmap_src = d->src_addr && (src_page != d->mapped_src || unmap_dst); copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset); copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset); if (unmap_src) { kunmap_atomic(d->src_addr); d->src_addr = NULL; } if (unmap_dst) { kunmap_atomic(d->dst_addr); d->dst_addr = NULL; } if (!d->dst_addr) { if (WARN_ON_ONCE(dst_page >= d->dst_num_pages)) return -EINVAL; d->dst_addr = kmap_atomic_prot(d->dst_pages[dst_page], d->dst_prot); if (!d->dst_addr) return -ENOMEM; d->mapped_dst = dst_page; } if (!d->src_addr) { if (WARN_ON_ONCE(src_page >= d->src_num_pages)) return -EINVAL; d->src_addr = kmap_atomic_prot(d->src_pages[src_page], d->src_prot); if (!d->src_addr) return -ENOMEM; d->mapped_src = src_page; } diff->do_cpy(diff, d->dst_addr + dst_page_offset, d->src_addr + src_page_offset, copy_size); bytes_to_copy -= copy_size; dst_offset += copy_size; src_offset += copy_size; } return 0; } /** * vmw_bo_cpu_blit - in-kernel cpu blit. * * @dst: Destination buffer object. * @dst_offset: Destination offset of blit start in bytes. * @dst_stride: Destination stride in bytes. * @src: Source buffer object. * @src_offset: Source offset of blit start in bytes. * @src_stride: Source stride in bytes. * @w: Width of blit. * @h: Height of blit. * @diff: The struct vmw_diff_cpy used to track the modified bounding box. * return: Zero on success. Negative error value on failure. Will print out * kernel warnings on caller bugs. * * Performs a CPU blit from one buffer object to another avoiding a full * bo vmap which may exhaust- or fragment vmalloc space. * On supported architectures (x86), we're using kmap_atomic which avoids * cross-processor TLB- and cache flushes and may, on non-HIGHMEM systems * reference already set-up mappings. * * Neither of the buffer objects may be placed in PCI memory * (Fixed memory in TTM terminology) when using this function. 
*/ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, u32 dst_offset, u32 dst_stride, struct ttm_buffer_object *src, u32 src_offset, u32 src_stride, u32 w, u32 h, struct vmw_diff_cpy *diff) { struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false }; u32 j, initial_line = dst_offset / dst_stride; struct vmw_bo_blit_line_data d; int ret = 0; /* Buffer objects need to be either pinned or reserved: */ if (!(dst->pin_count)) dma_resv_assert_held(dst->base.resv); if (!(src->pin_count)) dma_resv_assert_held(src->base.resv); if (!ttm_tt_is_populated(dst->ttm)) { ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); if (ret) return ret; } if (!ttm_tt_is_populated(src->ttm)) { ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx); if (ret) return ret; } d.mapped_dst = 0; d.mapped_src = 0; d.dst_addr = NULL; d.src_addr = NULL; d.dst_pages = dst->ttm->pages; d.src_pages = src->ttm->pages; d.dst_num_pages = PFN_UP(dst->resource->size); d.src_num_pages = PFN_UP(src->resource->size); d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL); d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL); d.diff = diff; for (j = 0; j < h; ++j) { diff->line = j + initial_line; diff->line_offset = dst_offset % dst_stride; ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w); if (ret) goto out; dst_offset += dst_stride; src_offset += src_stride; } out: if (d.src_addr) kunmap_atomic(d.src_addr); if (d.dst_addr) kunmap_atomic(d.dst_addr); return ret; }
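vmw_bo_cpu_blit_line() above never maps more than one source and one destination page at a time, so each line copy is chopped into chunks that stop at the nearest page boundary on either side. A userspace sketch of that offset arithmetic, with PAGE_SHIFT fixed at 12 and arbitrary sample offsets; the kernel computes the in-page offset with & ~PAGE_MASK, which is equivalent to the mask used here.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t dst_offset = 4000, src_offset = 8100, bytes = 9000;

	while (bytes) {
		uint32_t dst_page = dst_offset >> PAGE_SHIFT;
		uint32_t src_page = src_offset >> PAGE_SHIFT;
		uint32_t dst_in_page = dst_offset & (PAGE_SIZE - 1);
		uint32_t src_in_page = src_offset & (PAGE_SIZE - 1);
		uint32_t chunk = bytes;

		/* Clamp so the chunk crosses neither page boundary. */
		chunk = min_u32(chunk, PAGE_SIZE - dst_in_page);
		chunk = min_u32(chunk, PAGE_SIZE - src_in_page);

		printf("copy %5u bytes: dst page %u+%u <- src page %u+%u\n",
		       chunk, dst_page, dst_in_page, src_page, src_in_page);

		bytes -= chunk;
		dst_offset += chunk;
		src_offset += chunk;
	}
	return 0;
}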
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
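vmw_diff_memcpy() in the file above bounds each copied line by its first and last differing byte and grows a dirty rect from those offsets. The following simplified, standalone analog skips the width-templated find helpers and rounds to the pixel size by hand; the diff_copy_line() helper and the 4-bytes-per-pixel sample are illustrative only.

#include <limits.h>
#include <stdio.h>
#include <string.h>

struct rect { int x1, y1, x2, y2; };

static void diff_copy_line(unsigned char *dst, const unsigned char *src,
			   size_t n, size_t cpp, int line, struct rect *r)
{
	size_t first, last;

	/* First and last differing byte on this line. */
	for (first = 0; first < n && dst[first] == src[first]; first++)
		;
	if (first == n)
		return;		/* nothing changed on this line */

	for (last = n; last > first && dst[last - 1] == src[last - 1]; last--)
		;

	first -= first % cpp;			/* round down to a pixel */
	last = last + (cpp - last % cpp) % cpp;	/* round up to a pixel */

	/* Copy only the changed span and grow the dirty bounding box. */
	memcpy(dst + first, src + first, last - first);

	if ((int)(first / cpp) < r->x1) r->x1 = first / cpp;
	if ((int)(last / cpp) > r->x2)  r->x2 = last / cpp;
	if (line < r->y1) r->y1 = line;
	if (line + 1 > r->y2) r->y2 = line + 1;
}

int main(void)
{
	unsigned char dst[16] = { 0 }, src[16] = { 0 };
	struct rect r = { INT_MAX, INT_MAX, INT_MIN, INT_MIN };

	src[5] = 0xff;		/* pixel 1 changed (4 bytes per pixel) */
	src[10] = 0x42;		/* pixel 2 changed */
	diff_copy_line(dst, src, sizeof(dst), 4, 0, &r);

	printf("dirty rect: x1=%d y1=%d x2=%d y2=%d\n", r.x1, r.y1, r.x2, r.y2);
	return 0;
}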
// SPDX-License-Identifier: GPL-2.0 OR MIT /****************************************************************************** * * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * ******************************************************************************/ #include "vmwgfx_bo.h" #include "vmwgfx_kms.h" #include "vmw_surface_cache.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #define vmw_crtc_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.crtc) #define vmw_encoder_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.encoder) #define vmw_connector_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.connector) enum stdu_content_type { SAME_AS_DISPLAY = 0, SEPARATE_SURFACE, SEPARATE_BO }; /** * struct vmw_stdu_dirty - closure structure for the update functions * * @base: The base type we derive from. Used by vmw_kms_helper_dirty(). * @transfer: Transfer direction for DMA command. * @left: Left side of bounding box. * @right: Right side of bounding box. * @top: Top side of bounding box. * @bottom: Bottom side of bounding box. * @fb_left: Left side of the framebuffer/content bounding box * @fb_top: Top of the framebuffer/content bounding box * @pitch: framebuffer pitch (stride) * @buf: buffer object when DMA-ing between buffer and screen targets. * @sid: Surface ID when copying between surface and screen targets. */ struct vmw_stdu_dirty { struct vmw_kms_dirty base; s32 left, right, top, bottom; s32 fb_left, fb_top; u32 pitch; union { struct vmw_bo *buf; u32 sid; }; }; /* * SVGA commands that are used by this code. Please see the device headers * for explanation. */ struct vmw_stdu_update { SVGA3dCmdHeader header; SVGA3dCmdUpdateGBScreenTarget body; }; struct vmw_stdu_dma { SVGA3dCmdHeader header; SVGA3dCmdSurfaceDMA body; }; struct vmw_stdu_surface_copy { SVGA3dCmdHeader header; SVGA3dCmdSurfaceCopy body; }; struct vmw_stdu_update_gb_image { SVGA3dCmdHeader header; SVGA3dCmdUpdateGBImage body; }; /** * struct vmw_screen_target_display_unit * * @base: VMW specific DU structure * @display_srf: surface to be displayed. The dimension of this will always * match the display mode. If the display mode matches * content_vfbs dimensions, then this is a pointer into the * corresponding field in content_vfbs. 
If not, then this * is a separate buffer to which content_vfbs will blit to. * @content_fb_type: content_fb type * @display_width: display width * @display_height: display height * @defined: true if the current display unit has been initialized * @cpp: Bytes per pixel */ struct vmw_screen_target_display_unit { struct vmw_display_unit base; struct vmw_surface *display_srf; enum stdu_content_type content_fb_type; s32 display_width, display_height; bool defined; /* For CPU Blit */ unsigned int cpp; }; static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu); /****************************************************************************** * Screen Target Display Unit CRTC Functions *****************************************************************************/ /** * vmw_stdu_crtc_destroy - cleans up the STDU * * @crtc: used to get a reference to the containing STDU */ static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc) { vmw_stdu_destroy(vmw_crtc_to_stdu(crtc)); } /** * vmw_stdu_define_st - Defines a Screen Target * * @dev_priv: VMW DRM device * @stdu: display unit to create a Screen Target for * @mode: The mode to set. * @crtc_x: X coordinate of screen target relative to framebuffer origin. * @crtc_y: Y coordinate of screen target relative to framebuffer origin. * * Creates a STDU that we can used later. This function is called whenever the * framebuffer size changes. * * RETURNs: * 0 on success, error code on failure */ static int vmw_stdu_define_st(struct vmw_private *dev_priv, struct vmw_screen_target_display_unit *stdu, struct drm_display_mode *mode, int crtc_x, int crtc_y) { struct { SVGA3dCmdHeader header; SVGA3dCmdDefineGBScreenTarget body; } *cmd; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET; cmd->header.size = sizeof(cmd->body); cmd->body.stid = stdu->base.unit; cmd->body.width = mode->hdisplay; cmd->body.height = mode->vdisplay; cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0; cmd->body.dpi = 0; cmd->body.xRoot = crtc_x; cmd->body.yRoot = crtc_y; stdu->base.set_gui_x = cmd->body.xRoot; stdu->base.set_gui_y = cmd->body.yRoot; vmw_cmd_commit(dev_priv, sizeof(*cmd)); stdu->defined = true; stdu->display_width = mode->hdisplay; stdu->display_height = mode->vdisplay; return 0; } /** * vmw_stdu_bind_st - Binds a surface to a Screen Target * * @dev_priv: VMW DRM device * @stdu: display unit affected * @res: Buffer to bind to the screen target. Set to NULL to blank screen. * * Binding a surface to a Screen Target the same as flipping */ static int vmw_stdu_bind_st(struct vmw_private *dev_priv, struct vmw_screen_target_display_unit *stdu, const struct vmw_resource *res) { SVGA3dSurfaceImageId image; struct { SVGA3dCmdHeader header; SVGA3dCmdBindGBScreenTarget body; } *cmd; if (!stdu->defined) { DRM_ERROR("No screen target defined\n"); return -EINVAL; } /* Set up image using information in vfb */ memset(&image, 0, sizeof(image)); image.sid = res ? res->id : SVGA3D_INVALID_ID; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET; cmd->header.size = sizeof(cmd->body); cmd->body.stid = stdu->base.unit; cmd->body.image = image; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a * bounding box. * * @cmd: Pointer to command stream. * @unit: Screen target unit. * @left: Left side of bounding box. 
* @right: Right side of bounding box. * @top: Top side of bounding box. * @bottom: Bottom side of bounding box. */ static void vmw_stdu_populate_update(void *cmd, int unit, s32 left, s32 right, s32 top, s32 bottom) { struct vmw_stdu_update *update = cmd; update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET; update->header.size = sizeof(update->body); update->body.stid = unit; update->body.rect.x = left; update->body.rect.y = top; update->body.rect.w = right - left; update->body.rect.h = bottom - top; } /** * vmw_stdu_update_st - Full update of a Screen Target * * @dev_priv: VMW DRM device * @stdu: display unit affected * * This function needs to be called whenever the content of a screen * target has changed completely. Typically as a result of a backing * surface change. * * RETURNS: * 0 on success, error code on failure */ static int vmw_stdu_update_st(struct vmw_private *dev_priv, struct vmw_screen_target_display_unit *stdu) { struct vmw_stdu_update *cmd; if (!stdu->defined) { DRM_ERROR("No screen target defined"); return -EINVAL; } cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; vmw_stdu_populate_update(cmd, stdu->base.unit, 0, stdu->display_width, 0, stdu->display_height); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_stdu_destroy_st - Destroy a Screen Target * * @dev_priv: VMW DRM device * @stdu: display unit to destroy */ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv, struct vmw_screen_target_display_unit *stdu) { int ret; struct { SVGA3dCmdHeader header; SVGA3dCmdDestroyGBScreenTarget body; } *cmd; /* Nothing to do if not successfully defined */ if (unlikely(!stdu->defined)) return 0; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET; cmd->header.size = sizeof(cmd->body); cmd->body.stid = stdu->base.unit; vmw_cmd_commit(dev_priv, sizeof(*cmd)); /* Force sync */ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); if (unlikely(ret != 0)) DRM_ERROR("Failed to sync with HW"); stdu->defined = false; stdu->display_width = 0; stdu->display_height = 0; return ret; } /** * vmw_stdu_crtc_mode_set_nofb - Updates screen target size * * @crtc: CRTC associated with the screen target * * This function defines/destroys a screen target * */ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; int x, y, ret; stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); conn_state = stdu->base.connector.state; vmw_conn_state = vmw_connector_state_to_vcs(conn_state); if (stdu->defined) { ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); if (ret) DRM_ERROR("Failed to blank CRTC\n"); (void) vmw_stdu_update_st(dev_priv, stdu); ret = vmw_stdu_destroy_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to destroy Screen Target\n"); stdu->content_fb_type = SAME_AS_DISPLAY; } if (!crtc->state->enable) return; x = vmw_conn_state->gui_x; y = vmw_conn_state->gui_y; vmw_svga_enable(dev_priv); ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y); if (ret) DRM_ERROR("Failed to define Screen Target of size %dx%d\n", crtc->x, crtc->y); } static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) { } static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { } static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, struct 
drm_atomic_state *state) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; int ret; if (!crtc) { DRM_ERROR("CRTC is NULL\n"); return; } stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); if (stdu->defined) { ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); if (ret) DRM_ERROR("Failed to blank CRTC\n"); (void) vmw_stdu_update_st(dev_priv, stdu); ret = vmw_stdu_destroy_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to destroy Screen Target\n"); stdu->content_fb_type = SAME_AS_DISPLAY; } } /** * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit * * @dirty: The closure structure. * * This function calculates the bounding box for all the incoming clips. */ static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); dirty->num_hits = 1; /* Calculate destination bounding box */ ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); /* * Calculate content bounding box. We only need the top-left * coordinate because width and height will be the same as the * destination bounding box above */ ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x); ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y); } /** * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object * * @dirty: The closure structure. * * For the special case when we cannot create a proxy surface in a * 2D VM, we have to do a CPU blit ourselves. */ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *ddirty = container_of(dirty, struct vmw_stdu_dirty, base); struct vmw_screen_target_display_unit *stdu = container_of(dirty->unit, typeof(*stdu), base); s32 width, height; s32 src_pitch, dst_pitch; struct ttm_buffer_object *src_bo, *dst_bo; u32 src_offset, dst_offset; struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp); if (!dirty->num_hits) return; width = ddirty->right - ddirty->left; height = ddirty->bottom - ddirty->top; if (width == 0 || height == 0) return; /* Assume we are blitting from Guest (bo) to Host (display_srf) */ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; src_bo = &stdu->display_srf->res.guest_memory_bo->tbo; src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp; dst_pitch = ddirty->pitch; dst_bo = &ddirty->buf->tbo; dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo, src_offset, src_pitch, width * stdu->cpp, height, &diff); } /** * vmw_kms_stdu_readback - Perform a readback from a buffer-object backed * framebuffer and the screen target system. * * @dev_priv: Pointer to the device private structure. * @file_priv: Pointer to a struct drm-file identifying the caller. May be * set to NULL, but then @user_fence_rep must also be set to NULL. * @vfb: Pointer to the buffer-object backed framebuffer. * @user_fence_rep: User-space provided structure for fence information. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. * @num_clips: Number of clip rects in @clips or @vclips. * @increment: Increment to use when looping over @clips or @vclips. * @crtc: If crtc is passed, perform stdu dma on that crtc only. 
* * If DMA-ing till the screen target system, the function will also notify * the screen target system that a bounding box of the cliprects has been * updated. * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. */ int vmw_kms_stdu_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, struct drm_vmw_fence_rep __user *user_fence_rep, struct drm_clip_rect *clips, struct drm_vmw_rect *vclips, uint32_t num_clips, int increment, struct drm_crtc *crtc) { struct vmw_bo *buf = container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_stdu_dirty ddirty; int ret; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); /* * The GMR domain might seem confusing because it might seem like it should * never happen with screen targets but e.g. the xorg vmware driver issues * CMD_SURFACE_DMA for various pixmap updates which might transition our bo to * a GMR. Instead of forcing another transition we can optimize the readback * by reading directly from the GMR. */ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR, VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR); ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; ret = vmw_validation_prepare(&val_ctx, NULL, true); if (ret) goto out_unref; ddirty.left = ddirty.top = S32_MAX; ddirty.right = ddirty.bottom = S32_MIN; ddirty.fb_left = ddirty.fb_top = S32_MAX; ddirty.pitch = vfb->base.pitches[0]; ddirty.buf = buf; ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit; ddirty.base.clip = vmw_stdu_bo_cpu_clip; ddirty.base.fifo_reserve_size = 0; ddirty.base.crtc = crtc; ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips, 0, 0, num_clips, increment, &ddirty.base); vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL, user_fence_rep); return ret; out_unref: vmw_validation_unref_lists(&val_ctx); return ret; } /** * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect * * @dirty: The closure structure. * * Encodes a surface copy command cliprect and updates the bounding box * for the copy. */ static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *sdirty = container_of(dirty, struct vmw_stdu_dirty, base); struct vmw_stdu_surface_copy *cmd = dirty->cmd; struct vmw_screen_target_display_unit *stdu = container_of(dirty->unit, typeof(*stdu), base); if (sdirty->sid != stdu->display_srf->res.id) { struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1]; blit += dirty->num_hits; blit->srcx = dirty->fb_x; blit->srcy = dirty->fb_y; blit->x = dirty->unit_x1; blit->y = dirty->unit_y1; blit->d = 1; blit->w = dirty->unit_x2 - dirty->unit_x1; blit->h = dirty->unit_y2 - dirty->unit_y1; } dirty->num_hits++; /* Destination bounding box */ sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1); sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1); sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2); sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2); } /** * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface * copy command. * * @dirty: The closure structure. * * Fills in the missing fields in a surface copy command, and encodes a screen * target update command. 
*/ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty) { struct vmw_stdu_dirty *sdirty = container_of(dirty, struct vmw_stdu_dirty, base); struct vmw_screen_target_display_unit *stdu = container_of(dirty->unit, typeof(*stdu), base); struct vmw_stdu_surface_copy *cmd = dirty->cmd; struct vmw_stdu_update *update; size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits; size_t commit_size; if (!dirty->num_hits) { vmw_cmd_commit(dirty->dev_priv, 0); return; } if (sdirty->sid != stdu->display_srf->res.id) { struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1]; cmd->header.id = SVGA_3D_CMD_SURFACE_COPY; cmd->header.size = sizeof(cmd->body) + blit_size; cmd->body.src.sid = sdirty->sid; cmd->body.dest.sid = stdu->display_srf->res.id; update = (struct vmw_stdu_update *) &blit[dirty->num_hits]; commit_size = sizeof(*cmd) + blit_size + sizeof(*update); stdu->display_srf->res.res_dirty = true; } else { update = dirty->cmd; commit_size = sizeof(*update); } vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left, sdirty->right, sdirty->top, sdirty->bottom); vmw_cmd_commit(dirty->dev_priv, commit_size); sdirty->left = sdirty->top = S32_MAX; sdirty->right = sdirty->bottom = S32_MIN; } /** * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer * * @dev_priv: Pointer to the device private structure. * @framebuffer: Pointer to the surface-buffer backed framebuffer. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. * @srf: Pointer to surface to blit from. If NULL, the surface attached * to @framebuffer will be used. * @dest_x: X coordinate offset to align @srf with framebuffer coordinates. * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates. * @num_clips: Number of clip rects in @clips. * @inc: Increment to use when looping over @clips. * @out_fence: If non-NULL, will return a ref-counted pointer to a * struct vmw_fence_obj. The returned fence pointer may be NULL in which * case the device has already synchronized. * @crtc: If crtc is passed, perform surface dirty on that crtc only. * * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. 
*/ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, struct drm_vmw_rect *vclips, struct vmw_resource *srf, s32 dest_x, s32 dest_y, unsigned num_clips, int inc, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc) { struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_stdu_dirty sdirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; if (!srf) srf = &vfbs->surface->res; ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE, NULL, NULL); if (ret) return ret; ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true); if (ret) goto out_unref; if (vfbs->is_bo_proxy) { ret = vmw_kms_update_proxy(srf, clips, num_clips, inc); if (ret) goto out_finish; } sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit; sdirty.base.clip = vmw_kms_stdu_surface_clip; sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) * num_clips + sizeof(struct vmw_stdu_update); sdirty.base.crtc = crtc; sdirty.sid = srf->id; sdirty.left = sdirty.top = S32_MAX; sdirty.right = sdirty.bottom = S32_MIN; ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); out_finish: vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, NULL); return ret; out_unref: vmw_validation_unref_lists(&val_ctx); return ret; } /* * Screen Target CRTC dispatch table */ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = { .gamma_set = vmw_du_crtc_gamma_set, .destroy = vmw_stdu_crtc_destroy, .reset = vmw_du_crtc_reset, .atomic_duplicate_state = vmw_du_crtc_duplicate_state, .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, }; /****************************************************************************** * Screen Target Display Unit Encoder Functions *****************************************************************************/ /** * vmw_stdu_encoder_destroy - cleans up the STDU * * @encoder: used the get the containing STDU * * vmwgfx cleans up crtc/encoder/connector all at the same time so technically * this can be a no-op. Nevertheless, it doesn't hurt of have this in case * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't * get called. */ static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder) { vmw_stdu_destroy(vmw_encoder_to_stdu(encoder)); } static const struct drm_encoder_funcs vmw_stdu_encoder_funcs = { .destroy = vmw_stdu_encoder_destroy, }; /****************************************************************************** * Screen Target Display Unit Connector Functions *****************************************************************************/ /** * vmw_stdu_connector_destroy - cleans up the STDU * * @connector: used to get the containing STDU * * vmwgfx cleans up crtc/encoder/connector all at the same time so technically * this can be a no-op. Nevertheless, it doesn't hurt of have this in case * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't * get called. 
*/ static void vmw_stdu_connector_destroy(struct drm_connector *connector) { vmw_stdu_destroy(vmw_connector_to_stdu(connector)); } static const struct drm_connector_funcs vmw_stdu_connector_funcs = { .dpms = vmw_du_connector_dpms, .detect = vmw_du_connector_detect, .fill_modes = vmw_du_connector_fill_modes, .destroy = vmw_stdu_connector_destroy, .reset = vmw_du_connector_reset, .atomic_duplicate_state = vmw_du_connector_duplicate_state, .atomic_destroy_state = vmw_du_connector_destroy_state, }; static const struct drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { }; /****************************************************************************** * Screen Target Display Plane Functions *****************************************************************************/ /** * vmw_stdu_primary_plane_cleanup_fb - Unpins the display surface * * @plane: display plane * @old_state: Contains the FB to clean up * * Unpins the display surface * * Returns 0 on success */ static void vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); if (vps->surf) WARN_ON(!vps->pinned); vmw_du_plane_cleanup_fb(plane, old_state); vps->content_fb_type = SAME_AS_DISPLAY; vps->cpp = 0; } /** * vmw_stdu_primary_plane_prepare_fb - Readies the display surface * * @plane: display plane * @new_state: info on the new plane state, including the FB * * This function allocates a new display surface if the content is * backed by a buffer object. The display surface is pinned here, and it'll * be unpinned in .cleanup_fb() * * Returns 0 on success */ static int vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct vmw_private *dev_priv = vmw_priv(plane->dev); struct drm_framebuffer *new_fb = new_state->fb; struct vmw_framebuffer *vfb; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); enum stdu_content_type new_content_type; struct vmw_framebuffer_surface *new_vfbs; uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h; int ret; /* No FB to prepare */ if (!new_fb) { if (vps->surf) { WARN_ON(vps->pinned != 0); vmw_surface_unreference(&vps->surf); } return 0; } vfb = vmw_framebuffer_to_vfb(new_fb); new_vfbs = (vfb->bo) ? 
NULL : vmw_framebuffer_to_vfbs(new_fb); if (new_vfbs && new_vfbs->surface->metadata.base_size.width == hdisplay && new_vfbs->surface->metadata.base_size.height == vdisplay) new_content_type = SAME_AS_DISPLAY; else if (vfb->bo) new_content_type = SEPARATE_BO; else new_content_type = SEPARATE_SURFACE; if (new_content_type != SAME_AS_DISPLAY) { struct vmw_surface_metadata metadata = {0}; /* * If content buffer is a buffer object, then we have to * construct surface info */ if (new_content_type == SEPARATE_BO) { switch (new_fb->format->cpp[0]*8) { case 32: metadata.format = SVGA3D_X8R8G8B8; break; case 16: metadata.format = SVGA3D_R5G6B5; break; case 8: metadata.format = SVGA3D_P8; break; default: DRM_ERROR("Invalid format\n"); return -EINVAL; } metadata.mip_levels[0] = 1; metadata.num_sizes = 1; metadata.scanout = true; } else { metadata = new_vfbs->surface->metadata; } metadata.base_size.width = hdisplay; metadata.base_size.height = vdisplay; metadata.base_size.depth = 1; if (vps->surf) { struct drm_vmw_size cur_base_size = vps->surf->metadata.base_size; if (cur_base_size.width != metadata.base_size.width || cur_base_size.height != metadata.base_size.height || vps->surf->metadata.format != metadata.format) { WARN_ON(vps->pinned != 0); vmw_surface_unreference(&vps->surf); } } if (!vps->surf) { ret = vmw_gb_surface_define(dev_priv, &metadata, &vps->surf); if (ret != 0) { DRM_ERROR("Couldn't allocate STDU surface.\n"); return ret; } } } else { /* * prepare_fb and clean_fb should only take care of pinning * and unpinning. References are tracked by state objects. * The only time we add a reference in prepare_fb is if the * state object doesn't have a reference to begin with */ if (vps->surf) { WARN_ON(vps->pinned != 0); vmw_surface_unreference(&vps->surf); } vps->surf = vmw_surface_reference(new_vfbs->surface); } if (vps->surf) { /* Pin new surface before flipping */ ret = vmw_resource_pin(&vps->surf->res, false); if (ret) goto out_srf_unref; vps->pinned++; } vps->content_fb_type = new_content_type; /* * This should only happen if the buffer object is too large to create a * proxy surface for. 
*/ if (vps->content_fb_type == SEPARATE_BO) vps->cpp = new_fb->pitches[0] / new_fb->width; return 0; out_srf_unref: vmw_surface_unreference(&vps->surf); return ret; } static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update, uint32_t num_hits) { return sizeof(struct vmw_stdu_update_gb_image) + sizeof(struct vmw_stdu_update); } static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update, void *cmd, uint32_t num_hits) { struct vmw_du_update_plane_buffer *bo_update = container_of(update, typeof(*bo_update), base); bo_update->fb_left = INT_MAX; bo_update->fb_top = INT_MAX; return 0; } static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *clip, uint32_t fb_x, uint32_t fb_y) { struct vmw_du_update_plane_buffer *bo_update = container_of(update, typeof(*bo_update), base); bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x); bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y); return 0; } static uint32_t vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *bb) { struct vmw_du_update_plane_buffer *bo_update; struct vmw_screen_target_display_unit *stdu; struct vmw_framebuffer_bo *vfbbo; struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0); struct vmw_stdu_update_gb_image *cmd_img = cmd; struct vmw_stdu_update *cmd_update; struct ttm_buffer_object *src_bo, *dst_bo; u32 src_offset, dst_offset; s32 src_pitch, dst_pitch; s32 width, height; bo_update = container_of(update, typeof(*bo_update), base); stdu = container_of(update->du, typeof(*stdu), base); vfbbo = container_of(update->vfb, typeof(*vfbbo), base); width = bb->x2 - bb->x1; height = bb->y2 - bb->y1; diff.cpp = stdu->cpp; dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo; dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp; src_bo = &vfbbo->buffer->tbo; src_pitch = update->vfb->base.pitches[0]; src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left * stdu->cpp; (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo, src_offset, src_pitch, width * stdu->cpp, height, &diff); if (drm_rect_visible(&diff.rect)) { SVGA3dBox *box = &cmd_img->body.box; cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; cmd_img->header.size = sizeof(cmd_img->body); cmd_img->body.image.sid = stdu->display_srf->res.id; cmd_img->body.image.face = 0; cmd_img->body.image.mipmap = 0; box->x = diff.rect.x1; box->y = diff.rect.y1; box->z = 0; box->w = drm_rect_width(&diff.rect); box->h = drm_rect_height(&diff.rect); box->d = 1; cmd_update = (struct vmw_stdu_update *)&cmd_img[1]; vmw_stdu_populate_update(cmd_update, stdu->base.unit, diff.rect.x1, diff.rect.x2, diff.rect.y1, diff.rect.y2); return sizeof(*cmd_img) + sizeof(*cmd_update); } return 0; } /** * vmw_stdu_plane_update_bo - Update display unit for bo backed fb. * @dev_priv: device private. * @plane: plane state. * @old_state: old plane state. * @vfb: framebuffer which is blitted to display unit. * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj. * The returned fence pointer may be NULL in which case the device * has already synchronized. * * Return: 0 on success or a negative error code on failure. 
*/ static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv, struct drm_plane *plane, struct drm_plane_state *old_state, struct vmw_framebuffer *vfb, struct vmw_fence_obj **out_fence) { struct vmw_du_update_plane_buffer bo_update; memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer)); bo_update.base.plane = plane; bo_update.base.old_state = old_state; bo_update.base.dev_priv = dev_priv; bo_update.base.du = vmw_crtc_to_du(plane->state->crtc); bo_update.base.vfb = vfb; bo_update.base.out_fence = out_fence; bo_update.base.mutex = NULL; bo_update.base.intr = false; bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu; bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu; bo_update.base.clip = vmw_stdu_bo_clip_cpu; bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu; return vmw_du_helper_plane_update(&bo_update.base); } static uint32_t vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update, uint32_t num_hits) { struct vmw_framebuffer_surface *vfbs; uint32_t size = 0; vfbs = container_of(update->vfb, typeof(*vfbs), base); if (vfbs->is_bo_proxy) size += sizeof(struct vmw_stdu_update_gb_image) * num_hits; size += sizeof(struct vmw_stdu_update); return size; } static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update, uint32_t num_hits) { struct vmw_framebuffer_surface *vfbs; uint32_t size = 0; vfbs = container_of(update->vfb, typeof(*vfbs), base); if (vfbs->is_bo_proxy) size += sizeof(struct vmw_stdu_update_gb_image) * num_hits; size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) * num_hits + sizeof(struct vmw_stdu_update); return size; } static uint32_t vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd) { struct vmw_framebuffer_surface *vfbs; struct drm_plane_state *state = update->plane->state; struct drm_plane_state *old_state = update->old_state; struct vmw_stdu_update_gb_image *cmd_update = cmd; struct drm_atomic_helper_damage_iter iter; struct drm_rect clip; uint32_t copy_size = 0; vfbs = container_of(update->vfb, typeof(*vfbs), base); /* * proxy surface is special where a buffer object type fb is wrapped * in a surface and need an update gb image command to sync with device. 
*/ drm_atomic_helper_damage_iter_init(&iter, old_state, state); drm_atomic_for_each_plane_damage(&iter, &clip) { SVGA3dBox *box = &cmd_update->body.box; cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; cmd_update->header.size = sizeof(cmd_update->body); cmd_update->body.image.sid = vfbs->surface->res.id; cmd_update->body.image.face = 0; cmd_update->body.image.mipmap = 0; box->x = clip.x1; box->y = clip.y1; box->z = 0; box->w = drm_rect_width(&clip); box->h = drm_rect_height(&clip); box->d = 1; copy_size += sizeof(*cmd_update); cmd_update++; } return copy_size; } static uint32_t vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd, uint32_t num_hits) { struct vmw_screen_target_display_unit *stdu; struct vmw_framebuffer_surface *vfbs; struct vmw_stdu_surface_copy *cmd_copy = cmd; stdu = container_of(update->du, typeof(*stdu), base); vfbs = container_of(update->vfb, typeof(*vfbs), base); cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY; cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) * num_hits; cmd_copy->body.src.sid = vfbs->surface->res.id; cmd_copy->body.dest.sid = stdu->display_srf->res.id; return sizeof(*cmd_copy); } static uint32_t vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *clip, uint32_t fb_x, uint32_t fb_y) { struct SVGA3dCopyBox *box = cmd; box->srcx = fb_x; box->srcy = fb_y; box->srcz = 0; box->x = clip->x1; box->y = clip->y1; box->z = 0; box->w = drm_rect_width(clip); box->h = drm_rect_height(clip); box->d = 1; return sizeof(*box); } static uint32_t vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd, struct drm_rect *bb) { vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1, bb->y2); return sizeof(struct vmw_stdu_update); } /** * vmw_stdu_plane_update_surface - Update display unit for surface backed fb * @dev_priv: Device private * @plane: Plane state * @old_state: Old plane state * @vfb: Framebuffer which is blitted to display unit * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj. * The returned fence pointer may be NULL in which case the device * has already synchronized. * * Return: 0 on success or a negative error code on failure. 
*/ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv, struct drm_plane *plane, struct drm_plane_state *old_state, struct vmw_framebuffer *vfb, struct vmw_fence_obj **out_fence) { struct vmw_du_update_plane srf_update; struct vmw_screen_target_display_unit *stdu; struct vmw_framebuffer_surface *vfbs; stdu = vmw_crtc_to_stdu(plane->state->crtc); vfbs = container_of(vfb, typeof(*vfbs), base); memset(&srf_update, 0, sizeof(struct vmw_du_update_plane)); srf_update.plane = plane; srf_update.old_state = old_state; srf_update.dev_priv = dev_priv; srf_update.du = vmw_crtc_to_du(plane->state->crtc); srf_update.vfb = vfb; srf_update.out_fence = out_fence; srf_update.mutex = &dev_priv->cmdbuf_mutex; srf_update.intr = true; if (vfbs->is_bo_proxy) srf_update.post_prepare = vmw_stdu_surface_update_proxy; if (vfbs->surface->res.id != stdu->display_srf->res.id) { srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size; srf_update.pre_clip = vmw_stdu_surface_populate_copy; srf_update.clip = vmw_stdu_surface_populate_clip; } else { srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size_same_display; } srf_update.post_clip = vmw_stdu_surface_populate_update; return vmw_du_helper_plane_update(&srf_update); } /** * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane * @plane: display plane * @state: Only used to get crtc info * * Formally update stdu->display_srf to the new plane, and bind the new * plane STDU. This function is called during the commit phase when * all the preparation have been done and all the configurations have * been checked. */ static void vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct drm_crtc *crtc = new_state->crtc; struct vmw_screen_target_display_unit *stdu; struct vmw_fence_obj *fence = NULL; struct vmw_private *dev_priv; int ret; /* If case of device error, maintain consistent atomic state */ if (crtc && new_state->fb) { struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_state->fb); stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); stdu->display_srf = vps->surf; stdu->content_fb_type = vps->content_fb_type; stdu->cpp = vps->cpp; ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res); if (ret) DRM_ERROR("Failed to bind surface to STDU.\n"); if (vfb->bo) ret = vmw_stdu_plane_update_bo(dev_priv, plane, old_state, vfb, &fence); else ret = vmw_stdu_plane_update_surface(dev_priv, plane, old_state, vfb, &fence); if (ret) DRM_ERROR("Failed to update STDU.\n"); } else { crtc = old_state->crtc; stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); /* Blank STDU when fb and crtc are NULL */ if (!stdu->defined) return; ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); if (ret) DRM_ERROR("Failed to blank STDU\n"); ret = vmw_stdu_update_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to update STDU.\n"); return; } if (fence) vmw_fence_obj_unreference(&fence); } static const struct drm_plane_funcs vmw_stdu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = vmw_du_primary_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, }; static const struct drm_plane_funcs 
vmw_stdu_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = vmw_du_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, }; /* * Atomic Helpers */ static const struct drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = { .atomic_check = vmw_du_cursor_plane_atomic_check, .atomic_update = vmw_du_cursor_plane_atomic_update, .prepare_fb = vmw_du_cursor_plane_prepare_fb, .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, }; static const struct drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = { .atomic_check = vmw_du_primary_plane_atomic_check, .atomic_update = vmw_stdu_primary_plane_atomic_update, .prepare_fb = vmw_stdu_primary_plane_prepare_fb, .cleanup_fb = vmw_stdu_primary_plane_cleanup_fb, }; static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { .prepare = vmw_stdu_crtc_helper_prepare, .mode_set_nofb = vmw_stdu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, .atomic_enable = vmw_stdu_crtc_atomic_enable, .atomic_disable = vmw_stdu_crtc_atomic_disable, }; /** * vmw_stdu_init - Sets up a Screen Target Display Unit * * @dev_priv: VMW DRM device * @unit: unit number range from 0 to VMWGFX_NUM_DISPLAY_UNITS * * This function is called once per CRTC, and allocates one Screen Target * display unit to represent that CRTC. Since the SVGA device does not separate * out encoder and connector, they are represented as part of the STDU as well. */ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) { struct vmw_screen_target_display_unit *stdu; struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *primary; struct vmw_cursor_plane *cursor; struct drm_crtc *crtc; int ret; stdu = kzalloc(sizeof(*stdu), GFP_KERNEL); if (!stdu) return -ENOMEM; stdu->base.unit = unit; crtc = &stdu->base.crtc; encoder = &stdu->base.encoder; connector = &stdu->base.connector; primary = &stdu->base.primary; cursor = &stdu->base.cursor; stdu->base.pref_active = (unit == 0); stdu->base.pref_width = dev_priv->initial_width; stdu->base.pref_height = dev_priv->initial_height; stdu->base.is_implicit = false; /* Initialize primary plane */ ret = drm_universal_plane_init(dev, primary, 0, &vmw_stdu_plane_funcs, vmw_primary_plane_formats, ARRAY_SIZE(vmw_primary_plane_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { DRM_ERROR("Failed to initialize primary plane"); goto err_free; } drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs); drm_plane_enable_fb_damage_clips(primary); /* Initialize cursor plane */ ret = drm_universal_plane_init(dev, &cursor->base, 0, &vmw_stdu_cursor_funcs, vmw_cursor_plane_formats, ARRAY_SIZE(vmw_cursor_plane_formats), NULL, DRM_PLANE_TYPE_CURSOR, NULL); if (ret) { DRM_ERROR("Failed to initialize cursor plane"); drm_plane_cleanup(&stdu->base.primary); goto err_free; } drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs); ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { DRM_ERROR("Failed to initialize connector\n"); goto err_free; } drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs); connector->status = vmw_du_connector_detect(connector, false); ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs, 
DRM_MODE_ENCODER_VIRTUAL, NULL); if (ret) { DRM_ERROR("Failed to initialize encoder\n"); goto err_free_connector; } (void) drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; ret = drm_connector_register(connector); if (ret) { DRM_ERROR("Failed to register connector\n"); goto err_free_encoder; } ret = drm_crtc_init_with_planes(dev, crtc, primary, &cursor->base, &vmw_stdu_crtc_funcs, NULL); if (ret) { DRM_ERROR("Failed to initialize CRTC\n"); goto err_free_unregister; } drm_crtc_helper_add(crtc, &vmw_stdu_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(crtc, 256); drm_object_attach_property(&connector->base, dev_priv->hotplug_mode_update_property, 1); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); return 0; err_free_unregister: drm_connector_unregister(connector); err_free_encoder: drm_encoder_cleanup(encoder); err_free_connector: drm_connector_cleanup(connector); err_free: kfree(stdu); return ret; } /** * vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit * * @stdu: Screen Target Display Unit to be destroyed * * Clean up after vmw_stdu_init */ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu) { vmw_du_cleanup(&stdu->base); kfree(stdu); } /****************************************************************************** * Screen Target Display KMS Functions * * These functions are called by the common KMS code in vmwgfx_kms.c *****************************************************************************/ /** * vmw_kms_stdu_init_display - Initializes a Screen Target based display * * @dev_priv: VMW DRM device * * This function initialize a Screen Target based display device. It checks * the capability bits to make sure the underlying hardware can support * screen targets, and then creates the maximum number of CRTCs, a.k.a Display * Units, as supported by the display hardware. * * RETURNS: * 0 on success, error code otherwise */ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; int i, ret; /* Do nothing if there's no support for MOBs */ if (!dev_priv->has_mob) return -ENOSYS; if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) return -ENOSYS; dev_priv->active_display_unit = vmw_du_screen_target; for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { ret = vmw_stdu_init(dev_priv, i); if (unlikely(ret != 0)) { drm_err(&dev_priv->drm, "Failed to initialize STDU %d", i); return ret; } } drm_mode_config_reset(dev); return 0; }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
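/*
 * Illustrative sketch (not part of the driver): a minimal, self-contained
 * user-space program showing the bounding-box accumulation that
 * vmw_stdu_bo_cpu_clip() performs over incoming clip rects, and the
 * left/right/top/bottom -> x/y/w/h conversion done by
 * vmw_stdu_populate_update().  The struct and function names below are
 * invented for this example only; they are not driver APIs.
 */
#include <stdint.h>
#include <stdio.h>

struct clip { int32_t x1, y1, x2, y2; };           /* one damage rect */
struct bbox { int32_t left, top, right, bottom; }; /* accumulated bounds */

static int32_t min_s32(int32_t a, int32_t b) { return a < b ? a : b; }
static int32_t max_s32(int32_t a, int32_t b) { return a > b ? a : b; }

/* Start "empty": min-accumulators at max, max-accumulators at min. */
static void bbox_reset(struct bbox *bb)
{
        bb->left = bb->top = INT32_MAX;
        bb->right = bb->bottom = INT32_MIN;
}

/* Grow the bounding box so it also covers one more clip rect. */
static void bbox_add_clip(struct bbox *bb, const struct clip *c)
{
        bb->left   = min_s32(bb->left, c->x1);
        bb->top    = min_s32(bb->top, c->y1);
        bb->right  = max_s32(bb->right, c->x2);
        bb->bottom = max_s32(bb->bottom, c->y2);
}

int main(void)
{
        const struct clip clips[] = {
                {  10,  20, 110, 120 },
                {  50,   5,  90,  60 },
                { 200, 100, 240, 180 },
        };
        struct bbox bb;
        size_t i;

        bbox_reset(&bb);
        for (i = 0; i < sizeof(clips) / sizeof(clips[0]); i++)
                bbox_add_clip(&bb, &clips[i]);

        /* Same conversion the update command uses: w/h from right/bottom. */
        printf("update rect: x=%d y=%d w=%d h=%d\n",
               (int)bb.left, (int)bb.top,
               (int)(bb.right - bb.left), (int)(bb.bottom - bb.top));
        return 0;
}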
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2013 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: * Thomas Hellstrom <[email protected]> * */ #include "vmwgfx_drv.h" #include "ttm_object.h" #include <linux/dma-buf.h> /* * DMA-BUF attach- and mapping methods. No need to implement * these until we have other virtual devices use them. */ static int vmw_prime_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { return -ENOSYS; } static void vmw_prime_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { } static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { return ERR_PTR(-ENOSYS); } static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgb, enum dma_data_direction dir) { } const struct dma_buf_ops vmw_prime_dmabuf_ops = { .attach = vmw_prime_map_attach, .detach = vmw_prime_map_detach, .map_dma_buf = vmw_prime_map_dma_buf, .unmap_dma_buf = vmw_prime_unmap_dma_buf, .release = NULL, }; int vmw_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int fd, u32 *handle) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; return ttm_prime_fd_to_handle(tfile, fd, handle); } int vmw_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
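/*
 * Illustrative user-space sketch (not part of the driver): exercises the
 * PRIME fd<->handle conversion that vmw_prime_handle_to_fd() and
 * vmw_prime_fd_to_handle() service on the kernel side, via the libdrm
 * wrappers drmPrimeHandleToFD()/drmPrimeFDToHandle().  The device path and
 * the way a valid buffer handle is obtained are assumptions made for the
 * example.  Build with:  cc prime_demo.c $(pkg-config --cflags --libs libdrm)
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <xf86drm.h>

int main(int argc, char **argv)
{
        const char *path = argc > 2 ? argv[2] : "/dev/dri/renderD128";
        uint32_t handle, reimported;
        int fd, prime_fd, ret;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <buffer-handle> [device]\n", argv[0]);
                return 1;
        }
        handle = (uint32_t)strtoul(argv[1], NULL, 0);

        fd = open(path, O_RDWR | O_CLOEXEC);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Export: driver-private handle -> dma-buf file descriptor. */
        ret = drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd);
        if (ret) {
                fprintf(stderr, "export failed: %d\n", ret);
                close(fd);
                return 1;
        }

        /* Import it back: dma-buf fd -> handle referring to the same object. */
        ret = drmPrimeFDToHandle(fd, prime_fd, &reimported);
        if (ret)
                fprintf(stderr, "import failed: %d\n", ret);
        else
                printf("handle %u exported as fd %d, reimported as handle %u\n",
                       handle, prime_fd, reimported);

        close(prime_fd);
        close(fd);
        return ret ? 1 : 0;
}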
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include "vmwgfx_drv.h" #include <drm/ttm/ttm_placement.h> #include <linux/idr.h> #include <linux/spinlock.h> #include <linux/kernel.h> struct vmwgfx_gmrid_man { struct ttm_resource_manager manager; spinlock_t lock; struct ida gmr_ida; uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t used_gmr_pages; uint8_t type; }; static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmwgfx_gmrid_man, manager); } static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource **res) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); int id; *res = kmalloc(sizeof(**res), GFP_KERNEL); if (!*res) return -ENOMEM; ttm_resource_init(bo, place, *res); id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) return id; spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { gman->used_gmr_pages += PFN_UP((*res)->size); /* * Because the graphics memory is a soft limit we can try to * expand it instead of letting the userspace apps crash. * We're just going to have a sane limit (half of RAM) * on the number of MOB's that we create and will try to keep * the system running until we reach that. */ if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) { const unsigned long max_graphics_pages = totalram_pages() / 2; uint32_t new_max_pages = 0; DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n"); vmw_host_printf("vmwgfx, warning: mob memory overflow. 
Consider increasing guest RAM and graphicsMemory.\n"); if (gman->max_gmr_pages > (max_graphics_pages / 2)) { DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n"); new_max_pages = max_graphics_pages; } else new_max_pages = gman->max_gmr_pages * 2; if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) { DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n", ((new_max_pages) << (PAGE_SHIFT - 10))); gman->max_gmr_pages = new_max_pages; } else { char buf[256]; snprintf(buf, sizeof(buf), "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n", ((gman->max_gmr_pages) << (PAGE_SHIFT - 10))); vmw_host_printf(buf); DRM_WARN("%s", buf); goto nospace; } } } (*res)->start = id; spin_unlock(&gman->lock); return 0; nospace: gman->used_gmr_pages -= PFN_UP((*res)->size); spin_unlock(&gman->lock); ida_free(&gman->gmr_ida, id); ttm_resource_fini(man, *res); kfree(*res); return -ENOSPC; } static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, struct ttm_resource *res) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); ida_free(&gman->gmr_ida, res->start); spin_lock(&gman->lock); gman->used_gmr_pages -= PFN_UP(res->size); spin_unlock(&gman->lock); ttm_resource_fini(man, res); kfree(res); } static void vmw_gmrid_man_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB); drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n", (gman->type == VMW_PL_MOB) ? "Mob" : "GMR", gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids); } static const struct ttm_resource_manager_func vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) { struct ttm_resource_manager *man; struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); if (unlikely(!gman)) return -ENOMEM; man = &gman->manager; man->func = &vmw_gmrid_manager_func; man->use_tt = true; ttm_resource_manager_init(man, &dev_priv->bdev, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); gman->type = type; switch (type) { case VMW_PL_GMR: gman->max_gmr_ids = dev_priv->max_gmr_ids; gman->max_gmr_pages = dev_priv->max_gmr_pages; break; case VMW_PL_MOB: gman->max_gmr_ids = VMWGFX_NUM_MOB; gman->max_gmr_pages = dev_priv->max_mob_pages; break; default: BUG(); } ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager); ttm_resource_manager_set_used(man, true); return 0; } void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) { struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type); struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); ttm_resource_manager_set_used(man, false); ttm_resource_manager_evict_all(&dev_priv->bdev, man); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, type, NULL); ida_destroy(&gman->gmr_ida); kfree(gman); } static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .alloc = vmw_gmrid_man_get_node, .free = vmw_gmrid_man_put_node, .debug = vmw_gmrid_man_debug };
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
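/*
 * Illustrative sketch (not part of the driver): models the soft-limit
 * accounting that vmw_gmrid_man_get_node() applies to GMR/MOB pages --
 * when usage exceeds the current limit, grow the limit (doubling, capped
 * at a hard ceiling such as half of guest RAM) instead of failing, and
 * only report -ENOSPC once growth is no longer possible.  The struct and
 * the numbers below are invented for this example.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct page_pool {
        uint32_t max_pages;     /* current soft limit */
        uint32_t used_pages;    /* pages handed out so far */
        uint32_t hard_cap;      /* ceiling for growth, in pages */
};

static int pool_alloc(struct page_pool *p, uint32_t npages)
{
        uint32_t new_max;

        p->used_pages += npages;
        if (p->used_pages <= p->max_pages)
                return 0;

        /* Over the soft limit: try to grow it rather than fail outright. */
        new_max = p->max_pages > p->hard_cap / 2 ?
                  p->hard_cap : p->max_pages * 2;

        if (new_max > p->max_pages && new_max >= p->used_pages) {
                printf("growing soft limit %u -> %u pages\n",
                       p->max_pages, new_max);
                p->max_pages = new_max;
                return 0;
        }

        p->used_pages -= npages;        /* roll back the accounting */
        return -ENOSPC;
}

int main(void)
{
        struct page_pool pool = { .max_pages = 1024, .hard_cap = 8192 };
        uint32_t i;

        for (i = 0; i < 12; i++) {
                int ret = pool_alloc(&pool, 1000);

                printf("alloc #%u: used=%u max=%u -> %d\n",
                       i, pool.used_pages, pool.max_pages, ret);
                if (ret)
                        break;
        }
        return 0;
}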
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" /** * struct vmw_user_simple_resource - User-space simple resource struct * * @base: The TTM base object implementing user-space visibility. * @simple: The embedded struct vmw_simple_resource. */ struct vmw_user_simple_resource { struct ttm_base_object base; struct vmw_simple_resource simple; /* * Nothing to be placed after @simple, since size of @simple is * unknown. */ }; /** * vmw_simple_resource_init - Initialize a simple resource object. * * @dev_priv: Pointer to a struct device private. * @simple: The struct vmw_simple_resource to initialize. * @data: Data passed to the information initialization function. * @res_free: Function pointer to destroy the simple resource. * * Returns: * 0 if succeeded. * Negative error value if error, in which case the resource will have been * freed. */ static int vmw_simple_resource_init(struct vmw_private *dev_priv, struct vmw_simple_resource *simple, void *data, void (*res_free)(struct vmw_resource *res)) { struct vmw_resource *res = &simple->res; int ret; ret = vmw_resource_init(dev_priv, res, false, res_free, &simple->func->res_func); if (ret) { res_free(res); return ret; } ret = simple->func->init(res, data); if (ret) { vmw_resource_unreference(&res); return ret; } simple->res.hw_destroy = simple->func->hw_destroy; return 0; } /** * vmw_simple_resource_free - Free a simple resource object. * * @res: The struct vmw_resource member of the simple resource object. * * Frees memory for the object. */ static void vmw_simple_resource_free(struct vmw_resource *res) { struct vmw_user_simple_resource *usimple = container_of(res, struct vmw_user_simple_resource, simple.res); ttm_base_object_kfree(usimple, base); } /** * vmw_simple_resource_base_release - TTM object release callback * * @p_base: The struct ttm_base_object member of the simple resource object. * * Called when the last reference to the embedded struct ttm_base_object is * gone. Typically results in an object free, unless there are other * references to the embedded struct vmw_resource. 
*/ static void vmw_simple_resource_base_release(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct vmw_user_simple_resource *usimple = container_of(base, struct vmw_user_simple_resource, base); struct vmw_resource *res = &usimple->simple.res; *p_base = NULL; vmw_resource_unreference(&res); } /** * vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to * create a struct vmw_simple_resource. * * @dev: Pointer to a struct drm device. * @data: Ioctl argument. * @file_priv: Pointer to a struct drm_file identifying the caller. * @func: Pointer to a struct vmw_simple_resource_func identifying the * simple resource type. * * Returns: * 0 if success, * Negative error value on error. */ int vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv, const struct vmw_simple_resource_func *func) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_user_simple_resource *usimple; struct vmw_resource *res; struct vmw_resource *tmp; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; size_t alloc_size; int ret; alloc_size = offsetof(struct vmw_user_simple_resource, simple) + func->size; usimple = kzalloc(alloc_size, GFP_KERNEL); if (!usimple) { ret = -ENOMEM; goto out_ret; } usimple->simple.func = func; res = &usimple->simple.res; usimple->base.shareable = false; usimple->base.tfile = NULL; /* * From here on, the destructor takes over resource freeing. */ ret = vmw_simple_resource_init(dev_priv, &usimple->simple, data, vmw_simple_resource_free); if (ret) goto out_ret; tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &usimple->base, false, func->ttm_res_type, &vmw_simple_resource_base_release); if (ret) { vmw_resource_unreference(&tmp); goto out_err; } func->set_arg_handle(data, usimple->base.handle); out_err: vmw_resource_unreference(&res); out_ret: return ret; } /** * vmw_simple_resource_lookup - Look up a simple resource from its user-space * handle. * * @tfile: struct ttm_object_file identifying the caller. * @handle: The user-space handle. * @func: The struct vmw_simple_resource_func identifying the simple resource * type. * * Returns: Refcounted pointer to the embedded struct vmw_resource if * successful. Error pointer otherwise. */ struct vmw_resource * vmw_simple_resource_lookup(struct ttm_object_file *tfile, uint32_t handle, const struct vmw_simple_resource_func *func) { struct vmw_user_simple_resource *usimple; struct ttm_base_object *base; struct vmw_resource *res; base = ttm_base_object_lookup(tfile, handle); if (!base) { VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n", func->res_func.type_name, (unsigned long) handle); return ERR_PTR(-ESRCH); } if (ttm_base_object_type(base) != func->ttm_res_type) { ttm_base_object_unref(&base); VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n", func->res_func.type_name, (unsigned long) handle); return ERR_PTR(-EINVAL); } usimple = container_of(base, typeof(*usimple), base); res = vmw_resource_reference(&usimple->simple.res); ttm_base_object_unref(&base); return res; }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
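/*
 * Illustrative sketch (not part of the driver): the lookup pattern used by
 * vmw_simple_resource_lookup() -- resolve a user handle to a base object,
 * reject it if the object type doesn't match, take a reference on the
 * embedded resource for the caller, and drop the lookup reference before
 * returning.  The tiny handle table, types and names below are invented
 * for this example.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum obj_type { OBJ_STREAM, OBJ_SURFACE };

struct base_obj { enum obj_type type; int refcount; };
struct resource { int refcount; };
struct simple { struct base_obj base; struct resource res; };

/* Toy handle table: the handle is simply the array index. */
static struct simple objects[4] = {
        [1] = { .base = { OBJ_STREAM, 1 },  .res = { 1 } },
        [2] = { .base = { OBJ_SURFACE, 1 }, .res = { 1 } },
};

static struct base_obj *base_lookup(uint32_t handle)
{
        if (handle >= 4 || objects[handle].base.refcount == 0)
                return NULL;
        objects[handle].base.refcount++;        /* lookup reference */
        return &objects[handle].base;
}

static void base_unref(struct base_obj *b) { b->refcount--; }

static struct resource *simple_lookup(uint32_t handle, enum obj_type want,
                                      int *err)
{
        struct base_obj *base = base_lookup(handle);
        struct simple *s;

        if (!base) {
                *err = -ESRCH;          /* unknown handle */
                return NULL;
        }
        if (base->type != want) {
                base_unref(base);
                *err = -EINVAL;         /* valid handle, wrong type */
                return NULL;
        }
        /* container_of-style step from the base object to its container. */
        s = (struct simple *)((char *)base - offsetof(struct simple, base));
        s->res.refcount++;              /* reference handed to the caller */
        base_unref(base);               /* drop the lookup reference */
        *err = 0;
        return &s->res;
}

int main(void)
{
        int err;
        struct resource *res;

        res = simple_lookup(2, OBJ_SURFACE, &err);
        printf("lookup(2, SURFACE): %s (err=%d)\n", res ? "ok" : "fail", err);
        res = simple_lookup(2, OBJ_STREAM, &err);
        printf("lookup(2, STREAM):  %s (err=%d)\n", res ? "ok" : "fail", err);
        return 0;
}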
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_binding.h" #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_mksstat.h" #include "vmwgfx_so.h" #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> #include <linux/sync_file.h> #include <linux/hashtable.h> /* * Helper macro to get dx_ctx_node if available otherwise print an error * message. This is for use in command verifier function where if dx_ctx_node * is not set then command is invalid. */ #define VMW_GET_CTX_NODE(__sw_context) \ ({ \ __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \ VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \ __sw_context->dx_ctx_node; \ }); \ }) #define VMW_DECLARE_CMD_VAR(__var, __type) \ struct { \ SVGA3dCmdHeader header; \ __type body; \ } __var /** * struct vmw_relocation - Buffer object relocation * * @head: List head for the command submission context's relocation list * @vbo: Non ref-counted pointer to buffer object * @mob_loc: Pointer to location for mob id to be modified * @location: Pointer to location for guest pointer to be modified */ struct vmw_relocation { struct list_head head; struct vmw_bo *vbo; union { SVGAMobId *mob_loc; SVGAGuestPtr *location; }; }; /** * enum vmw_resource_relocation_type - Relocation type for resources * * @vmw_res_rel_normal: Traditional relocation. The resource id in the * command stream is replaced with the actual id after validation. * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced * with a NOP. * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after * validation is -1, the command is replaced with a NOP. Otherwise no action. * @vmw_res_rel_max: Last value in the enum - used for error checking */ enum vmw_resource_relocation_type { vmw_res_rel_normal, vmw_res_rel_nop, vmw_res_rel_cond_nop, vmw_res_rel_max }; /** * struct vmw_resource_relocation - Relocation info for resources * * @head: List head for the software context's relocation list. * @res: Non-ref-counted pointer to the resource. * @offset: Offset of single byte entries into the command buffer where the id * that needs fixup is located. * @rel_type: Type of relocation. 
*/ struct vmw_resource_relocation { struct list_head head; const struct vmw_resource *res; u32 offset:29; enum vmw_resource_relocation_type rel_type:3; }; /** * struct vmw_ctx_validation_info - Extra validation metadata for contexts * * @head: List head of context list * @ctx: The context resource * @cur: The context's persistent binding state * @staged: The binding state changes of this command buffer */ struct vmw_ctx_validation_info { struct list_head head; struct vmw_resource *ctx; struct vmw_ctx_binding_state *cur; struct vmw_ctx_binding_state *staged; }; /** * struct vmw_cmd_entry - Describe a command for the verifier * * @func: Call-back to handle the command. * @user_allow: Whether allowed from the execbuf ioctl. * @gb_disable: Whether disabled if guest-backed objects are available. * @gb_enable: Whether enabled iff guest-backed objects are available. * @cmd_name: Name of the command. */ struct vmw_cmd_entry { int (*func) (struct vmw_private *, struct vmw_sw_context *, SVGA3dCmdHeader *); bool user_allow; bool gb_disable; bool gb_enable; const char *cmd_name; }; #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ (_gb_disable), (_gb_enable), #_cmd} static int vmw_resource_context_res_add(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *ctx); static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, struct vmw_bo **vmw_bo_p); /** * vmw_ptr_diff - Compute the offset from a to b in bytes * * @a: A starting pointer. * @b: A pointer offset in the same address space. * * Returns: The offset in bytes between the two pointers. */ static size_t vmw_ptr_diff(void *a, void *b) { return (unsigned long) b - (unsigned long) a; } /** * vmw_execbuf_bindings_commit - Commit modified binding state * * @sw_context: The command submission context * @backoff: Whether this is part of the error path and binding state changes * should be ignored */ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, bool backoff) { struct vmw_ctx_validation_info *entry; list_for_each_entry(entry, &sw_context->ctx_list, head) { if (!backoff) vmw_binding_state_commit(entry->cur, entry->staged); if (entry->staged != sw_context->staged_bindings) vmw_binding_state_free(entry->staged); else sw_context->staged_bindings_inuse = false; } /* List entries are freed with the validation context */ INIT_LIST_HEAD(&sw_context->ctx_list); } /** * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced * * @sw_context: The command submission context */ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) { if (sw_context->dx_query_mob) vmw_context_bind_dx_query(sw_context->dx_query_ctx, sw_context->dx_query_mob); } /** * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to * the validate list. 
* * @dev_priv: Pointer to the device private: * @sw_context: The command submission context * @res: Pointer to the resource * @node: The validation node holding the context resource metadata */ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *res, struct vmw_ctx_validation_info *node) { int ret; ret = vmw_resource_context_res_add(dev_priv, sw_context, res); if (unlikely(ret != 0)) goto out_err; if (!sw_context->staged_bindings) { sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv); if (IS_ERR(sw_context->staged_bindings)) { ret = PTR_ERR(sw_context->staged_bindings); sw_context->staged_bindings = NULL; goto out_err; } } if (sw_context->staged_bindings_inuse) { node->staged = vmw_binding_state_alloc(dev_priv); if (IS_ERR(node->staged)) { ret = PTR_ERR(node->staged); node->staged = NULL; goto out_err; } } else { node->staged = sw_context->staged_bindings; sw_context->staged_bindings_inuse = true; } node->ctx = res; node->cur = vmw_context_binding_state(res); list_add_tail(&node->head, &sw_context->ctx_list); return 0; out_err: return ret; } /** * vmw_execbuf_res_size - calculate extra size fore the resource validation node * * @dev_priv: Pointer to the device private struct. * @res_type: The resource type. * * Guest-backed contexts and DX contexts require extra size to store execbuf * private information in the validation node. Typically the binding manager * associated data structures. * * Returns: The extra size requirement based on resource type. */ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, enum vmw_res_type res_type) { return (res_type == vmw_res_dx_context || (res_type == vmw_res_context && dev_priv->has_mob)) ? sizeof(struct vmw_ctx_validation_info) : 0; } /** * vmw_execbuf_rcache_update - Update a resource-node cache entry * * @rcache: Pointer to the entry to update. * @res: Pointer to the resource. * @private: Pointer to the execbuf-private space in the resource validation * node. */ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, struct vmw_resource *res, void *private) { rcache->res = res; rcache->private = private; rcache->valid = 1; rcache->valid_handle = 0; } enum vmw_val_add_flags { vmw_val_add_flag_none = 0, vmw_val_add_flag_noctx = 1 << 0, }; /** * vmw_execbuf_res_val_add - Add a resource to the validation list. * * @sw_context: Pointer to the software context. * @res: Unreferenced rcu-protected pointer to the resource. * @dirty: Whether to change dirty status. * @flags: specifies whether to use the context or not * * Returns: 0 on success. Negative error code on failure. Typical error codes * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed. 
*/ static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *res, u32 dirty, u32 flags) { struct vmw_private *dev_priv = res->dev_priv; int ret; enum vmw_res_type res_type = vmw_res_type(res); struct vmw_res_cache_entry *rcache; struct vmw_ctx_validation_info *ctx_info; bool first_usage; unsigned int priv_size; rcache = &sw_context->res_cache[res_type]; if (likely(rcache->valid && rcache->res == res)) { if (dirty) vmw_validation_res_set_dirty(sw_context->ctx, rcache->private, dirty); return 0; } if ((flags & vmw_val_add_flag_noctx) != 0) { ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty, (void **)&ctx_info, NULL); if (ret) return ret; } else { priv_size = vmw_execbuf_res_size(dev_priv, res_type); ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size, dirty, (void **)&ctx_info, &first_usage); if (ret) return ret; if (priv_size && first_usage) { ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res, ctx_info); if (ret) { VMW_DEBUG_USER("Failed first usage context setup.\n"); return ret; } } } vmw_execbuf_rcache_update(rcache, res, ctx_info); return 0; } /** * vmw_view_res_val_add - Add a view and the surface it's pointing to to the * validation list * * @sw_context: The software context holding the validation list. * @view: Pointer to the view resource. * * Returns 0 if success, negative error code otherwise. */ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *view) { int ret; /* * First add the resource the view is pointing to, otherwise it may be * swapped out when the view is validated. */ ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view), vmw_view_dirtying(view), vmw_val_add_flag_noctx); if (ret) return ret; return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); } /** * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing * to to the validation list. * * @sw_context: The software context holding the validation list. * @view_type: The view type to look up. * @id: view id of the view. * * The view is represented by a view id and the DX context it's created on, or * scheduled for creation on. If there is no DX context set, the function will * return an -EINVAL error pointer. * * Returns: Unreferenced pointer to the resource on success, negative error * pointer on failure. */ static struct vmw_resource * vmw_view_id_val_add(struct vmw_sw_context *sw_context, enum vmw_view_type view_type, u32 id) { struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *view; int ret; if (!ctx_node) return ERR_PTR(-EINVAL); view = vmw_view_lookup(sw_context->man, view_type, id); if (IS_ERR(view)) return view; ret = vmw_view_res_val_add(sw_context, view); if (ret) return ERR_PTR(ret); return view; } /** * vmw_resource_context_res_add - Put resources previously bound to a context on * the validation list * * @dev_priv: Pointer to a device private structure * @sw_context: Pointer to a software context used for this command submission * @ctx: Pointer to the context resource * * This function puts all resources that were previously bound to @ctx on the * resource validation list. 
This is part of the context state reemission */ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *ctx) { struct list_head *binding_list; struct vmw_ctx_bindinfo *entry; int ret = 0; struct vmw_resource *res; u32 i; u32 cotable_max = has_sm5_context(ctx->dev_priv) ? SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX; /* Add all cotables to the validation list. */ if (has_sm4_context(dev_priv) && vmw_res_type(ctx) == vmw_res_dx_context) { for (i = 0; i < cotable_max; ++i) { res = vmw_context_cotable(ctx, i); if (IS_ERR(res)) continue; ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET, vmw_val_add_flag_noctx); if (unlikely(ret != 0)) return ret; } } /* Add all resources bound to the context to the validation list */ mutex_lock(&dev_priv->binding_mutex); binding_list = vmw_context_binding_list(ctx); list_for_each_entry(entry, binding_list, ctx_list) { if (vmw_res_type(entry->res) == vmw_res_view) ret = vmw_view_res_val_add(sw_context, entry->res); else ret = vmw_execbuf_res_val_add(sw_context, entry->res, vmw_binding_dirtying(entry->bt), vmw_val_add_flag_noctx); if (unlikely(ret != 0)) break; } if (has_sm4_context(dev_priv) && vmw_res_type(ctx) == vmw_res_dx_context) { struct vmw_bo *dx_query_mob; dx_query_mob = vmw_context_get_dx_query_mob(ctx); if (dx_query_mob) { vmw_bo_placement_set(dx_query_mob, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); ret = vmw_validation_add_bo(sw_context->ctx, dx_query_mob); } } mutex_unlock(&dev_priv->binding_mutex); return ret; } /** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @sw_context: Pointer to the software context. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the id * that needs fixup is located. Granularity is one byte. * @rel_type: Relocation type. */ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, const struct vmw_resource *res, unsigned long offset, enum vmw_resource_relocation_type rel_type) { struct vmw_resource_relocation *rel; rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel)); if (unlikely(!rel)) { VMW_DEBUG_USER("Failed to allocate a resource relocation.\n"); return -ENOMEM; } rel->res = res; rel->offset = offset; rel->rel_type = rel_type; list_add_tail(&rel->head, &sw_context->res_relocations); return 0; } /** * vmw_resource_relocations_free - Free all relocations on a list * * @list: Pointer to the head of the relocation list */ static void vmw_resource_relocations_free(struct list_head *list) { /* Memory is validation context memory, so no need to free it */ INIT_LIST_HEAD(list); } /** * vmw_resource_relocations_apply - Apply all relocations on a list * * @cb: Pointer to the start of the command buffer bein patch. This need not be * the same buffer as the one being parsed when the relocation list was built, * but the contents must be the same modulo the resource ids. * @list: Pointer to the head of the relocation list. 
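 *
 * Illustrative example (the offset and id are made up): a relocation recorded
 * at byte offset 16 for a resource that validated to device id 5 makes the
 * vmw_res_rel_normal case below perform
 *
 *   *(u32 *)((unsigned long)cb + 16) = 5;
 *
 * so the command buffer ends up carrying device ids rather than the
 * user-space handles it was submitted with.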
*/ static void vmw_resource_relocations_apply(uint32_t *cb, struct list_head *list) { struct vmw_resource_relocation *rel; /* Validate the struct vmw_resource_relocation member size */ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); list_for_each_entry(rel, list, head) { u32 *addr = (u32 *)((unsigned long) cb + rel->offset); switch (rel->rel_type) { case vmw_res_rel_normal: *addr = rel->res->id; break; case vmw_res_rel_nop: *addr = SVGA_3D_CMD_NOP; break; default: if (rel->res->id == -1) *addr = SVGA_3D_CMD_NOP; break; } } } static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { return 0; } /** * vmw_resources_reserve - Reserve all resources on the sw_context's resource * list. * * @sw_context: Pointer to the software context. * * Note that since vmware's command submission currently is protected by the * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since * only a single thread at once will attempt this. */ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) { int ret; ret = vmw_validation_res_reserve(sw_context->ctx, true); if (ret) return ret; if (sw_context->dx_query_mob) { struct vmw_bo *expected_dx_query_mob; expected_dx_query_mob = vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); if (expected_dx_query_mob && expected_dx_query_mob != sw_context->dx_query_mob) { ret = -EINVAL; } } return ret; } /** * vmw_cmd_res_check - Check that a resource is present and if so, put it on the * resource validation list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @dirty: Whether to change dirty status. * @converter: User-space visible type specific information. * @id_loc: Pointer to the location in the command buffer currently being parsed * from where the user-space resource id handle is located. * @p_res: Pointer to pointer to resource validation node. Populated on * exit.
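 *
 * Typical use, mirroring the verifiers later in this file (shown for a
 * surface id embedded in the command body being parsed):
 *
 *   ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 *                           VMW_RES_DIRTY_NONE, user_surface_converter,
 *                           &cmd->body.sid, NULL);
 *
 * The id location is also recorded as a relocation so that the user-space
 * handle can be replaced with the device id once the resource has been
 * validated.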
*/ static int vmw_cmd_res_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, enum vmw_res_type res_type, u32 dirty, const struct vmw_user_resource_conv *converter, uint32_t *id_loc, struct vmw_resource **p_res) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; struct vmw_resource *res; int ret = 0; bool needs_unref = false; if (p_res) *p_res = NULL; if (*id_loc == SVGA3D_INVALID_ID) { if (res_type == vmw_res_context) { VMW_DEBUG_USER("Illegal context invalid id.\n"); return -EINVAL; } return 0; } if (likely(rcache->valid_handle && *id_loc == rcache->handle)) { res = rcache->res; if (dirty) vmw_validation_res_set_dirty(sw_context->ctx, rcache->private, dirty); } else { unsigned int size = vmw_execbuf_res_size(dev_priv, res_type); ret = vmw_validation_preload_res(sw_context->ctx, size); if (ret) return ret; ret = vmw_user_resource_lookup_handle (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res); if (ret != 0) { VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n", (unsigned int) *id_loc); return ret; } needs_unref = true; ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none); if (unlikely(ret != 0)) goto res_check_done; if (rcache->valid && rcache->res == res) { rcache->valid_handle = true; rcache->handle = *id_loc; } } ret = vmw_resource_relocation_add(sw_context, res, vmw_ptr_diff(sw_context->buf_start, id_loc), vmw_res_rel_normal); if (p_res) *p_res = res; res_check_done: if (needs_unref) vmw_resource_unreference(&res); return ret; } /** * vmw_rebind_all_dx_query - Rebind DX query associated with the context * * @ctx_res: context the query belongs to * * This function assumes binding_mutex is held. */ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) { struct vmw_private *dev_priv = ctx_res->dev_priv; struct vmw_bo *dx_query_mob; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery); dx_query_mob = vmw_context_get_dx_query_mob(ctx_res); if (!dx_query_mob || dx_query_mob->dx_query_ctx) return 0; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id); if (cmd == NULL) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; cmd->header.size = sizeof(cmd->body); cmd->body.cid = ctx_res->id; cmd->body.mobid = dx_query_mob->tbo.resource->start; vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_context_bind_dx_query(ctx_res, dx_query_mob); return 0; } /** * vmw_rebind_contexts - Rebind all resources previously bound to referenced * contexts. * * @sw_context: Pointer to the software context. * * Rebind context binding points that have been scrubbed because of eviction. */ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) { struct vmw_ctx_validation_info *val; int ret; list_for_each_entry(val, &sw_context->ctx_list, head) { ret = vmw_binding_rebind_all(val->cur); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) VMW_DEBUG_USER("Failed to rebind context.\n"); return ret; } ret = vmw_rebind_all_dx_query(val->ctx); if (ret != 0) { VMW_DEBUG_USER("Failed to rebind queries.\n"); return ret; } } return 0; } /** * vmw_view_bindings_add - Add an array of view bindings to a context binding * state tracker. * * @sw_context: The execbuf state used for this command. * @view_type: View type for the bindings. * @binding_type: Binding type for the bindings. * @shader_slot: The shader slot to user for the bindings. * @view_ids: Array of view ids to be bound. * @num_views: Number of view ids in @view_ids. * @first_slot: The binding slot to be used for the first view id in @view_ids. 
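 *
 * Callers typically derive the view id array and its length straight from the
 * command stream; illustrative sketch following vmw_cmd_dx_set_shader_res()
 * below:
 *
 *   num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
 *                 sizeof(SVGA3dShaderResourceViewId);
 *   ret = vmw_view_bindings_add(sw_context, vmw_view_sr, vmw_ctx_binding_sr,
 *                               cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *                               (void *)&cmd[1], num_sr_view,
 *                               cmd->body.startView);
 *
 * so the number of bindings is bounded by the command size rather than by a
 * separately supplied count.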
*/ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, enum vmw_view_type view_type, enum vmw_ctx_binding_type binding_type, uint32 shader_slot, uint32 view_ids[], u32 num_views, u32 first_slot) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); u32 i; if (!ctx_node) return -EINVAL; for (i = 0; i < num_views; ++i) { struct vmw_ctx_bindinfo_view binding; struct vmw_resource *view = NULL; if (view_ids[i] != SVGA3D_INVALID_ID) { view = vmw_view_id_val_add(sw_context, view_type, view_ids[i]); if (IS_ERR(view)) { VMW_DEBUG_USER("View not found.\n"); return PTR_ERR(view); } } binding.bi.ctx = ctx_node->ctx; binding.bi.res = view; binding.bi.bt = binding_type; binding.shader_slot = shader_slot; binding.slot = first_slot + i; vmw_binding_add(ctx_node->staged, &binding.bi, shader_slot, binding.slot); } return 0; } /** * vmw_cmd_cid_check - Check a command header for valid context information. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @header: A command header with an embedded user-space context handle. * * Convenience function: Call vmw_cmd_res_check with the user-space context * handle embedded in @header. */ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, uint32_t) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body, NULL); } /** * vmw_execbuf_info_from_res - Get the private validation metadata for a * recently validated resource * * @sw_context: Pointer to the command submission context * @res: The resource * * The resource pointed to by @res needs to be present in the command submission * context's resource cache and hence the last resource of that type to be * processed by the validation code. 
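 *
 * In practice the verifiers below call this immediately after
 * vmw_cmd_res_check() has validated the same resource (see for instance
 * vmw_cmd_set_render_target_check()), which is what keeps the cache entry
 * current.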
* * Return: a pointer to the private metadata of the resource, or NULL if it * wasn't found */ static struct vmw_ctx_validation_info * vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, struct vmw_resource *res) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[vmw_res_type(res)]; if (rcache->valid && rcache->res == res) return rcache->private; WARN_ON_ONCE(true); return NULL; } static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget); struct vmw_resource *ctx; struct vmw_resource *res; int ret; cmd = container_of(header, typeof(*cmd), header); if (cmd->body.type >= SVGA3D_RT_MAX) { VMW_DEBUG_USER("Illegal render target type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret; ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.target.sid, &res); if (unlikely(ret)) return ret; if (dev_priv->has_mob) { struct vmw_ctx_bindinfo_view binding; struct vmw_ctx_validation_info *node; node = vmw_execbuf_info_from_res(sw_context, ctx); if (!node) return -EINVAL; binding.bi.ctx = ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_rt; binding.slot = cmd->body.type; vmw_binding_add(node->staged, &binding.bi, 0, binding.slot); } return 0; } static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.src.sid, NULL); if (ret) return ret; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.dest.sid, NULL); } static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.src, NULL); if (ret != 0) return ret; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.dest, NULL); } static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.srcSid, NULL); if (ret != 0) return ret; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.dstSid, NULL); } static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.src.sid, NULL); if (unlikely(ret != 0)) return ret; return 
vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.dest.sid, NULL); } static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.srcImage.sid, NULL); } static int vmw_cmd_present_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.sid, NULL); } /** * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. * * @dev_priv: The device private structure. * @new_query_bo: The new buffer holding query results. * @sw_context: The software context used for this command submission. * * This function checks whether @new_query_bo is suitable for holding query * results, and if another buffer currently is pinned for query results. If so, * the function prepares the state of @sw_context for switching pinned buffers * after successful submission of the current command batch. */ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, struct vmw_bo *new_query_bo, struct vmw_sw_context *sw_context) { struct vmw_res_cache_entry *ctx_entry = &sw_context->res_cache[vmw_res_context]; int ret; BUG_ON(!ctx_entry->valid); sw_context->last_query_ctx = ctx_entry->res; if (unlikely(new_query_bo != sw_context->cur_query_bo)) { if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) { VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL; } if (unlikely(sw_context->cur_query_bo != NULL)) { sw_context->needs_post_query_barrier = true; vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo); ret = vmw_validation_add_bo(sw_context->ctx, sw_context->cur_query_bo); if (unlikely(ret != 0)) return ret; } sw_context->cur_query_bo = new_query_bo; vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo); ret = vmw_validation_add_bo(sw_context->ctx, dev_priv->dummy_query_bo); if (unlikely(ret != 0)) return ret; } return 0; } /** * vmw_query_bo_switch_commit - Finalize switching pinned query buffer * * @dev_priv: The device private structure. * @sw_context: The software context used for this command submission batch. * * This function will check if we're switching query buffers, and will then, * issue a dummy occlusion query wait used as a query barrier. When the fence * object following that query wait has signaled, we are sure that all preceding * queries have finished, and the old query buffer can be unpinned. However, * since both the new query buffer and the old one are fenced with that fence, * we can do an asynchronus unpin now, and be sure that the old query buffer * won't be moved until the fence has signaled. * * As mentioned above, both the new - and old query buffers need to be fenced * using a sequence emitted *after* calling this function. */ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { /* * The validate list should still hold references to all * contexts here. 
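 * The context can therefore be picked up from the resource cache below
 * without taking an extra reference.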
*/ if (sw_context->needs_post_query_barrier) { struct vmw_res_cache_entry *ctx_entry = &sw_context->res_cache[vmw_res_context]; struct vmw_resource *ctx; int ret; BUG_ON(!ctx_entry->valid); ctx = ctx_entry->res; ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id); if (unlikely(ret != 0)) VMW_DEBUG_USER("Out of fifo space for dummy query.\n"); } if (dev_priv->pinned_bo != sw_context->cur_query_bo) { if (dev_priv->pinned_bo) { vmw_bo_pin_reserved(dev_priv->pinned_bo, false); vmw_bo_unreference(&dev_priv->pinned_bo); } if (!sw_context->needs_post_query_barrier) { vmw_bo_pin_reserved(sw_context->cur_query_bo, true); /* * We pin also the dummy_query_bo buffer so that we * don't need to validate it when emitting dummy queries * in context destroy paths. */ if (!dev_priv->dummy_query_bo_pinned) { vmw_bo_pin_reserved(dev_priv->dummy_query_bo, true); dev_priv->dummy_query_bo_pinned = true; } BUG_ON(sw_context->last_query_ctx == NULL); dev_priv->query_cid = sw_context->last_query_ctx->id; dev_priv->query_cid_valid = true; dev_priv->pinned_bo = vmw_bo_reference(sw_context->cur_query_bo); } } } /** * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle * to a MOB id. * * @dev_priv: Pointer to a device private structure. * @sw_context: The software context used for this command batch validation. * @id: Pointer to the user-space handle to be translated. * @vmw_bo_p: Points to a location that, on successful return will carry a * non-reference-counted pointer to the buffer object identified by the * user-space handle in @id. * * This function saves information needed to translate a user-space buffer * handle to a MOB id. The translation does not take place immediately, but * during a call to vmw_apply_relocations(). * * This function builds a relocation list and a list of buffers to validate. The * former needs to be freed using either vmw_apply_relocations() or * vmw_free_relocations(). The latter needs to be freed using * vmw_clear_validations(). */ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, struct vmw_bo **vmw_bo_p) { struct vmw_bo *vmw_bo; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; vmw_validation_preload_bo(sw_context->ctx); ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); if (ret != 0) { drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n"); return ret; } vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); vmw_user_bo_unref(vmw_bo); if (unlikely(ret != 0)) return ret; reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); if (!reloc) return -ENOMEM; reloc->mob_loc = id; reloc->vbo = vmw_bo; *vmw_bo_p = vmw_bo; list_add_tail(&reloc->head, &sw_context->bo_relocations); return 0; } /** * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle * to a valid SVGAGuestPtr * * @dev_priv: Pointer to a device private structure. * @sw_context: The software context used for this command batch validation. * @ptr: Pointer to the user-space handle to be translated. * @vmw_bo_p: Points to a location that, on successful return will carry a * non-reference-counted pointer to the DMA buffer identified by the user-space * handle in @ptr. * * This function saves information needed to translate a user-space buffer * handle to a valid SVGAGuestPtr. The translation does not take place * immediately, but during a call to vmw_apply_relocations().
* * This function builds a relocation list and a list of buffers to validate. * The former needs to be freed using either vmw_apply_relocations() or * vmw_free_relocations(). The latter needs to be freed using * vmw_clear_validations(). */ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, struct vmw_bo **vmw_bo_p) { struct vmw_bo *vmw_bo; uint32_t handle = ptr->gmrId; struct vmw_relocation *reloc; int ret; vmw_validation_preload_bo(sw_context->ctx); ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); if (ret != 0) { drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n"); return ret; } vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); vmw_user_bo_unref(vmw_bo); if (unlikely(ret != 0)) return ret; reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); if (!reloc) return -ENOMEM; reloc->location = ptr; reloc->vbo = vmw_bo; *vmw_bo_p = vmw_bo; list_add_tail(&reloc->head, &sw_context->bo_relocations); return 0; } /** * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. * * This function adds the new query into the query COTABLE. */ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery); struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_resource *cotable_res; int ret; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); if (cmd->body.type < SVGA3D_QUERYTYPE_MIN || cmd->body.type >= SVGA3D_QUERYTYPE_MAX) return -EINVAL; cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); ret = vmw_cotable_notify(cotable_res, cmd->body.queryId); return ret; } /** * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. * * The query bind operation will eventually associate the query ID with its * backing MOB. In this function, we take the user mode MOB ID and use * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent. */ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery); struct vmw_bo *vmw_bo; int ret; cmd = container_of(header, typeof(*cmd), header); /* * Look up the buffer pointed to by q.mobid, put it on the relocation * list so its kernel mode MOB ID can be filled in later */ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, &vmw_bo); if (ret != 0) return ret; sw_context->dx_query_mob = vmw_bo; sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx; return 0; } /** * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream.
*/ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, NULL); } /** * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) = container_of(header, typeof(*cmd), header); if (unlikely(dev_priv->has_mob)) { VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery); BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; gb_cmd.header.size = cmd->header.size; gb_cmd.body.cid = cmd->body.cid; gb_cmd.body.type = cmd->body.type; memcpy(cmd, &gb_cmd, sizeof(*cmd)); return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); } return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, NULL); } /** * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, &vmw_bo); if (unlikely(ret != 0)) return ret; ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); return ret; } /** * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery); int ret; cmd = container_of(header, typeof(*cmd), header); if (dev_priv->has_mob) { VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery); BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; gb_cmd.header.size = cmd->header.size; gb_cmd.body.cid = cmd->body.cid; gb_cmd.body.type = cmd->body.type; gb_cmd.body.mobid = cmd->body.guestResult.gmrId; gb_cmd.body.offset = cmd->body.guestResult.offset; memcpy(cmd, &gb_cmd, sizeof(*cmd)); return vmw_cmd_end_gb_query(dev_priv, sw_context, header); } ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.guestResult, &vmw_bo); if (unlikely(ret != 0)) return ret; ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); return ret; } /** * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. 
* @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid, &vmw_bo); if (unlikely(ret != 0)) return ret; return 0; } /** * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery); int ret; cmd = container_of(header, typeof(*cmd), header); if (dev_priv->has_mob) { VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery); BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; gb_cmd.header.size = cmd->header.size; gb_cmd.body.cid = cmd->body.cid; gb_cmd.body.type = cmd->body.type; gb_cmd.body.mobid = cmd->body.guestResult.gmrId; gb_cmd.body.offset = cmd->body.guestResult.offset; memcpy(cmd, &gb_cmd, sizeof(*cmd)); return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); } ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.guestResult, &vmw_bo); if (unlikely(ret != 0)) return ret; return 0; } static int vmw_cmd_dma(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_bo *vmw_bo = NULL; struct vmw_surface *srf = NULL; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); int ret; SVGA3dCmdSurfaceDMASuffix *suffix; uint32_t bo_size; bool dirty; cmd = container_of(header, typeof(*cmd), header); suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body + header->size - sizeof(*suffix)); /* Make sure device and verifier stays in sync. */ if (unlikely(suffix->suffixSize != sizeof(*suffix))) { VMW_DEBUG_USER("Invalid DMA suffix size.\n"); return -EINVAL; } ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.guest.ptr, &vmw_bo); if (unlikely(ret != 0)) return ret; /* Make sure DMA doesn't cross BO boundaries. */ bo_size = vmw_bo->tbo.base.size; if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { VMW_DEBUG_USER("Invalid DMA offset.\n"); return -EINVAL; } bo_size -= cmd->body.guest.ptr.offset; if (unlikely(suffix->maximumOffset > bo_size)) suffix->maximumOffset = bo_size; dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ? 
VMW_RES_DIRTY_SET : 0; ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty, user_surface_converter, &cmd->body.host.sid, NULL); if (unlikely(ret != 0)) { if (unlikely(ret != -ERESTARTSYS)) VMW_DEBUG_USER("could not find surface for DMA.\n"); return ret; } srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header); return 0; } static int vmw_cmd_draw(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives); SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( (unsigned long)header + sizeof(*cmd)); SVGA3dPrimitiveRange *range; uint32_t i; uint32_t maxnum; int ret; ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; cmd = container_of(header, typeof(*cmd), header); maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); if (unlikely(cmd->body.numVertexDecls > maxnum)) { VMW_DEBUG_USER("Illegal number of vertex declarations.\n"); return -EINVAL; } for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &decl->array.surfaceId, NULL); if (unlikely(ret != 0)) return ret; } maxnum = (header->size - sizeof(cmd->body) - cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); if (unlikely(cmd->body.numRanges > maxnum)) { VMW_DEBUG_USER("Illegal number of index ranges.\n"); return -EINVAL; } range = (SVGA3dPrimitiveRange *) decl; for (i = 0; i < cmd->body.numRanges; ++i, ++range) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &range->indexArray.surfaceId, NULL); if (unlikely(ret != 0)) return ret; } return 0; } static int vmw_cmd_tex_state(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState); SVGA3dTextureState *last_state = (SVGA3dTextureState *) ((unsigned long) header + header->size + sizeof(header)); SVGA3dTextureState *cur_state = (SVGA3dTextureState *) ((unsigned long) header + sizeof(*cmd)); struct vmw_resource *ctx; struct vmw_resource *res; int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret; for (; cur_state < last_state; ++cur_state) { if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) continue; if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n", (unsigned int) cur_state->stage); return -EINVAL; } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cur_state->value, &res); if (unlikely(ret != 0)) return ret; if (dev_priv->has_mob) { struct vmw_ctx_bindinfo_tex binding; struct vmw_ctx_validation_info *node; node = vmw_execbuf_info_from_res(sw_context, ctx); if (!node) return -EINVAL; binding.bi.ctx = ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_tex; binding.texture_stage = cur_state->stage; vmw_binding_add(node->staged, &binding.bi, 0, binding.texture_stage); } } return 0; } static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf) { struct vmw_bo *vmw_bo; struct { uint32_t header; SVGAFifoCmdDefineGMRFB body; } *cmd = buf; return vmw_translate_guest_ptr(dev_priv, 
sw_context, &cmd->body.ptr, &vmw_bo); } /** * vmw_cmd_res_switch_backup - Utility function to handle backup buffer * switching * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @res: Pointer to the resource. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. * * This function prepares for registering a switch of backup buffers in the * resource metadata just prior to unreserving. It's basically a wrapper around * vmw_cmd_res_switch_backup with a different interface. */ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *res, uint32_t *buf_id, unsigned long backup_offset) { struct vmw_bo *vbo; void *info; int ret; info = vmw_execbuf_info_from_res(sw_context, res); if (!info) return -EINVAL; ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo); if (ret) return ret; vmw_validation_res_switch_backup(sw_context->ctx, info, vbo, backup_offset); return 0; } /** * vmw_cmd_switch_backup - Utility function to handle backup buffer switching * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @res_type: The resource type. * @converter: Information about user-space binding for this resource type. * @res_id: Pointer to the user-space resource handle in the command stream. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. * * This function prepares for registering a switch of backup buffers in the * resource metadata just prior to unreserving. It's basically a wrapper around * vmw_cmd_res_switch_backup with a different interface. */ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, enum vmw_res_type res_type, const struct vmw_user_resource_conv *converter, uint32_t *res_id, uint32_t *buf_id, unsigned long backup_offset) { struct vmw_resource *res; int ret; ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, VMW_RES_DIRTY_NONE, converter, res_id, &res); if (ret) return ret; return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id, backup_offset); } /** * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) = container_of(header, typeof(*cmd), header); return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->body.sid, &cmd->body.mobid, 0); } /** * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
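 *
 * The dirty argument passed to vmw_cmd_res_check() differs between the
 * image-level commands (VMW_RES_DIRTY_NONE) and the whole-surface variants
 * further down (VMW_RES_DIRTY_CLEAR); it is only forwarded to the validation
 * code's dirty tracking and does not change the command checking itself.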
*/ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.image.sid, NULL); } /** * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_CLEAR, user_surface_converter, &cmd->body.sid, NULL); } /** * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.image.sid, NULL); } /** * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_CLEAR, user_surface_converter, &cmd->body.sid, NULL); } /** * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.image.sid, NULL); } /** * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_CLEAR, user_surface_converter, &cmd->body.sid, NULL); } /** * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader); int ret; size_t size; struct vmw_resource *ctx; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret; if (unlikely(!dev_priv->has_mob)) return 0; size = cmd->header.size - sizeof(cmd->body); ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx), cmd->body.shid, cmd + 1, cmd->body.type, size, &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; return vmw_resource_relocation_add(sw_context, NULL, vmw_ptr_diff(sw_context->buf_start, &cmd->header.id), vmw_res_rel_nop); } /** * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader); int ret; struct vmw_resource *ctx; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret; if (unlikely(!dev_priv->has_mob)) return 0; ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid, cmd->body.type, &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; return vmw_resource_relocation_add(sw_context, NULL, vmw_ptr_diff(sw_context->buf_start, &cmd->header.id), vmw_res_rel_nop); } /** * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
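 *
 * On guest-backed devices this also covers the compat shader path: a
 * per-context shader id coming from user space may be remapped to a
 * per-device guest-backed shader looked up through vmw_shader_lookup(), as
 * described in the function body.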
*/ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader); struct vmw_ctx_bindinfo_shader binding; struct vmw_resource *ctx, *res = NULL; struct vmw_ctx_validation_info *ctx_info; int ret; cmd = container_of(header, typeof(*cmd), header); if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret; if (!dev_priv->has_mob) return 0; if (cmd->body.shid != SVGA3D_INVALID_ID) { /* * This is the compat shader path - Per device guest-backed * shaders, but user-space thinks it's per context host- * backed shaders. */ res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, cmd->body.type); if (!IS_ERR(res)) { ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); if (unlikely(ret != 0)) return ret; ret = vmw_resource_relocation_add (sw_context, res, vmw_ptr_diff(sw_context->buf_start, &cmd->body.shid), vmw_res_rel_normal); if (unlikely(ret != 0)) return ret; } } if (IS_ERR_OR_NULL(res)) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, VMW_RES_DIRTY_NONE, user_shader_converter, &cmd->body.shid, &res); if (unlikely(ret != 0)) return ret; } ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); if (!ctx_info) return -EINVAL; binding.bi.ctx = ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_shader; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0); return 0; } /** * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst); int ret; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, NULL); if (unlikely(ret != 0)) return ret; if (dev_priv->has_mob) header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; return 0; } /** * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) = container_of(header, typeof(*cmd), header); return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, user_shader_converter, &cmd->body.shid, &cmd->body.mobid, cmd->body.offsetInBytes); } /** * vmw_cmd_dx_set_single_constant_buffer - Validate * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_cb binding; int ret; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.sid, &res); if (unlikely(ret != 0)) return ret; if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", (unsigned int) cmd->body.type, (unsigned int) cmd->body.slot); return -EINVAL; } binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_cb; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.offset = cmd->body.offsetInBytes; binding.size = cmd->body.sizeInBytes; binding.slot = cmd->body.slot; vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, binding.slot); return 0; } /** * vmw_cmd_dx_set_constant_buffer_offset - Validate * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset); struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); u32 shader_slot; if (!has_sm5_context(dev_priv)) return -EINVAL; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { VMW_DEBUG_USER("Illegal const buffer slot %u.\n", (unsigned int) cmd->body.slot); return -EINVAL; } shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET; vmw_binding_cb_offset_update(ctx_node->staged, shader_slot, cmd->body.slot, cmd->body.offsetInBytes); return 0; } /** * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = container_of(header, typeof(*cmd), header); u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dShaderResourceViewId); if ((u64) cmd->body.startView + (u64) num_sr_view > (u64) SVGA3D_DX_MAX_SRVIEWS || !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Invalid shader binding.\n"); return -EINVAL; } return vmw_view_bindings_add(sw_context, vmw_view_sr, vmw_ctx_binding_sr, cmd->body.type - SVGA3D_SHADERTYPE_MIN, (void *) &cmd[1], num_sr_view, cmd->body.startView); } /** * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_shader binding; int ret = 0; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; } if (cmd->body.shaderId != SVGA3D_INVALID_ID) { res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); if (IS_ERR(res)) { VMW_DEBUG_USER("Could not find shader for binding.\n"); return PTR_ERR(res); } ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); if (ret) return ret; } binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_dx_shader; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0); return 0; } /** * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_vb binding; struct vmw_resource *res; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetVertexBuffers body; SVGA3dVertexBuffer buf[]; } *cmd; int i, ret, num; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dVertexBuffer); if ((u64)num + (u64)cmd->body.startBuffer > (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { VMW_DEBUG_USER("Invalid number of vertex buffers.\n"); return -EINVAL; } for (i = 0; i < num; i++) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->buf[i].sid, &res); if (unlikely(ret != 0)) return ret; binding.bi.ctx = ctx_node->ctx; binding.bi.bt = vmw_ctx_binding_vb; binding.bi.res = res; binding.offset = cmd->buf[i].offset; binding.stride = cmd->buf[i].stride; binding.slot = i + cmd->body.startBuffer; vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); } return 0; } /** * vmw_cmd_dx_set_index_buffer - Validate * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_ib binding; struct vmw_resource *res; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer); int ret; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.sid, &res); if (unlikely(ret != 0)) return ret; binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_ib; binding.offset = cmd->body.offset; binding.format = cmd->body.format; vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0); return 0; } /** * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) = container_of(header, typeof(*cmd), header); u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dRenderTargetViewId); int ret; if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) { VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n"); return -EINVAL; } ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds, 0, &cmd->body.depthStencilViewId, 1, 0); if (ret) return ret; return vmw_view_bindings_add(sw_context, vmw_view_rt, vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1], num_rt_view, 0); } /** * vmw_cmd_dx_clear_rendertarget_view - Validate * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) = container_of(header, typeof(*cmd), header); struct vmw_resource *ret; ret = vmw_view_id_val_add(sw_context, vmw_view_rt, cmd->body.renderTargetViewId); return PTR_ERR_OR_ZERO(ret); } /** * vmw_cmd_dx_clear_depthstencil_view - Validate * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) = container_of(header, typeof(*cmd), header); struct vmw_resource *ret; ret = vmw_view_id_val_add(sw_context, vmw_view_ds, cmd->body.depthStencilViewId); return PTR_ERR_OR_ZERO(ret); } static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_resource *srf; struct vmw_resource *res; enum vmw_view_type view_type; int ret; /* * This is based on the fact that all affected define commands have the * same initial command body layout. 
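 * Each such command body starts with a defined_id immediately followed by a
 * sid, which is what the local struct below relies on.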
*/ struct { SVGA3dCmdHeader header; uint32 defined_id; uint32 sid; } *cmd; if (!ctx_node) return -EINVAL; view_type = vmw_view_cmd_to_type(header->id); if (view_type == vmw_view_max) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { VMW_DEBUG_USER("Invalid surface id.\n"); return -EINVAL; } ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->sid, &srf); if (unlikely(ret != 0)) return ret; res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); ret = vmw_cotable_notify(res, cmd->defined_id); if (unlikely(ret != 0)) return ret; return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type, cmd->defined_id, header, header->size + sizeof(*header), &sw_context->staged_cmd_res); } /** * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_so_target binding; struct vmw_resource *res; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetSOTargets body; SVGA3dSoTarget targets[]; } *cmd; int i, ret, num; if (!ctx_node) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget); if (num > SVGA3D_DX_MAX_SOTARGETS) { VMW_DEBUG_USER("Invalid DX SO binding.\n"); return -EINVAL; } for (i = 0; i < num; i++) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->targets[i].sid, &res); if (unlikely(ret != 0)) return ret; binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_so_target; binding.offset = cmd->targets[i].offset; binding.size = cmd->targets[i].sizeInBytes; binding.slot = i; vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot); } return 0; } static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_resource *res; /* * This is based on the fact that all affected define commands have * the same initial command body layout. */ struct { SVGA3dCmdHeader header; uint32 defined_id; } *cmd; enum vmw_so_type so_type; int ret; if (!ctx_node) return -EINVAL; so_type = vmw_so_cmd_to_type(header->id); res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); if (IS_ERR(res)) return PTR_ERR(res); cmd = container_of(header, typeof(*cmd), header); ret = vmw_cotable_notify(res, cmd->defined_id); return ret; } /** * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
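 *
 * Return: Zero on success, negative error code on failure.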
*/ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct { SVGA3dCmdHeader header; union { SVGA3dCmdDXReadbackSubResource r_body; SVGA3dCmdDXInvalidateSubResource i_body; SVGA3dCmdDXUpdateSubResource u_body; SVGA3dSurfaceId sid; }; } *cmd; BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != offsetof(typeof(*cmd), sid)); BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != offsetof(typeof(*cmd), sid)); BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != offsetof(typeof(*cmd), sid)); cmd = container_of(header, typeof(*cmd), header); return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->sid, NULL); } static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); if (!ctx_node) return -EINVAL; return 0; } /** * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view * resource for removal. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. * * Check that the view exists, and if it was not created using this command * batch, conditionally make this command a NOP. */ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct { SVGA3dCmdHeader header; union vmw_view_destroy body; } *cmd = container_of(header, typeof(*cmd), header); enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); struct vmw_resource *view; int ret; if (!ctx_node) return -EINVAL; ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type, &sw_context->staged_cmd_res, &view); if (ret || !view) return ret; /* * If the view wasn't created during this command batch, it might * have been removed due to a context swapout, so add a * relocation to conditionally make this command a NOP to avoid * device errors. */ return vmw_resource_relocation_add(sw_context, view, vmw_ptr_diff(sw_context->buf_start, &cmd->header.id), vmw_res_rel_cond_nop); } /** * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_resource *res; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) = container_of(header, typeof(*cmd), header); int ret; if (!ctx_node) return -EINVAL; res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); ret = vmw_cotable_notify(res, cmd->body.shaderId); if (ret) return ret; return vmw_dx_shader_add(sw_context->man, ctx_node->ctx, cmd->body.shaderId, cmd->body.type, &sw_context->staged_cmd_res); } /** * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
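 *
 * Return: Zero on success, negative error code on failure.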
*/ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) = container_of(header, typeof(*cmd), header); int ret; if (!ctx_node) return -EINVAL; ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, &sw_context->staged_cmd_res); return ret; } /** * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_resource *ctx; struct vmw_resource *res; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) = container_of(header, typeof(*cmd), header); int ret; if (cmd->body.cid != SVGA3D_INVALID_ID) { ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, VMW_RES_DIRTY_SET, user_context_converter, &cmd->body.cid, &ctx); if (ret) return ret; } else { struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); if (!ctx_node) return -EINVAL; ctx = ctx_node->ctx; } res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0); if (IS_ERR(res)) { VMW_DEBUG_USER("Could not find shader to bind.\n"); return PTR_ERR(res); } ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); if (ret) { VMW_DEBUG_USER("Error creating resource validation node.\n"); return ret; } return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, &cmd->body.mobid, cmd->body.offsetInBytes); } /** * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) = container_of(header, typeof(*cmd), header); struct vmw_resource *view; struct vmw_res_cache_entry *rcache; view = vmw_view_id_val_add(sw_context, vmw_view_sr, cmd->body.shaderResourceViewId); if (IS_ERR(view)) return PTR_ERR(view); /* * Normally the shader-resource view is not gpu-dirtying, but for * this particular command it is... * So mark the last looked-up surface, which is the surface * the view points to, gpu-dirty. */ rcache = &sw_context->res_cache[vmw_res_surface]; vmw_validation_res_set_dirty(sw_context->ctx, rcache->private, VMW_RES_DIRTY_SET); return 0; } /** * vmw_cmd_dx_transfer_from_buffer - Validate * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
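 *
 * Return: Zero on success, negative error code on failure.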
*/ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) = container_of(header, typeof(*cmd), header); int ret; ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.srcSid, NULL); if (ret != 0) return ret; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.destSid, NULL); } /** * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) = container_of(header, typeof(*cmd), header); if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)) return -EINVAL; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_SET, user_surface_converter, &cmd->body.surface.sid, NULL); } static int vmw_cmd_sm5(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { if (!has_sm5_context(dev_priv)) return -EINVAL; return 0; } static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { if (!has_sm5_context(dev_priv)) return -EINVAL; return vmw_cmd_dx_view_define(dev_priv, sw_context, header); } static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { if (!has_sm5_context(dev_priv)) return -EINVAL; return vmw_cmd_dx_view_remove(dev_priv, sw_context, header); } static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct { SVGA3dCmdHeader header; SVGA3dCmdDXClearUAViewUint body; } *cmd = container_of(header, typeof(*cmd), header); struct vmw_resource *ret; if (!has_sm5_context(dev_priv)) return -EINVAL; ret = vmw_view_id_val_add(sw_context, vmw_view_ua, cmd->body.uaViewId); return PTR_ERR_OR_ZERO(ret); } static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct { SVGA3dCmdHeader header; SVGA3dCmdDXClearUAViewFloat body; } *cmd = container_of(header, typeof(*cmd), header); struct vmw_resource *ret; if (!has_sm5_context(dev_priv)) return -EINVAL; ret = vmw_view_id_val_add(sw_context, vmw_view_ua, cmd->body.uaViewId); return PTR_ERR_OR_ZERO(ret); } static int vmw_cmd_set_uav(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetUAViews body; } *cmd = container_of(header, typeof(*cmd), header); u32 num_uav = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dUAViewId); int ret; if (!has_sm5_context(dev_priv)) return -EINVAL; if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } ret = vmw_view_bindings_add(sw_context, vmw_view_ua, vmw_ctx_binding_uav, 0, (void *)&cmd[1], num_uav, 0); if (ret) return ret; vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0, cmd->body.uavSpliceIndex); return ret; } static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, 
SVGA3dCmdHeader *header) { struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCSUAViews body; } *cmd = container_of(header, typeof(*cmd), header); u32 num_uav = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dUAViewId); int ret; if (!has_sm5_context(dev_priv)) return -EINVAL; if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } ret = vmw_view_bindings_add(sw_context, vmw_view_ua, vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1], num_uav, 0); if (ret) return ret; vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1, cmd->body.startIndex); return ret; } static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *res; struct { SVGA3dCmdHeader header; SVGA3dCmdDXDefineStreamOutputWithMob body; } *cmd = container_of(header, typeof(*cmd), header); int ret; if (!has_sm5_context(dev_priv)) return -EINVAL; if (!ctx_node) { DRM_ERROR("DX Context not set.\n"); return -EINVAL; } res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT); ret = vmw_cotable_notify(res, cmd->body.soid); if (ret) return ret; return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx, cmd->body.soid, &sw_context->staged_cmd_res); } static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *res; struct { SVGA3dCmdHeader header; SVGA3dCmdDXDestroyStreamOutput body; } *cmd = container_of(header, typeof(*cmd), header); if (!ctx_node) { DRM_ERROR("DX Context not set.\n"); return -EINVAL; } /* * When device does not support SM5 then streamoutput with mob command is * not available to user-space. Simply return in this case. */ if (!has_sm5_context(dev_priv)) return 0; /* * With SM5 capable device if lookup fails then user-space probably used * old streamoutput define command. Return without an error. 
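	 * In that case there is no mob-backed streamoutput resource to remove.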
*/ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), cmd->body.soid); if (IS_ERR(res)) return 0; return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid, &sw_context->staged_cmd_res); } static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *res; struct { SVGA3dCmdHeader header; SVGA3dCmdDXBindStreamOutput body; } *cmd = container_of(header, typeof(*cmd), header); int ret; if (!has_sm5_context(dev_priv)) return -EINVAL; if (!ctx_node) { DRM_ERROR("DX Context not set.\n"); return -EINVAL; } res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), cmd->body.soid); if (IS_ERR(res)) { DRM_ERROR("Could not find streamoutput to bind.\n"); return PTR_ERR(res); } vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes); ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); if (ret) { DRM_ERROR("Error creating resource validation node.\n"); return ret; } return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, &cmd->body.mobid, cmd->body.offsetInBytes); } static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *res; struct vmw_ctx_bindinfo_so binding; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetStreamOutput body; } *cmd = container_of(header, typeof(*cmd), header); int ret; if (!ctx_node) { DRM_ERROR("DX Context not set.\n"); return -EINVAL; } if (cmd->body.soid == SVGA3D_INVALID_ID) return 0; /* * When device does not support SM5 then streamoutput with mob command is * not available to user-space. Simply return in this case. */ if (!has_sm5_context(dev_priv)) return 0; /* * With SM5 capable device if lookup fails then user-space probably used * old streamoutput define command. Return without an error. */ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), cmd->body.soid); if (IS_ERR(res)) { return 0; } ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE, vmw_val_add_flag_noctx); if (ret) { DRM_ERROR("Error creating resource validation node.\n"); return ret; } binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_so; binding.slot = 0; /* Only one SO set to context at a time. 
*/ vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0, binding.slot); return ret; } static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_draw_indexed_instanced_indirect_cmd { SVGA3dCmdHeader header; SVGA3dCmdDXDrawIndexedInstancedIndirect body; } *cmd = container_of(header, typeof(*cmd), header); if (!has_sm5_context(dev_priv)) return -EINVAL; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.argsBufferSid, NULL); } static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_draw_instanced_indirect_cmd { SVGA3dCmdHeader header; SVGA3dCmdDXDrawInstancedIndirect body; } *cmd = container_of(header, typeof(*cmd), header); if (!has_sm5_context(dev_priv)) return -EINVAL; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.argsBufferSid, NULL); } static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { struct vmw_dispatch_indirect_cmd { SVGA3dCmdHeader header; SVGA3dCmdDXDispatchIndirect body; } *cmd = container_of(header, typeof(*cmd), header); if (!has_sm5_context(dev_priv)) return -EINVAL; return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->body.argsBufferSid, NULL); } static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) { uint32_t size_remaining = *size; uint32_t cmd_id; cmd_id = ((uint32_t *)buf)[0]; switch (cmd_id) { case SVGA_CMD_UPDATE: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); break; case SVGA_CMD_DEFINE_GMRFB: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); break; case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); break; case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); break; default: VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id); return -EINVAL; } if (*size > size_remaining) { VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n", cmd_id); return -EINVAL; } if (unlikely(!sw_context->kernel)) { VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id); return -EPERM; } if (cmd_id == SVGA_CMD_DEFINE_GMRFB) return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); return 0; } static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, &vmw_cmd_set_render_target_check, true, 
false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, &vmw_cmd_blt_surf_screen_check, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, &vmw_cmd_update_gb_surface, true, false, true), 
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, &vmw_cmd_readback_gb_image, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, &vmw_cmd_readback_gb_surface, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, &vmw_cmd_invalidate_gb_image, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, &vmw_cmd_invalidate_gb_surface, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, false, false, true), /* SM commands */ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, false, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, &vmw_cmd_dx_set_single_constant_buffer, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, &vmw_cmd_dx_set_shader_res, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, 
&vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, &vmw_cmd_dx_set_vertex_buffers, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, &vmw_cmd_dx_set_index_buffer, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, &vmw_cmd_dx_set_rendertargets, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, &vmw_cmd_dx_clear_rendertarget_view, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, &vmw_cmd_dx_clear_depthstencil_view, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, &vmw_cmd_dx_check_subresource, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, &vmw_cmd_dx_check_subresource, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, &vmw_cmd_dx_check_subresource, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, &vmw_cmd_dx_view_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, &vmw_cmd_dx_view_remove, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, &vmw_cmd_dx_view_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, &vmw_cmd_dx_view_remove, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, &vmw_cmd_dx_view_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, &vmw_cmd_dx_view_remove, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, &vmw_cmd_dx_cid_check, true, false, true), 
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, &vmw_cmd_dx_define_shader, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, &vmw_cmd_dx_destroy_shader, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, &vmw_cmd_dx_bind_shader, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, &vmw_cmd_dx_so_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, &vmw_cmd_dx_destroy_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_set_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS, &vmw_cmd_dx_set_so_targets, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, &vmw_cmd_dx_cid_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY, &vmw_cmd_buffer_copy_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_pred_copy_check, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, &vmw_cmd_dx_transfer_from_buffer, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET, &vmw_cmd_dx_set_constant_buffer_offset, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, true, false, true), /* * SM5 commands */ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT, &vmw_cmd_clear_uav_float, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT, &vmw_cmd_indexed_instanced_indirect, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT, &vmw_cmd_instanced_indirect, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT, &vmw_cmd_dispatch_indirect, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2, 
&vmw_cmd_sm5_view_define, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB, &vmw_cmd_dx_define_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT, &vmw_cmd_dx_bind_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2, &vmw_cmd_dx_so_define, true, false, true), }; bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) { u32 cmd_id = ((u32 *) buf)[0]; if (cmd_id >= SVGA_CMD_MAX) { SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; const struct vmw_cmd_entry *entry; *size = header->size + sizeof(SVGA3dCmdHeader); cmd_id = header->id; if (cmd_id >= SVGA_3D_CMD_MAX) return false; cmd_id -= SVGA_3D_CMD_BASE; entry = &vmw_cmd_entries[cmd_id]; *cmd = entry->cmd_name; return true; } switch (cmd_id) { case SVGA_CMD_UPDATE: *cmd = "SVGA_CMD_UPDATE"; *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate); break; case SVGA_CMD_DEFINE_GMRFB: *cmd = "SVGA_CMD_DEFINE_GMRFB"; *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB); break; case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN"; *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); break; case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB"; *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); break; default: *cmd = "UNKNOWN"; *size = 0; return false; } return true; } static int vmw_cmd_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) { uint32_t cmd_id; uint32_t size_remaining = *size; SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; int ret; const struct vmw_cmd_entry *entry; bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; cmd_id = ((uint32_t *)buf)[0]; /* Handle any none 3D commands */ if (unlikely(cmd_id < SVGA_CMD_MAX)) return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); cmd_id = header->id; *size = header->size + sizeof(SVGA3dCmdHeader); cmd_id -= SVGA_3D_CMD_BASE; if (unlikely(*size > size_remaining)) goto out_invalid; if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) goto out_invalid; entry = &vmw_cmd_entries[cmd_id]; if (unlikely(!entry->func)) goto out_invalid; if (unlikely(!entry->user_allow && !sw_context->kernel)) goto out_privileged; if (unlikely(entry->gb_disable && gb)) goto out_old; if (unlikely(entry->gb_enable && !gb)) goto out_new; ret = entry->func(dev_priv, sw_context, header); if (unlikely(ret != 0)) { VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n", cmd_id + SVGA_3D_CMD_BASE, ret); return ret; } return 0; out_invalid: VMW_DEBUG_USER("Invalid SVGA3D command: %d\n", cmd_id + SVGA_3D_CMD_BASE); return -EINVAL; out_privileged: VMW_DEBUG_USER("Privileged SVGA3D command: %d\n", cmd_id + SVGA_3D_CMD_BASE); return -EPERM; out_old: VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n", cmd_id + SVGA_3D_CMD_BASE); return -EINVAL; out_new: VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n", cmd_id + SVGA_3D_CMD_BASE); return -EINVAL; } static int vmw_cmd_check_all(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t size) { int32_t cur_size = size; int ret; sw_context->buf_start = buf; while (cur_size > 0) { size = cur_size; ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); if (unlikely(ret != 0)) return ret; buf = (void *)((unsigned long) buf + size); cur_size -= size; } if (unlikely(cur_size != 0)) { VMW_DEBUG_USER("Command verifier out of sync.\n"); return -EINVAL; } return 0; } static void vmw_free_relocations(struct 
vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->tbo;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
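 *
 * Return: 0 on success, -EFAULT if copying the fence information to
 * user-space failed.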
*/ int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, int32_t out_fence_fd) { struct drm_vmw_fence_rep fence_rep; if (user_fence_rep == NULL) return 0; memset(&fence_rep, 0, sizeof(fence_rep)); fence_rep.error = ret; fence_rep.fd = out_fence_fd; if (ret == 0) { BUG_ON(fence == NULL); fence_rep.handle = fence_handle; fence_rep.seqno = fence->base.seqno; vmw_update_seqno(dev_priv); fence_rep.passed_seqno = dev_priv->last_read_seqno; } /* * copy_to_user errors will be detected by user space not seeing * fence_rep::error filled in. Typically user-space would have pre-set * that member to -EFAULT. */ ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep)); /* * User-space lost the fence object. We need to sync and unreference the * handle. */ if (unlikely(ret != 0) && (fence_rep.error == 0)) { ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle); VMW_DEBUG_USER("Fence copy error. Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } return ret ? -EFAULT : 0; } /** * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo. * * @dev_priv: Pointer to a device private structure. * @kernel_commands: Pointer to the unpatched command batch. * @command_size: Size of the unpatched command batch. * @sw_context: Structure holding the relocation lists. * * Side effects: If this function returns 0, then the command batch pointed to * by @kernel_commands will have been modified. */ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, void *kernel_commands, u32 command_size, struct vmw_sw_context *sw_context) { void *cmd; if (sw_context->dx_ctx_node) cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size, sw_context->dx_ctx_node->ctx->id); else cmd = VMW_CMD_RESERVE(dev_priv, command_size); if (!cmd) return -ENOMEM; vmw_apply_relocations(sw_context); memcpy(cmd, kernel_commands, command_size); vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations); vmw_cmd_commit(dev_priv, command_size); return 0; } /** * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the * command buffer manager. * * @dev_priv: Pointer to a device private structure. * @header: Opaque handle to the command buffer allocation. * @command_size: Size of the unpatched command batch. * @sw_context: Structure holding the relocation lists. * * Side effects: If this function returns 0, then the command buffer represented * by @header will have been modified. */ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, struct vmw_cmdbuf_header *header, u32 command_size, struct vmw_sw_context *sw_context) { u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id : SVGA3D_INVALID_ID); void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false, header); vmw_apply_relocations(sw_context); vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations); vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false); return 0; } /** * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for * submission using a command buffer. * * @dev_priv: Pointer to a device private structure. * @user_commands: User-space pointer to the commands to be submitted. * @command_size: Size of the unpatched command batch. 
* @header: Out parameter returning the opaque pointer to the command buffer. * * This function checks whether we can use the command buffer manager for * submission and if so, creates a command buffer of suitable size and copies * the user data into that buffer. * * On successful return, the function returns a pointer to the data in the * command buffer and *@header is set to non-NULL. * * @kernel_commands: If command buffers could not be used, the function will * return the value of @kernel_commands on function call. That value may be * NULL. In that case, the value of *@header will be set to NULL. * * If an error is encountered, the function will return a pointer error value. * If the function is interrupted by a signal while sleeping, it will return * -ERESTARTSYS casted to a pointer error value. */ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, u32 command_size, struct vmw_cmdbuf_header **header) { size_t cmdbuf_size; int ret; *header = NULL; if (command_size > SVGA_CB_MAX_SIZE) { VMW_DEBUG_USER("Command buffer is too large.\n"); return ERR_PTR(-EINVAL); } if (!dev_priv->cman || kernel_commands) return kernel_commands; /* If possible, add a little space for fencing. */ cmdbuf_size = command_size + 512; cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true, header); if (IS_ERR(kernel_commands)) return kernel_commands; ret = copy_from_user(kernel_commands, user_commands, command_size); if (ret) { VMW_DEBUG_USER("Failed copying commands.\n"); vmw_cmdbuf_header_free(*header); *header = NULL; return ERR_PTR(-EFAULT); } return kernel_commands; } static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, uint32_t handle) { struct vmw_resource *res; int ret; unsigned int size; if (handle == SVGA3D_INVALID_ID) return 0; size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context); ret = vmw_validation_preload_res(sw_context->ctx, size); if (ret) return ret; ret = vmw_user_resource_lookup_handle (dev_priv, sw_context->fp->tfile, handle, user_context_converter, &res); if (ret != 0) { VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n", (unsigned int) handle); return ret; } ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET, vmw_val_add_flag_none); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); return ret; } sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res); sw_context->man = vmw_context_res_man(res); vmw_resource_unreference(&res); return 0; } int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, uint32_t command_size, uint64_t throttle_us, uint32_t dx_context_handle, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj **out_fence, uint32_t flags) { struct vmw_sw_context *sw_context = &dev_priv->ctx; struct vmw_fence_obj *fence = NULL; struct vmw_cmdbuf_header *header; uint32_t handle = 0; int ret; int32_t out_fence_fd = -1; struct sync_file *sync_file = NULL; DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1); if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { VMW_DEBUG_USER("Failed to get a fence fd.\n"); return out_fence_fd; } } if (throttle_us) { VMW_DEBUG_USER("Throttling is no longer supported.\n"); } kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands, command_size, &header); 
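	/* On failure vmw_execbuf_cmdbuf() returns an ERR_PTR and *header stays NULL. */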
if (IS_ERR(kernel_commands)) { ret = PTR_ERR(kernel_commands); goto out_free_fence_fd; } ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); if (ret) { ret = -ERESTARTSYS; goto out_free_header; } sw_context->kernel = false; if (kernel_commands == NULL) { ret = vmw_resize_cmd_bounce(sw_context, command_size); if (unlikely(ret != 0)) goto out_unlock; ret = copy_from_user(sw_context->cmd_bounce, user_commands, command_size); if (unlikely(ret != 0)) { ret = -EFAULT; VMW_DEBUG_USER("Failed copying commands.\n"); goto out_unlock; } kernel_commands = sw_context->cmd_bounce; } else if (!header) { sw_context->kernel = true; } sw_context->filp = file_priv; sw_context->fp = vmw_fpriv(file_priv); INIT_LIST_HEAD(&sw_context->ctx_list); sw_context->cur_query_bo = dev_priv->pinned_bo; sw_context->last_query_ctx = NULL; sw_context->needs_post_query_barrier = false; sw_context->dx_ctx_node = NULL; sw_context->dx_query_mob = NULL; sw_context->dx_query_ctx = NULL; memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); INIT_LIST_HEAD(&sw_context->res_relocations); INIT_LIST_HEAD(&sw_context->bo_relocations); if (sw_context->staged_bindings) vmw_binding_state_reset(sw_context->staged_bindings); INIT_LIST_HEAD(&sw_context->staged_cmd_res); sw_context->ctx = &val_ctx; ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); if (unlikely(ret != 0)) goto out_err_nores; ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) goto out_err_nores; ret = vmw_resources_reserve(sw_context); if (unlikely(ret != 0)) goto out_err_nores; ret = vmw_validation_bo_reserve(&val_ctx, true); if (unlikely(ret != 0)) goto out_err_nores; ret = vmw_validation_bo_validate(&val_ctx, true); if (unlikely(ret != 0)) goto out_err; ret = vmw_validation_res_validate(&val_ctx, true); if (unlikely(ret != 0)) goto out_err; vmw_validation_drop_ht(&val_ctx); ret = mutex_lock_interruptible(&dev_priv->binding_mutex); if (unlikely(ret != 0)) { ret = -ERESTARTSYS; goto out_err; } if (dev_priv->has_mob) { ret = vmw_rebind_contexts(sw_context); if (unlikely(ret != 0)) goto out_unlock_binding; } if (!header) { ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands, command_size, sw_context); } else { ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size, sw_context); header = NULL; } mutex_unlock(&dev_priv->binding_mutex); if (ret) goto out_err; vmw_query_bo_switch_commit(dev_priv, sw_context); ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, (user_fence_rep) ? &handle : NULL); /* * This error is harmless, because if fence submission fails, * vmw_fifo_send_fence will sync. The error will be propagated to * user-space in @fence_rep */ if (ret != 0) VMW_DEBUG_USER("Fence submission error. Syncing.\n"); vmw_execbuf_bindings_commit(sw_context, false); vmw_bind_dx_query_mob(sw_context); vmw_validation_res_unreserve(&val_ctx, false); vmw_validation_bo_fence(sw_context->ctx, fence); if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) __vmw_execbuf_release_pinned_bo(dev_priv, fence); /* * If anything fails here, give up trying to export the fence and do a * sync since the user mode will not be able to sync the fence itself. * This ensures we are still functionally correct. 
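	 * In that case the reserved fence fd is released instead of installed.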
*/ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { sync_file = sync_file_create(&fence->base); if (!sync_file) { VMW_DEBUG_USER("Sync file create failed for fence\n"); put_unused_fd(out_fence_fd); out_fence_fd = -1; (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } } ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, handle, out_fence_fd); if (sync_file) { if (ret) { /* usercopy of fence failed, put the file object */ fput(sync_file->file); put_unused_fd(out_fence_fd); } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); } } /* Don't unreference when handing fence out */ if (unlikely(out_fence != NULL)) { *out_fence = fence; fence = NULL; } else if (likely(fence != NULL)) { vmw_fence_obj_unreference(&fence); } vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); mutex_unlock(&dev_priv->cmdbuf_mutex); /* * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks * in resource destruction paths. */ vmw_validation_unref_lists(&val_ctx); return ret; out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); out_err: vmw_validation_bo_backoff(&val_ctx); out_err_nores: vmw_execbuf_bindings_commit(sw_context, true); vmw_validation_res_unreserve(&val_ctx, true); vmw_resource_relocations_free(&sw_context->res_relocations); vmw_free_relocations(sw_context); if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) __vmw_execbuf_release_pinned_bo(dev_priv, NULL); out_unlock: vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); vmw_validation_drop_ht(&val_ctx); WARN_ON(!list_empty(&sw_context->ctx_list)); mutex_unlock(&dev_priv->cmdbuf_mutex); /* * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks * in resource destruction paths. */ vmw_validation_unref_lists(&val_ctx); out_free_header: if (header) vmw_cmdbuf_header_free(header); out_free_fence_fd: if (out_fence_fd >= 0) put_unused_fd(out_fence_fd); return ret; } /** * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. * * @dev_priv: The device private structure. * * This function is called to idle the fifo and unpin the query buffer if the * normal way to do this hits an error, which should typically be extremely * rare. */ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) { VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n"); (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); vmw_bo_pin_reserved(dev_priv->pinned_bo, false); if (dev_priv->dummy_query_bo_pinned) { vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); dev_priv->dummy_query_bo_pinned = false; } } /** * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query * bo. * * @dev_priv: The device private structure. * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a * query barrier that flushes all queries touching the current buffer pointed to * by @dev_priv->pinned_bo * * This function should be used to unpin the pinned query bo, or as a query * barrier when we need to make sure that all queries have finished before the * next fifo command. (For example on hardware context destructions where the * hardware may otherwise leak unfinished queries). * * This function does not return any failure codes, but make attempts to do safe * unpinning in case of errors. * * The function will synchronize on the previous query barrier, and will thus * not finish until that barrier has executed. 
* * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before * calling this function. */ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, struct vmw_fence_obj *fence) { int ret = 0; struct vmw_fence_obj *lfence = NULL; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); if (dev_priv->pinned_bo == NULL) goto out_unlock; vmw_bo_placement_set(dev_priv->pinned_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo); if (ret) goto out_no_reserve; vmw_bo_placement_set(dev_priv->dummy_query_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo); if (ret) goto out_no_reserve; ret = vmw_validation_bo_reserve(&val_ctx, false); if (ret) goto out_no_reserve; if (dev_priv->query_cid_valid) { BUG_ON(fence != NULL); ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid); if (ret) goto out_no_emit; dev_priv->query_cid_valid = false; } vmw_bo_pin_reserved(dev_priv->pinned_bo, false); if (dev_priv->dummy_query_bo_pinned) { vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false); dev_priv->dummy_query_bo_pinned = false; } if (fence == NULL) { (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, NULL); fence = lfence; } vmw_validation_bo_fence(&val_ctx, fence); if (lfence != NULL) vmw_fence_obj_unreference(&lfence); vmw_validation_unref_lists(&val_ctx); vmw_bo_unreference(&dev_priv->pinned_bo); out_unlock: return; out_no_emit: vmw_validation_bo_backoff(&val_ctx); out_no_reserve: vmw_validation_unref_lists(&val_ctx); vmw_execbuf_unpin_panic(dev_priv); vmw_bo_unreference(&dev_priv->pinned_bo); } /** * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo. * * @dev_priv: The device private structure. * * This function should be used to unpin the pinned query bo, or as a query * barrier when we need to make sure that all queries have finished before the * next fifo command. (For example on hardware context destructions where the * hardware may otherwise leak unfinished queries). * * This function does not return any failure codes, but make attempts to do safe * unpinning in case of errors. * * The function will synchronize on the previous query barrier, and will thus * not finish until that barrier has executed. */ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) { mutex_lock(&dev_priv->cmdbuf_mutex); if (dev_priv->query_cid_valid) __vmw_execbuf_release_pinned_bo(dev_priv, NULL); mutex_unlock(&dev_priv->cmdbuf_mutex); } int vmw_execbuf_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_execbuf_arg *arg = data; int ret; struct dma_fence *in_fence = NULL; MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF); MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF); /* * Extend the ioctl argument while maintaining backwards compatibility: * We take different code paths depending on the value of arg->version. * * Note: The ioctl argument is extended and zeropadded by core DRM. 
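	 * For a v1 argument the zero-filled context_handle is replaced below
	 * with an invalid id.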
*/ if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION || arg->version == 0)) { VMW_DEBUG_USER("Incorrect execbuf version.\n"); ret = -EINVAL; goto mksstats_out; } switch (arg->version) { case 1: /* For v1 core DRM have extended + zeropadded the data */ arg->context_handle = (uint32_t) -1; break; case 2: default: /* For v2 and later core DRM would have correctly copied it */ break; } /* If imported a fence FD from elsewhere, then wait on it */ if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) { in_fence = sync_file_get_fence(arg->imported_fence_fd); if (!in_fence) { VMW_DEBUG_USER("Cannot get imported fence\n"); ret = -EINVAL; goto mksstats_out; } ret = dma_fence_wait(in_fence, true); if (ret) goto out; } ret = vmw_execbuf_process(file_priv, dev_priv, (void __user *)(unsigned long)arg->commands, NULL, arg->command_size, arg->throttle_us, arg->context_handle, (void __user *)(unsigned long)arg->fence_rep, NULL, arg->flags); if (unlikely(ret != 0)) goto out; vmw_kms_cursor_post_execbuf(dev_priv); out: if (in_fence) dma_fence_put(in_fence); mksstats_out: MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF); return ret; }
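/*
 * Illustrative sketch (not part of the driver): the generic pattern used above
 * for handing a dma_fence to user space as a sync_file fd (relies on
 * <linux/file.h> and <linux/sync_file.h>). The fd slot is reserved first and
 * the file is only installed once nothing else can fail, because an installed
 * fd cannot be taken back; on failure the unused slot is released instead.
 * sync_file_create() takes its own reference on the fence.
 */
static int example_export_fence_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);	/* nothing installed yet, just free the slot */
		return -ENOMEM;
	}

	/* Point of no return: the fd now owns sync_file->file. */
	fd_install(fd, sync_file->file);
	return fd;
}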
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include <linux/sched/signal.h> #include "vmwgfx_drv.h" #define VMW_FENCE_WRAP (1 << 31) struct vmw_fence_manager { int num_fence_objects; struct vmw_private *dev_priv; spinlock_t lock; struct list_head fence_list; struct work_struct work; bool fifo_down; struct list_head cleanup_list; uint32_t pending_actions[VMW_ACTION_MAX]; struct mutex goal_irq_mutex; bool goal_irq_on; /* Protected by @goal_irq_mutex */ bool seqno_valid; /* Protected by @lock, and may not be set to true without the @goal_irq_mutex held. */ u64 ctx; }; struct vmw_user_fence { struct ttm_base_object base; struct vmw_fence_obj fence; }; /** * struct vmw_event_fence_action - fence action that delivers a drm event. * * @action: A struct vmw_fence_action to hook up to a fence. * @event: A pointer to the pending event. * @fence: A referenced pointer to the fence to keep it alive while @action * hangs on it. * @dev: Pointer to a struct drm_device so we can access the event stuff. * @tv_sec: If non-null, the variable pointed to will be assigned * current time tv_sec val when the fence signals. * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will * be assigned the current time tv_usec val when the fence signals. */ struct vmw_event_fence_action { struct vmw_fence_action action; struct drm_pending_event *event; struct vmw_fence_obj *fence; struct drm_device *dev; uint32_t *tv_sec; uint32_t *tv_usec; }; static struct vmw_fence_manager * fman_from_fence(struct vmw_fence_obj *fence) { return container_of(fence->base.lock, struct vmw_fence_manager, lock); } static u32 vmw_fence_goal_read(struct vmw_private *vmw) { if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) return vmw_read(vmw, SVGA_REG_FENCE_GOAL); else return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL); } static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value) { if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) vmw_write(vmw, SVGA_REG_FENCE_GOAL, value); else vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value); } /* * Note on fencing subsystem usage of irqs: * Typically the vmw_fences_update function is called * * a) When a new fence seqno has been submitted by the fifo code. 
* b) On-demand when we have waiters. Sleeping waiters will switch on the * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE * irq is received. When the last fence waiter is gone, that IRQ is masked * away. * * In situations where there are no waiters and we don't submit any new fences, * fence objects may not be signaled. This is perfectly OK, since there are * no consumers of the signaled data, but that is NOT ok when there are fence * actions attached to a fence. The fencing subsystem then makes use of the * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence * which has an action attached, and each time vmw_fences_update is called, * the subsystem makes sure the fence goal seqno is updated. * * The fence goal seqno irq is on as long as there are unsignaled fence * objects with actions attached to them. */ static void vmw_fence_obj_destroy(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); struct vmw_fence_manager *fman = fman_from_fence(fence); spin_lock(&fman->lock); list_del_init(&fence->head); --fman->num_fence_objects; spin_unlock(&fman->lock); fence->destroy(fence); } static const char *vmw_fence_get_driver_name(struct dma_fence *f) { return "vmwgfx"; } static const char *vmw_fence_get_timeline_name(struct dma_fence *f) { return "svga"; } static bool vmw_fence_enable_signaling(struct dma_fence *f) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); struct vmw_fence_manager *fman = fman_from_fence(fence); struct vmw_private *dev_priv = fman->dev_priv; u32 seqno = vmw_fence_read(dev_priv); if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; return true; } struct vmwgfx_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; static void vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct vmwgfx_wait_cb *wait = container_of(cb, struct vmwgfx_wait_cb, base); wake_up_process(wait->task); } static void __vmw_fences_update(struct vmw_fence_manager *fman); static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) { struct vmw_fence_obj *fence = container_of(f, struct vmw_fence_obj, base); struct vmw_fence_manager *fman = fman_from_fence(fence); struct vmw_private *dev_priv = fman->dev_priv; struct vmwgfx_wait_cb cb; long ret = timeout; if (likely(vmw_fence_obj_signaled(fence))) return timeout; vmw_seqno_waiter_add(dev_priv); spin_lock(f->lock); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) goto out; if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cb.base.func = vmwgfx_wait_cb; cb.task = current; list_add(&cb.base.node, &f->cb_list); for (;;) { __vmw_fences_update(fman); /* * We can use the barrier free __set_current_state() since * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the * fence spinlock. 
*/ if (intr) __set_current_state(TASK_INTERRUPTIBLE); else __set_current_state(TASK_UNINTERRUPTIBLE); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) { if (ret == 0 && timeout > 0) ret = 1; break; } if (intr && signal_pending(current)) { ret = -ERESTARTSYS; break; } if (ret == 0) break; spin_unlock(f->lock); ret = schedule_timeout(ret); spin_lock(f->lock); } __set_current_state(TASK_RUNNING); if (!list_empty(&cb.base.node)) list_del(&cb.base.node); out: spin_unlock(f->lock); vmw_seqno_waiter_remove(dev_priv); return ret; } static const struct dma_fence_ops vmw_fence_ops = { .get_driver_name = vmw_fence_get_driver_name, .get_timeline_name = vmw_fence_get_timeline_name, .enable_signaling = vmw_fence_enable_signaling, .wait = vmw_fence_wait, .release = vmw_fence_obj_destroy, }; /* * Execute signal actions on fences recently signaled. * This is done from a workqueue so we don't have to execute * signal actions from atomic context. */ static void vmw_fence_work_func(struct work_struct *work) { struct vmw_fence_manager *fman = container_of(work, struct vmw_fence_manager, work); struct list_head list; struct vmw_fence_action *action, *next_action; bool seqno_valid; do { INIT_LIST_HEAD(&list); mutex_lock(&fman->goal_irq_mutex); spin_lock(&fman->lock); list_splice_init(&fman->cleanup_list, &list); seqno_valid = fman->seqno_valid; spin_unlock(&fman->lock); if (!seqno_valid && fman->goal_irq_on) { fman->goal_irq_on = false; vmw_goal_waiter_remove(fman->dev_priv); } mutex_unlock(&fman->goal_irq_mutex); if (list_empty(&list)) return; /* * At this point, only we should be able to manipulate the * list heads of the actions we have on the private list. * hence fman::lock not held. */ list_for_each_entry_safe(action, next_action, &list, head) { list_del_init(&action->head); if (action->cleanup) action->cleanup(action); } } while (1); } struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) { struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); if (unlikely(!fman)) return NULL; fman->dev_priv = dev_priv; spin_lock_init(&fman->lock); INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); fman->fifo_down = true; mutex_init(&fman->goal_irq_mutex); fman->ctx = dma_fence_context_alloc(1); return fman; } void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) { bool lists_empty; (void) cancel_work_sync(&fman->work); spin_lock(&fman->lock); lists_empty = list_empty(&fman->fence_list) && list_empty(&fman->cleanup_list); spin_unlock(&fman->lock); BUG_ON(!lists_empty); kfree(fman); } static int vmw_fence_obj_init(struct vmw_fence_manager *fman, struct vmw_fence_obj *fence, u32 seqno, void (*destroy) (struct vmw_fence_obj *fence)) { int ret = 0; dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, fman->ctx, seqno); INIT_LIST_HEAD(&fence->seq_passed_actions); fence->destroy = destroy; spin_lock(&fman->lock); if (unlikely(fman->fifo_down)) { ret = -EBUSY; goto out_unlock; } list_add_tail(&fence->head, &fman->fence_list); ++fman->num_fence_objects; out_unlock: spin_unlock(&fman->lock); return ret; } static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, struct list_head *list) { struct vmw_fence_action *action, *next_action; list_for_each_entry_safe(action, next_action, list, head) { list_del_init(&action->head); fman->pending_actions[action->type]--; if (action->seq_passed != NULL) action->seq_passed(action); /* * Add the cleanup action to the cleanup list so that * it will be 
performed by a worker task. */ list_add_tail(&action->head, &fman->cleanup_list); } } /** * vmw_fence_goal_new_locked - Figure out a new device fence goal * seqno if needed. * * @fman: Pointer to a fence manager. * @passed_seqno: The seqno the device currently signals as passed. * * This function should be called with the fence manager lock held. * It is typically called when we have a new passed_seqno, and * we might need to update the fence goal. It checks to see whether * the current fence goal has already passed, and, in that case, * scans through all unsignaled fences to get the next fence object with an * action attached, and sets the seqno of that fence as a new fence goal. * * returns true if the device goal seqno was updated. False otherwise. */ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, u32 passed_seqno) { u32 goal_seqno; struct vmw_fence_obj *fence; if (likely(!fman->seqno_valid)) return false; goal_seqno = vmw_fence_goal_read(fman->dev_priv); if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) return false; fman->seqno_valid = false; list_for_each_entry(fence, &fman->fence_list, head) { if (!list_empty(&fence->seq_passed_actions)) { fman->seqno_valid = true; vmw_fence_goal_write(fman->dev_priv, fence->base.seqno); break; } } return true; } /** * vmw_fence_goal_check_locked - Replace the device fence goal seqno if * needed. * * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be * considered as a device fence goal. * * This function should be called with the fence manager lock held. * It is typically called when an action has been attached to a fence to * check whether the seqno of that fence should be used for a fence * goal interrupt. This is typically needed if the current fence goal is * invalid, or has a higher seqno than that of the current fence object. * * returns true if the device goal seqno was updated. False otherwise. */ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); u32 goal_seqno; if (dma_fence_is_signaled_locked(&fence->base)) return false; goal_seqno = vmw_fence_goal_read(fman->dev_priv); if (likely(fman->seqno_valid && goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) return false; vmw_fence_goal_write(fman->dev_priv, fence->base.seqno); fman->seqno_valid = true; return true; } static void __vmw_fences_update(struct vmw_fence_manager *fman) { struct vmw_fence_obj *fence, *next_fence; struct list_head action_list; bool needs_rerun; uint32_t seqno, new_seqno; seqno = vmw_fence_read(fman->dev_priv); rerun: list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { list_del_init(&fence->head); dma_fence_signal_locked(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); vmw_fences_perform_actions(fman, &action_list); } else break; } /* * Rerun if the fence goal seqno was updated, and the * hardware might have raced with that update, so that * we missed a fence_goal irq. 
*/ needs_rerun = vmw_fence_goal_new_locked(fman, seqno); if (unlikely(needs_rerun)) { new_seqno = vmw_fence_read(fman->dev_priv); if (new_seqno != seqno) { seqno = new_seqno; goto rerun; } } if (!list_empty(&fman->cleanup_list)) (void) schedule_work(&fman->work); } void vmw_fences_update(struct vmw_fence_manager *fman) { spin_lock(&fman->lock); __vmw_fences_update(fman); spin_unlock(&fman->lock); } bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) return true; vmw_fences_update(fman); return dma_fence_is_signaled(&fence->base); } int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy, bool interruptible, unsigned long timeout) { long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); if (likely(ret > 0)) return 0; else if (ret == 0) return -EBUSY; else return ret; } static void vmw_fence_destroy(struct vmw_fence_obj *fence) { dma_fence_free(&fence->base); } int vmw_fence_create(struct vmw_fence_manager *fman, uint32_t seqno, struct vmw_fence_obj **p_fence) { struct vmw_fence_obj *fence; int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (unlikely(!fence)) return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy); if (unlikely(ret != 0)) goto out_err_init; *p_fence = fence; return 0; out_err_init: kfree(fence); return ret; } static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) { struct vmw_user_fence *ufence = container_of(fence, struct vmw_user_fence, fence); ttm_base_object_kfree(ufence, base); } static void vmw_user_fence_base_release(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct vmw_user_fence *ufence = container_of(base, struct vmw_user_fence, base); struct vmw_fence_obj *fence = &ufence->fence; *p_base = NULL; vmw_fence_obj_unreference(&fence); } int vmw_user_fence_create(struct drm_file *file_priv, struct vmw_fence_manager *fman, uint32_t seqno, struct vmw_fence_obj **p_fence, uint32_t *p_handle) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_user_fence *ufence; struct vmw_fence_obj *tmp; int ret; ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (unlikely(!ufence)) { ret = -ENOMEM; goto out_no_object; } ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, vmw_user_fence_destroy); if (unlikely(ret != 0)) { kfree(ufence); goto out_no_object; } /* * The base object holds a reference which is freed in * vmw_user_fence_base_release. */ tmp = vmw_fence_obj_reference(&ufence->fence); ret = ttm_base_object_init(tfile, &ufence->base, false, VMW_RES_FENCE, &vmw_user_fence_base_release); if (unlikely(ret != 0)) { /* * Free the base object's reference */ vmw_fence_obj_unreference(&tmp); goto out_err; } *p_fence = &ufence->fence; *p_handle = ufence->base.handle; return 0; out_err: tmp = &ufence->fence; vmw_fence_obj_unreference(&tmp); out_no_object: return ret; } /* * vmw_fence_fifo_down - signal all unsignaled fence objects. */ void vmw_fence_fifo_down(struct vmw_fence_manager *fman) { struct list_head action_list; int ret; /* * The list may be altered while we traverse it, so always * restart when we've released the fman->lock. 
*/ spin_lock(&fman->lock); fman->fifo_down = true; while (!list_empty(&fman->fence_list)) { struct vmw_fence_obj *fence = list_entry(fman->fence_list.prev, struct vmw_fence_obj, head); dma_fence_get(&fence->base); spin_unlock(&fman->lock); ret = vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); if (unlikely(ret != 0)) { list_del_init(&fence->head); dma_fence_signal(&fence->base); INIT_LIST_HEAD(&action_list); list_splice_init(&fence->seq_passed_actions, &action_list); vmw_fences_perform_actions(fman, &action_list); } BUG_ON(!list_empty(&fence->head)); dma_fence_put(&fence->base); spin_lock(&fman->lock); } spin_unlock(&fman->lock); } void vmw_fence_fifo_up(struct vmw_fence_manager *fman) { spin_lock(&fman->lock); fman->fifo_down = false; spin_unlock(&fman->lock); } /** * vmw_fence_obj_lookup - Look up a user-space fence object * * @tfile: A struct ttm_object_file identifying the caller. * @handle: A handle identifying the fence object. * @return: A struct vmw_user_fence base ttm object on success or * an error pointer on failure. * * The fence object is looked up and type-checked. The caller needs * to have opened the fence object first, but since that happens on * creation and fence objects aren't shareable, that's not an * issue currently. */ static struct ttm_base_object * vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle) { struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle); if (!base) { pr_err("Invalid fence object handle 0x%08lx.\n", (unsigned long)handle); return ERR_PTR(-EINVAL); } if (base->refcount_release != vmw_user_fence_base_release) { pr_err("Invalid fence object handle 0x%08lx.\n", (unsigned long)handle); ttm_base_object_unref(&base); return ERR_PTR(-EINVAL); } return base; } int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_fence_wait_arg *arg = (struct drm_vmw_fence_wait_arg *)data; unsigned long timeout; struct ttm_base_object *base; struct vmw_fence_obj *fence; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; int ret; uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ); /* * 64-bit division not present on 32-bit systems, so do an * approximation. (Divide by 1000000). */ wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) - (wait_timeout >> 26); if (!arg->cookie_valid) { arg->cookie_valid = 1; arg->kernel_cookie = jiffies + wait_timeout; } base = vmw_fence_obj_lookup(tfile, arg->handle); if (IS_ERR(base)) return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); timeout = jiffies; if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { ret = ((vmw_fence_obj_signaled(fence)) ? 0 : -EBUSY); goto out; } timeout = (unsigned long)arg->kernel_cookie - timeout; ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout); out: ttm_base_object_unref(&base); /* * Optionally unref the fence object. 
*/ if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) return ttm_ref_object_base_unref(tfile, arg->handle); return ret; } int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_fence_signaled_arg *arg = (struct drm_vmw_fence_signaled_arg *) data; struct ttm_base_object *base; struct vmw_fence_obj *fence; struct vmw_fence_manager *fman; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); base = vmw_fence_obj_lookup(tfile, arg->handle); if (IS_ERR(base)) return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); fman = fman_from_fence(fence); arg->signaled = vmw_fence_obj_signaled(fence); arg->signaled_flags = arg->flags; spin_lock(&fman->lock); arg->passed_seqno = dev_priv->last_read_seqno; spin_unlock(&fman->lock); ttm_base_object_unref(&base); return 0; } int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_fence_arg *arg = (struct drm_vmw_fence_arg *) data; return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, arg->handle); } /** * vmw_event_fence_action_seq_passed * * @action: The struct vmw_fence_action embedded in a struct * vmw_event_fence_action. * * This function is called when the seqno of the fence where @action is * attached has passed. It queues the event on the submitter's event list. * This function is always called from atomic context. */ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) { struct vmw_event_fence_action *eaction = container_of(action, struct vmw_event_fence_action, action); struct drm_device *dev = eaction->dev; struct drm_pending_event *event = eaction->event; if (unlikely(event == NULL)) return; spin_lock_irq(&dev->event_lock); if (likely(eaction->tv_sec != NULL)) { struct timespec64 ts; ktime_get_ts64(&ts); /* monotonic time, so no y2038 overflow */ *eaction->tv_sec = ts.tv_sec; *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC; } drm_send_event_locked(dev, eaction->event); eaction->event = NULL; spin_unlock_irq(&dev->event_lock); } /** * vmw_event_fence_action_cleanup * * @action: The struct vmw_fence_action embedded in a struct * vmw_event_fence_action. * * This function is the struct vmw_fence_action destructor. It's typically * called from a workqueue. */ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) { struct vmw_event_fence_action *eaction = container_of(action, struct vmw_event_fence_action, action); vmw_fence_obj_unreference(&eaction->fence); kfree(eaction); } /** * vmw_fence_obj_add_action - Add an action to a fence object. * * @fence: The fence object. * @action: The action to add. * * Note that the action callbacks may be executed before this function * returns. */ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, struct vmw_fence_action *action) { struct vmw_fence_manager *fman = fman_from_fence(fence); bool run_update = false; mutex_lock(&fman->goal_irq_mutex); spin_lock(&fman->lock); fman->pending_actions[action->type]++; if (dma_fence_is_signaled_locked(&fence->base)) { struct list_head action_list; INIT_LIST_HEAD(&action_list); list_add_tail(&action->head, &action_list); vmw_fences_perform_actions(fman, &action_list); } else { list_add_tail(&action->head, &fence->seq_passed_actions); /* * This function may set fman::seqno_valid, so it must * be run with the goal_irq_mutex held. 
*/ run_update = vmw_fence_goal_check_locked(fence); } spin_unlock(&fman->lock); if (run_update) { if (!fman->goal_irq_on) { fman->goal_irq_on = true; vmw_goal_waiter_add(fman->dev_priv); } vmw_fences_update(fman); } mutex_unlock(&fman->goal_irq_mutex); } /** * vmw_event_fence_action_queue - Post an event for sending when a fence * object seqno has passed. * * @file_priv: The file connection on which the event should be posted. * @fence: The fence object on which to post the event. * @event: Event to be posted. This event should've been alloced * using k[mz]alloc, and should've been completely initialized. * @tv_sec: If non-null, the variable pointed to will be assigned * current time tv_sec val when the fence signals. * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will * be assigned the current time tv_usec val when the fence signals. * @interruptible: Interruptible waits if possible. * * As a side effect, the object pointed to by @event may have been * freed when this function returns. If this function returns with * an error code, the caller needs to free that object. */ int vmw_event_fence_action_queue(struct drm_file *file_priv, struct vmw_fence_obj *fence, struct drm_pending_event *event, uint32_t *tv_sec, uint32_t *tv_usec, bool interruptible) { struct vmw_event_fence_action *eaction; struct vmw_fence_manager *fman = fman_from_fence(fence); eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); if (unlikely(!eaction)) return -ENOMEM; eaction->event = event; eaction->action.seq_passed = vmw_event_fence_action_seq_passed; eaction->action.cleanup = vmw_event_fence_action_cleanup; eaction->action.type = VMW_ACTION_EVENT; eaction->fence = vmw_fence_obj_reference(fence); eaction->dev = &fman->dev_priv->drm; eaction->tv_sec = tv_sec; eaction->tv_usec = tv_usec; vmw_fence_obj_add_action(fence, &eaction->action); return 0; } struct vmw_event_fence_pending { struct drm_pending_event base; struct drm_vmw_event_fence event; }; static int vmw_event_fence_action_create(struct drm_file *file_priv, struct vmw_fence_obj *fence, uint32_t flags, uint64_t user_data, bool interruptible) { struct vmw_event_fence_pending *event; struct vmw_fence_manager *fman = fman_from_fence(fence); struct drm_device *dev = &fman->dev_priv->drm; int ret; event = kzalloc(sizeof(*event), GFP_KERNEL); if (unlikely(!event)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; goto out_no_space; } event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; event->event.base.length = sizeof(*event); event->event.user_data = user_data; ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); if (unlikely(ret != 0)) { DRM_ERROR("Failed to allocate event space for this file.\n"); kfree(event); goto out_no_space; } if (flags & DRM_VMW_FE_FLAG_REQ_TIME) ret = vmw_event_fence_action_queue(file_priv, fence, &event->base, &event->event.tv_sec, &event->event.tv_usec, interruptible); else ret = vmw_event_fence_action_queue(file_priv, fence, &event->base, NULL, NULL, interruptible); if (ret != 0) goto out_no_queue; return 0; out_no_queue: drm_event_cancel_free(dev, &event->base); out_no_space: return ret; } int vmw_fence_event_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_fence_event_arg *arg = (struct drm_vmw_fence_event_arg *) data; struct vmw_fence_obj *fence = NULL; struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct ttm_object_file *tfile = vmw_fp->tfile; struct drm_vmw_fence_rep __user 
*user_fence_rep = (struct drm_vmw_fence_rep __user *)(unsigned long) arg->fence_rep; uint32_t handle; int ret; /* * Look up an existing fence object, * and if user-space wants a new reference, * add one. */ if (arg->handle) { struct ttm_base_object *base = vmw_fence_obj_lookup(tfile, arg->handle); if (IS_ERR(base)) return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); (void) vmw_fence_obj_reference(fence); if (user_fence_rep != NULL) { ret = ttm_ref_object_add(vmw_fp->tfile, base, NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); goto out_no_ref_obj; } handle = base->handle; } ttm_base_object_unref(&base); } /* * Create a new fence object. */ if (!fence) { ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, (user_fence_rep) ? &handle : NULL); if (unlikely(ret != 0)) { DRM_ERROR("Fence event failed to create fence.\n"); return ret; } } BUG_ON(fence == NULL); ret = vmw_event_fence_action_create(file_priv, fence, arg->flags, arg->user_data, true); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to attach event to fence.\n"); goto out_no_create; } vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, handle, -1); vmw_fence_obj_unreference(&fence); return 0; out_no_create: if (user_fence_rep != NULL) ttm_ref_object_base_unref(tfile, handle); out_no_ref_obj: vmw_fence_obj_unreference(&fence); return ret; }
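/*
 * Illustrative sketch (not part of the driver): the wrap-safe seqno test used
 * throughout this file. Seqnos are 32-bit and increase modulo 2^32; with
 * VMW_FENCE_WRAP == 1 << 31, the unsigned difference
 * (device_seqno - fence_seqno) is below the wrap threshold exactly when the
 * fence seqno is at most half the counter range behind the device seqno,
 * which is how "already signaled" is decided even across a counter wrap.
 */
static inline bool example_seqno_passed(u32 device_seqno, u32 fence_seqno)
{
	return (u32)(device_seqno - fence_seqno) < VMW_FENCE_WRAP;
}

/*
 * Side note on the wait ioctl above: the shift expression
 * (x >> 20) + (x >> 24) - (x >> 26) approximates x / 10^6, since
 * 1/2^20 + 1/2^24 - 1/2^26 = 67/2^26 ~= 1/1001625, i.e. within about 0.2%
 * of a true divide-by-a-million, avoiding a 64-bit division on 32-bit hosts.
 */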
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_validation.h" #include <linux/slab.h> #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) /** * struct vmw_validation_bo_node - Buffer object validation metadata. * @base: Metadata used for TTM reservation- and validation. * @hash: A hash entry used for the duplicate detection hash table. * @coherent_count: If switching backup buffers, number of new coherent * resources that will have this buffer as a backup buffer. * * Bit fields are used since these structures are allocated and freed in * large numbers and space conservation is desired. */ struct vmw_validation_bo_node { struct ttm_validate_buffer base; struct vmwgfx_hash_item hash; unsigned int coherent_count; }; /** * struct vmw_validation_res_node - Resource validation metadata. * @head: List head for the resource validation list. * @hash: A hash entry used for the duplicate detection hash table. * @res: Reference counted resource pointer. * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer * to be assigned to a resource. * @new_guest_memory_offset: Offset into the new backup mob for resources * that can share MOBs. * @no_buffer_needed: Kernel does not need to allocate a MOB during validation, * the command stream provides a mob bind operation. * @switching_guest_memory_bo: The validation process is switching backup MOB. * @first_usage: True iff the resource has been seen only once in the current * validation batch. * @reserved: Whether the resource is currently reserved by this process. * @dirty_set: Change dirty status of the resource. * @dirty: Dirty information VMW_RES_DIRTY_XX. * @private: Optionally additional memory for caller-private data. * * Bit fields are used since these structures are allocated and freed in * large numbers and space conservation is desired. 
*/ struct vmw_validation_res_node { struct list_head head; struct vmwgfx_hash_item hash; struct vmw_resource *res; struct vmw_bo *new_guest_memory_bo; unsigned long new_guest_memory_offset; u32 no_buffer_needed : 1; u32 switching_guest_memory_bo : 1; u32 first_usage : 1; u32 reserved : 1; u32 dirty : 1; u32 dirty_set : 1; unsigned long private[]; }; /** * vmw_validation_mem_alloc - Allocate kernel memory from the validation * context based allocator * @ctx: The validation context * @size: The number of bytes to allocated. * * The memory allocated may not exceed PAGE_SIZE, and the returned * address is aligned to sizeof(long). All memory allocated this way is * reclaimed after validation when calling any of the exported functions: * vmw_validation_unref_lists() * vmw_validation_revert() * vmw_validation_done() * * Return: Pointer to the allocated memory on success. NULL on failure. */ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, unsigned int size) { void *addr; size = vmw_validation_align(size); if (size > PAGE_SIZE) return NULL; if (ctx->mem_size_left < size) { struct page *page; if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) { ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN; ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN; } page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return NULL; if (ctx->vm) ctx->vm_size_left -= PAGE_SIZE; list_add_tail(&page->lru, &ctx->page_list); ctx->page_address = page_address(page); ctx->mem_size_left = PAGE_SIZE; } addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left)); ctx->mem_size_left -= size; return addr; } /** * vmw_validation_mem_free - Free all memory allocated using * vmw_validation_mem_alloc() * @ctx: The validation context * * All memory previously allocated for this context using * vmw_validation_mem_alloc() is freed. */ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) { struct page *entry, *next; list_for_each_entry_safe(entry, next, &ctx->page_list, lru) { list_del_init(&entry->lru); __free_page(entry); } ctx->mem_size_left = 0; if (ctx->vm && ctx->total_mem) { ctx->total_mem = 0; ctx->vm_size_left = 0; } } /** * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the * validation context's lists. * @ctx: The validation context to search. * @vbo: The buffer object to search for. * * Return: Pointer to the struct vmw_validation_bo_node referencing the * duplicate, or NULL if none found. */ static struct vmw_validation_bo_node * vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, struct vmw_bo *vbo) { struct vmw_validation_bo_node *bo_node = NULL; if (!ctx->merge_dups) return NULL; if (ctx->sw_context) { struct vmwgfx_hash_item *hash; unsigned long key = (unsigned long) vbo; hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { if (hash->key == key) { bo_node = container_of(hash, typeof(*bo_node), hash); break; } } } else { struct vmw_validation_bo_node *entry; list_for_each_entry(entry, &ctx->bo_list, base.head) { if (entry->base.bo == &vbo->tbo) { bo_node = entry; break; } } } return bo_node; } /** * vmw_validation_find_res_dup - Find a duplicate resource entry in the * validation context's lists. * @ctx: The validation context to search. * @res: Reference counted resource pointer. * * Return: Pointer to the struct vmw_validation_bo_node referencing the * duplicate, or NULL if none found. 
*/ static struct vmw_validation_res_node * vmw_validation_find_res_dup(struct vmw_validation_context *ctx, struct vmw_resource *res) { struct vmw_validation_res_node *res_node = NULL; if (!ctx->merge_dups) return NULL; if (ctx->sw_context) { struct vmwgfx_hash_item *hash; unsigned long key = (unsigned long) res; hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) { if (hash->key == key) { res_node = container_of(hash, typeof(*res_node), hash); break; } } } else { struct vmw_validation_res_node *entry; list_for_each_entry(entry, &ctx->resource_ctx_list, head) { if (entry->res == res) { res_node = entry; goto out; } } list_for_each_entry(entry, &ctx->resource_list, head) { if (entry->res == res) { res_node = entry; break; } } } out: return res_node; } /** * vmw_validation_add_bo - Add a buffer object to the validation context. * @ctx: The validation context. * @vbo: The buffer object. * * Return: Zero on success, negative error code otherwise. */ int vmw_validation_add_bo(struct vmw_validation_context *ctx, struct vmw_bo *vbo) { struct vmw_validation_bo_node *bo_node; bo_node = vmw_validation_find_bo_dup(ctx, vbo); if (!bo_node) { struct ttm_validate_buffer *val_buf; bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node)); if (!bo_node) return -ENOMEM; if (ctx->sw_context) { bo_node->hash.key = (unsigned long) vbo; hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head, bo_node->hash.key); } val_buf = &bo_node->base; val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); if (!val_buf->bo) return -ESRCH; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &ctx->bo_list); } return 0; } /** * vmw_validation_add_resource - Add a resource to the validation context. * @ctx: The validation context. * @res: The resource. * @priv_size: Size of private, additional metadata. * @dirty: Whether to change dirty status. * @p_node: Output pointer of additional metadata address. * @first_usage: Whether this was the first time this resource was seen. * * Return: Zero on success, negative error code otherwise. */ int vmw_validation_add_resource(struct vmw_validation_context *ctx, struct vmw_resource *res, size_t priv_size, u32 dirty, void **p_node, bool *first_usage) { struct vmw_validation_res_node *node; node = vmw_validation_find_res_dup(ctx, res); if (node) { node->first_usage = 0; goto out_fill; } node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size); if (!node) { VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n"); return -ENOMEM; } if (ctx->sw_context) { node->hash.key = (unsigned long) res; hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = vmw_resource_reference_unless_doomed(res); if (!node->res) return -ESRCH; node->first_usage = 1; if (!res->dev_priv->has_mob) { list_add_tail(&node->head, &ctx->resource_list); } else { switch (vmw_res_type(res)) { case vmw_res_context: case vmw_res_dx_context: list_add(&node->head, &ctx->resource_ctx_list); break; case vmw_res_cotable: list_add_tail(&node->head, &ctx->resource_ctx_list); break; default: list_add_tail(&node->head, &ctx->resource_list); break; } } out_fill: if (dirty) { node->dirty_set = 1; /* Overwriting previous information here is intentional! */ node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0; } if (first_usage) *first_usage = node->first_usage; if (p_node) *p_node = &node->private; return 0; } /** * vmw_validation_res_set_dirty - Register a resource dirty set or clear during * validation. * @ctx: The validation context. 
* @val_private: The additional meta-data pointer returned when the * resource was registered with the validation context. Used to identify * the resource. * @dirty: Dirty information VMW_RES_DIRTY_XX */ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, void *val_private, u32 dirty) { struct vmw_validation_res_node *val; if (!dirty) return; val = container_of(val_private, typeof(*val), private); val->dirty_set = 1; /* Overwriting previous information here is intentional! */ val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0; } /** * vmw_validation_res_switch_backup - Register a backup MOB switch during * validation. * @ctx: The validation context. * @val_private: The additional meta-data pointer returned when the * resource was registered with the validation context. Used to identify * the resource. * @vbo: The new backup buffer object MOB. This buffer object needs to have * already been registered with the validation context. * @guest_memory_offset: Offset into the new backup MOB. */ void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, void *val_private, struct vmw_bo *vbo, unsigned long guest_memory_offset) { struct vmw_validation_res_node *val; val = container_of(val_private, typeof(*val), private); val->switching_guest_memory_bo = 1; if (val->first_usage) val->no_buffer_needed = 1; val->new_guest_memory_bo = vbo; val->new_guest_memory_offset = guest_memory_offset; } /** * vmw_validation_res_reserve - Reserve all resources registered with this * validation context. * @ctx: The validation context. * @intr: Use interruptible waits when possible. * * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error * code on failure. */ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, bool intr) { struct vmw_validation_res_node *val; int ret = 0; list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); list_for_each_entry(val, &ctx->resource_list, head) { struct vmw_resource *res = val->res; ret = vmw_resource_reserve(res, intr, val->no_buffer_needed); if (ret) goto out_unreserve; val->reserved = 1; if (res->guest_memory_bo) { struct vmw_bo *vbo = res->guest_memory_bo; vmw_bo_placement_set(vbo, res->func->domain, res->func->busy_domain); ret = vmw_validation_add_bo(ctx, vbo); if (ret) goto out_unreserve; } if (val->switching_guest_memory_bo && val->new_guest_memory_bo && res->coherent) { struct vmw_validation_bo_node *bo_node = vmw_validation_find_bo_dup(ctx, val->new_guest_memory_bo); if (WARN_ON(!bo_node)) { ret = -EINVAL; goto out_unreserve; } bo_node->coherent_count++; } } return 0; out_unreserve: vmw_validation_res_unreserve(ctx, true); return ret; } /** * vmw_validation_res_unreserve - Unreserve all reserved resources * registered with this validation context. * @ctx: The validation context. * @backoff: Whether this is a backoff- of a commit-type operation. This * is used to determine whether to switch backup MOBs or not. 
*/ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, bool backoff) { struct vmw_validation_res_node *val; list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); if (backoff) list_for_each_entry(val, &ctx->resource_list, head) { if (val->reserved) vmw_resource_unreserve(val->res, false, false, false, NULL, 0); } else list_for_each_entry(val, &ctx->resource_list, head) { if (val->reserved) vmw_resource_unreserve(val->res, val->dirty_set, val->dirty, val->switching_guest_memory_bo, val->new_guest_memory_bo, val->new_guest_memory_offset); } } /** * vmw_validation_bo_validate_single - Validate a single buffer object. * @bo: The TTM buffer object base. * @interruptible: Whether to perform waits interruptible if possible. * * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error * code on failure. */ static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, bool interruptible) { struct vmw_bo *vbo = to_vmw_bo(&bo->base); struct ttm_operation_ctx ctx = { .interruptible = interruptible, .no_wait_gpu = false }; int ret; if (atomic_read(&vbo->cpu_writers)) return -EBUSY; if (vbo->tbo.pin_count > 0) return 0; ret = ttm_bo_validate(bo, &vbo->placement, &ctx); if (ret == 0 || ret == -ERESTARTSYS) return ret; /* * If that failed, try again, this time evicting * previous contents. */ ctx.allow_res_evict = true; return ttm_bo_validate(bo, &vbo->placement, &ctx); } /** * vmw_validation_bo_validate - Validate all buffer objects registered with * the validation context. * @ctx: The validation context. * @intr: Whether to perform waits interruptible if possible. * * Return: Zero on success, -ERESTARTSYS if interrupted, * negative error code on failure. */ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) { struct vmw_validation_bo_node *entry; int ret; list_for_each_entry(entry, &ctx->bo_list, base.head) { struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base); ret = vmw_validation_bo_validate_single(entry->base.bo, intr); if (ret) return ret; /* * Rather than having the resource code allocating the bo * dirty tracker in resource_unreserve() where we can't fail, * Do it here when validating the buffer object. */ if (entry->coherent_count) { unsigned int coherent_count = entry->coherent_count; while (coherent_count) { ret = vmw_bo_dirty_add(vbo); if (ret) return ret; coherent_count--; } entry->coherent_count -= coherent_count; } if (vbo->dirty) vmw_bo_dirty_scan(vbo); } return 0; } /** * vmw_validation_res_validate - Validate all resources registered with the * validation context. * @ctx: The validation context. * @intr: Whether to perform waits interruptible if possible. * * Before this function is called, all resource backup buffers must have * been validated. * * Return: Zero on success, -ERESTARTSYS if interrupted, * negative error code on failure. 
*/ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) { struct vmw_validation_res_node *val; int ret; list_for_each_entry(val, &ctx->resource_list, head) { struct vmw_resource *res = val->res; struct vmw_bo *backup = res->guest_memory_bo; ret = vmw_resource_validate(res, intr, val->dirty_set && val->dirty); if (ret) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to validate resource.\n"); return ret; } /* Check if the resource switched backup buffer */ if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) { struct vmw_bo *vbo = res->guest_memory_bo; vmw_bo_placement_set(vbo, res->func->domain, res->func->busy_domain); ret = vmw_validation_add_bo(ctx, vbo); if (ret) return ret; } } return 0; } /** * vmw_validation_drop_ht - Reset the hash table used for duplicate finding * and unregister it from this validation context. * @ctx: The validation context. * * The hash table used for duplicate finding is an expensive resource and * may be protected by mutexes that may cause deadlocks during resource * unreferencing if held. After resource- and buffer object registering, * there is no longer any use for this hash table, so allow freeing it * either to shorten any mutex locking time, or before resources- and * buffer objects are freed during validation context cleanup. */ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) { struct vmw_validation_bo_node *entry; struct vmw_validation_res_node *val; if (!ctx->sw_context) return; list_for_each_entry(entry, &ctx->bo_list, base.head) hash_del_rcu(&entry->hash.head); list_for_each_entry(val, &ctx->resource_list, head) hash_del_rcu(&val->hash.head); list_for_each_entry(val, &ctx->resource_ctx_list, head) hash_del_rcu(&entry->hash.head); ctx->sw_context = NULL; } /** * vmw_validation_unref_lists - Unregister previously registered buffer * object and resources. * @ctx: The validation context. * * Note that this function may cause buffer object- and resource destructors * to be invoked. */ void vmw_validation_unref_lists(struct vmw_validation_context *ctx) { struct vmw_validation_bo_node *entry; struct vmw_validation_res_node *val; list_for_each_entry(entry, &ctx->bo_list, base.head) { ttm_bo_put(entry->base.bo); entry->base.bo = NULL; } list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list); list_for_each_entry(val, &ctx->resource_list, head) vmw_resource_unreference(&val->res); /* * No need to detach each list entry since they are all freed with * vmw_validation_free_mem. Just make the inaccessible. */ INIT_LIST_HEAD(&ctx->bo_list); INIT_LIST_HEAD(&ctx->resource_list); vmw_validation_mem_free(ctx); } /** * vmw_validation_prepare - Prepare a validation context for command * submission. * @ctx: The validation context. * @mutex: The mutex used to protect resource reservation. * @intr: Whether to perform waits interruptible if possible. * * Note that the single reservation mutex @mutex is an unfortunate * construct. Ideally resource reservation should be moved to per-resource * ww_mutexes. * If this functions doesn't return Zero to indicate success, all resources * are left unreserved but still referenced. * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code * on error. 
*/ int vmw_validation_prepare(struct vmw_validation_context *ctx, struct mutex *mutex, bool intr) { int ret = 0; if (mutex) { if (intr) ret = mutex_lock_interruptible(mutex); else mutex_lock(mutex); if (ret) return -ERESTARTSYS; } ctx->res_mutex = mutex; ret = vmw_validation_res_reserve(ctx, intr); if (ret) goto out_no_res_reserve; ret = vmw_validation_bo_reserve(ctx, intr); if (ret) goto out_no_bo_reserve; ret = vmw_validation_bo_validate(ctx, intr); if (ret) goto out_no_validate; ret = vmw_validation_res_validate(ctx, intr); if (ret) goto out_no_validate; return 0; out_no_validate: vmw_validation_bo_backoff(ctx); out_no_bo_reserve: vmw_validation_res_unreserve(ctx, true); out_no_res_reserve: if (mutex) mutex_unlock(mutex); return ret; } /** * vmw_validation_revert - Revert validation actions if command submission * failed. * * @ctx: The validation context. * * The caller still needs to unref resources after a call to this function. */ void vmw_validation_revert(struct vmw_validation_context *ctx) { vmw_validation_bo_backoff(ctx); vmw_validation_res_unreserve(ctx, true); if (ctx->res_mutex) mutex_unlock(ctx->res_mutex); vmw_validation_unref_lists(ctx); } /** * vmw_validation_done - Commit validation actions after command submission * success. * @ctx: The validation context. * @fence: Fence with which to fence all buffer objects taking part in the * command submission. * * The caller does NOT need to unref resources after a call to this function. */ void vmw_validation_done(struct vmw_validation_context *ctx, struct vmw_fence_obj *fence) { vmw_validation_bo_fence(ctx, fence); vmw_validation_res_unreserve(ctx, false); if (ctx->res_mutex) mutex_unlock(ctx->res_mutex); vmw_validation_unref_lists(ctx); } /** * vmw_validation_preload_bo - Preload the validation memory allocator for a * call to vmw_validation_add_bo(). * @ctx: Pointer to the validation context. * * Iff this function returns successfully, the next call to * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal * but voids the guarantee. * * Returns: Zero if successful, %-EINVAL otherwise. */ int vmw_validation_preload_bo(struct vmw_validation_context *ctx) { unsigned int size = sizeof(struct vmw_validation_bo_node); if (!vmw_validation_mem_alloc(ctx, size)) return -ENOMEM; ctx->mem_size_left += size; return 0; } /** * vmw_validation_preload_res - Preload the validation memory allocator for a * call to vmw_validation_add_res(). * @ctx: Pointer to the validation context. * @size: Size of the validation node extra data. See below. * * Iff this function returns successfully, the next call to * vmw_validation_add_res() with the same or smaller @size is guaranteed not to * sleep. An error is not fatal but voids the guarantee. * * Returns: Zero if successful, %-EINVAL otherwise. */ int vmw_validation_preload_res(struct vmw_validation_context *ctx, unsigned int size) { size = vmw_validation_align(sizeof(struct vmw_validation_res_node) + size) + vmw_validation_align(sizeof(struct vmw_validation_bo_node)); if (!vmw_validation_mem_alloc(ctx, size)) return -ENOMEM; ctx->mem_size_left += size; return 0; } /** * vmw_validation_bo_backoff - Unreserve buffer objects registered with a * validation context * @ctx: The validation context * * This function unreserves the buffer objects previously reserved using * vmw_validation_bo_reserve. 
It's typically used as part of an error path */ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) { struct vmw_validation_bo_node *entry; /* * Switching coherent resource backup buffers failed. * Release corresponding buffer object dirty trackers. */ list_for_each_entry(entry, &ctx->bo_list, base.head) { if (entry->coherent_count) { unsigned int coherent_count = entry->coherent_count; struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base); while (coherent_count--) vmw_bo_dirty_release(vbo); } } ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list); }
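/*
 * Illustrative sketch (not part of the driver): the typical life cycle of a
 * validation context, pieced together from the helpers above. The buffer
 * object, fence and placement domains are caller-supplied placeholders and
 * error handling is abbreviated.
 */
static int example_validation_cycle(struct vmw_bo *vbo,
				    struct vmw_fence_obj *fence)
{
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	int ret;

	/* Register the buffer with the placement it must end up in. */
	vmw_bo_placement_set(vbo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, vbo);
	if (ret)
		goto out_unref;

	/* Reserve and validate everything registered so far. */
	ret = vmw_validation_prepare(&val_ctx, NULL, true);
	if (ret)
		goto out_unref;

	/* ... build and submit commands referencing the validated objects ... */

	/* Fence the buffers, unreserve and drop the context's references. */
	vmw_validation_done(&val_ctx, fence);
	return 0;

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}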
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" /** * struct vmw_stream - Overlay stream simple resource. * @sres: The simple resource we derive from. * @stream_id: The overlay stream id. */ struct vmw_stream { struct vmw_simple_resource sres; u32 stream_id; }; /** * vmw_stream - Typecast a struct vmw_resource to a struct vmw_stream. * @res: Pointer to the struct vmw_resource. * * Returns: Returns a pointer to the struct vmw_stream. */ static struct vmw_stream * vmw_stream(struct vmw_resource *res) { return container_of(res, struct vmw_stream, sres.res); } /*************************************************************************** * Simple resource callbacks for struct vmw_stream **************************************************************************/ static void vmw_stream_hw_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_stream *stream = vmw_stream(res); int ret; ret = vmw_overlay_unref(dev_priv, stream->stream_id); WARN_ON_ONCE(ret != 0); } static int vmw_stream_init(struct vmw_resource *res, void *data) { struct vmw_stream *stream = vmw_stream(res); return vmw_overlay_claim(res->dev_priv, &stream->stream_id); } static void vmw_stream_set_arg_handle(void *data, u32 handle) { struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; arg->stream_id = handle; } static const struct vmw_simple_resource_func va_stream_func = { .res_func = { .res_type = vmw_res_stream, .needs_guest_memory = false, .may_evict = false, .type_name = "overlay stream", .domain = VMW_BO_DOMAIN_SYS, .busy_domain = VMW_BO_DOMAIN_SYS, .create = NULL, .destroy = NULL, .bind = NULL, .unbind = NULL }, .ttm_res_type = VMW_RES_STREAM, .size = sizeof(struct vmw_stream), .init = vmw_stream_init, .hw_destroy = vmw_stream_hw_destroy, .set_arg_handle = vmw_stream_set_arg_handle, }; /*************************************************************************** * End simple resource callbacks for struct vmw_stream **************************************************************************/ /** * vmw_stream_unref_ioctl - Ioctl to unreference a user-space handle to * a struct vmw_stream. 
* @dev: Pointer to the drm device. * @data: The ioctl argument * @file_priv: Pointer to a struct drm_file identifying the caller. * * Return: * 0 if successful. * Negative error value on failure. */ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, arg->stream_id); } /** * vmw_stream_claim_ioctl - Ioctl to claim a struct vmw_stream overlay. * @dev: Pointer to the drm device. * @data: The ioctl argument * @file_priv: Pointer to a struct drm_file identifying the caller. * * Return: * 0 if successful. * Negative error value on failure. */ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return vmw_simple_resource_create_ioctl(dev, data, file_priv, &va_stream_func); } /** * vmw_user_stream_lookup - Look up a struct vmw_user_stream from a handle. * @dev_priv: Pointer to a struct vmw_private. * @tfile: struct ttm_object_file identifying the caller. * @inout_id: In: The user-space handle. Out: The stream id. * @out: On output contains a refcounted pointer to the embedded * struct vmw_resource. * * Return: * 0 if successful. * Negative error value on failure. */ int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t *inout_id, struct vmw_resource **out) { struct vmw_stream *stream; struct vmw_resource *res = vmw_simple_resource_lookup(tfile, *inout_id, &va_stream_func); if (IS_ERR(res)) return PTR_ERR(res); stream = vmw_stream(res); *inout_id = stream->stream_id; *out = res; return 0; }
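/*
 * Illustrative sketch (not part of the driver): how a caller resolves a
 * user-space stream handle with the lookup helper above. On success the
 * handle has been rewritten to the device stream id and the caller holds a
 * resource reference that must be dropped when no longer needed.
 */
static int example_resolve_stream(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  u32 handle, u32 *stream_id)
{
	struct vmw_resource *res;
	int ret = vmw_user_stream_lookup(dev_priv, tfile, &handle, &res);

	if (ret)
		return ret;

	*stream_id = handle;
	vmw_resource_unreference(&res);
	return 0;
}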
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_va.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * Copyright 2021-2023 VMware, Inc. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "drm/drm_prime.h" #include "drm/drm_gem_ttm_helper.h" static void vmw_gem_object_free(struct drm_gem_object *gobj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); if (bo) ttm_bo_put(bo); } static int vmw_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { return 0; } static void vmw_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv) { } static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); struct vmw_bo *vbo = to_vmw_bo(obj); int ret; ret = ttm_bo_reserve(bo, false, false, NULL); if (unlikely(ret != 0)) goto err; vmw_bo_pin_reserved(vbo, do_pin); ttm_bo_unreserve(bo); err: return ret; } static int vmw_gem_object_pin(struct drm_gem_object *obj) { return vmw_gem_pin_private(obj, true); } static void vmw_gem_object_unpin(struct drm_gem_object *obj) { vmw_gem_pin_private(obj, false); } static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); struct vmw_ttm_tt *vmw_tt = container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); if (vmw_tt->vsgt.sgt) return vmw_tt->vsgt.sgt; return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); } static const struct vm_operations_struct vmw_vm_ops = { .pfn_mkwrite = vmw_bo_vm_mkwrite, .page_mkwrite = vmw_bo_vm_mkwrite, .fault = vmw_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, }; static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .free = vmw_gem_object_free, .open = vmw_gem_object_open, .close = vmw_gem_object_close, .print_info = drm_gem_ttm_print_info, .pin = vmw_gem_object_pin, .unpin = vmw_gem_object_unpin, .get_sg_table = vmw_gem_object_get_sg_table, .vmap = drm_gem_ttm_vmap, .vunmap = drm_gem_ttm_vunmap, .mmap = drm_gem_ttm_mmap, .vm_ops = &vmw_vm_ops, }; int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, uint32_t *handle, struct vmw_bo **p_vbo) { int ret; struct vmw_bo_params params = { .domain = (dev_priv->has_mob) ? 
VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM, .busy_domain = VMW_BO_DOMAIN_SYS, .bo_type = ttm_bo_type_device, .size = size, .pin = false }; ret = vmw_bo_create(dev_priv, &params, p_vbo); if (ret != 0) goto out_no_bo; (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle); out_no_bo: return ret; } int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct vmw_private *dev_priv = vmw_priv(dev); union drm_vmw_alloc_dmabuf_arg *arg = (union drm_vmw_alloc_dmabuf_arg *)data; struct drm_vmw_alloc_dmabuf_req *req = &arg->req; struct drm_vmw_dmabuf_rep *rep = &arg->rep; struct vmw_bo *vbo; uint32_t handle; int ret; ret = vmw_gem_object_create_with_handle(dev_priv, filp, req->size, &handle, &vbo); if (ret) goto out_no_bo; rep->handle = handle; rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node); rep->cur_gmr_id = handle; rep->cur_gmr_offset = 0; /* drop reference from allocate - handle holds it now */ drm_gem_object_put(&vbo->tbo.base); out_no_bo: return ret; } #if defined(CONFIG_DEBUG_FS) static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) { const char *placement; const char *type; switch (bo->tbo.resource->mem_type) { case TTM_PL_SYSTEM: placement = " CPU"; break; case VMW_PL_GMR: placement = " GMR"; break; case VMW_PL_MOB: placement = " MOB"; break; case VMW_PL_SYSTEM: placement = "VCPU"; break; case TTM_PL_VRAM: placement = "VRAM"; break; default: placement = "None"; break; } switch (bo->tbo.type) { case ttm_bo_type_device: type = "device"; break; case ttm_bo_type_kernel: type = "kernel"; break; case ttm_bo_type_sg: type = "sg "; break; default: type = "none "; break; } seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s", id, bo->tbo.base.size, placement, type); seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d", bo->tbo.priority, bo->tbo.pin_count, kref_read(&bo->tbo.base.refcount), kref_read(&bo->tbo.kref)); seq_puts(m, "\n"); } static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused) { struct vmw_private *vdev = (struct vmw_private *)m->private; struct drm_device *dev = &vdev->drm; struct drm_file *file; int r; r = mutex_lock_interruptible(&dev->filelist_mutex); if (r) return r; list_for_each_entry(file, &dev->filelist, lhead) { struct task_struct *task; struct drm_gem_object *gobj; int id; /* * Although we have a valid reference on file->pid, that does * not guarantee that the task_struct who called get_pid() is * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ rcu_read_lock(); task = pid_task(file->pid, PIDTYPE_TGID); seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), task ? task->comm : "<unknown>"); rcu_read_unlock(); spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, id) { struct vmw_bo *bo = to_vmw_bo(gobj); vmw_bo_print_info(id, bo, m); } spin_unlock(&file->table_lock); } mutex_unlock(&dev->filelist_mutex); return 0; } DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info); #endif void vmw_debugfs_gem_init(struct vmw_private *vdev) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = vdev->drm.primary; struct dentry *root = minor->debugfs_root; debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev, &vmw_debugfs_gem_info_fops); #endif }
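A minimal sketch (not part of vmwgfx_gem.c) of the creation pattern the ioctl above follows when handing a new buffer object to user space; the wrapper name and the PAGE_SIZE request are assumptions, while vmw_gem_object_create_with_handle() and drm_gem_object_put() are the calls defined or used in this file.

/* Hypothetical usage sketch -- illustration only, not in the original file. */
static int example_create_gem_handle(struct vmw_private *dev_priv,
				     struct drm_file *filp,
				     u32 *out_handle)
{
	struct vmw_bo *vbo;
	int ret;

	ret = vmw_gem_object_create_with_handle(dev_priv, filp, PAGE_SIZE,
						out_handle, &vbo);
	if (ret)
		return ret;

	/* The GEM handle now holds a reference; drop the creation reference. */
	drm_gem_object_put(&vbo->tbo.base);
	return 0;
}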
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2015 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * This file implements the vmwgfx context binding manager, * The sole reason for having to use this code is that vmware guest * backed contexts can be swapped out to their backing mobs by the device * at any time, also swapped in at any time. At swapin time, the device * validates the context bindings to make sure they point to valid resources. * It's this outside-of-drawcall validation (that can happen at any time), * that makes this code necessary. * * We therefore need to kill any context bindings pointing to a resource * when the resource is swapped out. Furthermore, if the vmwgfx driver has * swapped out the context we can't swap it in again to kill bindings because * of backing mob reservation lockdep violations, so as part of * context swapout, also kill all bindings of a context, so that they are * already killed if a resource to which a binding points * needs to be swapped out. * * Note that a resource can be pointed to by bindings from multiple contexts, * Therefore we can't easily protect this data by a per context mutex * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex * to protect all binding manager data. * * Finally, any association between a context and a global resource * (surface, shader or even DX query) is conceptually a context binding that * needs to be tracked by this code. */ #include "vmwgfx_drv.h" #include "vmwgfx_binding.h" #include "device_include/svga3d_reg.h" #define VMW_BINDING_RT_BIT 0 #define VMW_BINDING_PS_BIT 1 #define VMW_BINDING_SO_T_BIT 2 #define VMW_BINDING_VB_BIT 3 #define VMW_BINDING_UAV_BIT 4 #define VMW_BINDING_CS_UAV_BIT 5 #define VMW_BINDING_NUM_BITS 6 #define VMW_BINDING_PS_SR_BIT 0 /** * struct vmw_ctx_binding_state - per context binding state * * @dev_priv: Pointer to device private structure. * @list: linked list of individual active bindings. * @render_targets: Render target bindings. * @texture_units: Texture units bindings. * @ds_view: Depth-stencil view binding. * @so_targets: StreamOutput target bindings. * @vertex_buffers: Vertex buffer bindings. * @index_buffer: Index buffer binding. * @per_shader: Per shader-type bindings. * @ua_views: UAV bindings. 
* @so_state: StreamOutput bindings. * @dirty: Bitmap tracking per binding-type changes that have not yet * been emitted to the device. * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that * have not yet been emitted to the device. * @bind_cmd_buffer: Scratch space used to construct binding commands. * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the * device binding slot of the first command data entry in @bind_cmd_buffer. * * Note that this structure also provides storage space for the individual * struct vmw_ctx_binding objects, so that no dynamic allocation is needed * for individual bindings. * */ struct vmw_ctx_binding_state { struct vmw_private *dev_priv; struct list_head list; struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX]; struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS]; struct vmw_ctx_bindinfo_view ds_view; struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS]; struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; struct vmw_ctx_bindinfo_ib index_buffer; struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE]; struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE]; struct vmw_ctx_bindinfo_so so_state; unsigned long dirty; DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS); u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS]; u32 bind_cmd_count; u32 bind_first_slot; }; static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs); static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind); static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind); static void vmw_binding_build_asserts(void) __attribute__ ((unused)); typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); /** * struct vmw_binding_info - Per binding type information for the binding * manager * * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo. * @offsets: array[shader_slot] of offsets to the array[slot] * of struct bindings for the binding type. * @scrub_func: Pointer to the scrub function for this binding type. * * Holds static information to help optimize the binding manager and avoid * an excessive amount of switch statements. */ struct vmw_binding_info { size_t size; const size_t *offsets; vmw_scrub_func scrub_func; }; /* * A number of static variables that help determine the scrub func and the * location of the struct vmw_ctx_bindinfo slots for each binding type. 
*/ static const size_t vmw_binding_shader_offsets[] = { offsetof(struct vmw_ctx_binding_state, per_shader[0].shader), offsetof(struct vmw_ctx_binding_state, per_shader[1].shader), offsetof(struct vmw_ctx_binding_state, per_shader[2].shader), offsetof(struct vmw_ctx_binding_state, per_shader[3].shader), offsetof(struct vmw_ctx_binding_state, per_shader[4].shader), offsetof(struct vmw_ctx_binding_state, per_shader[5].shader), }; static const size_t vmw_binding_rt_offsets[] = { offsetof(struct vmw_ctx_binding_state, render_targets), }; static const size_t vmw_binding_tex_offsets[] = { offsetof(struct vmw_ctx_binding_state, texture_units), }; static const size_t vmw_binding_cb_offsets[] = { offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers), offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers), offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers), offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers), offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers), offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers), }; static const size_t vmw_binding_dx_ds_offsets[] = { offsetof(struct vmw_ctx_binding_state, ds_view), }; static const size_t vmw_binding_sr_offsets[] = { offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res), offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res), offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res), offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res), offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res), offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res), }; static const size_t vmw_binding_so_target_offsets[] = { offsetof(struct vmw_ctx_binding_state, so_targets), }; static const size_t vmw_binding_vb_offsets[] = { offsetof(struct vmw_ctx_binding_state, vertex_buffers), }; static const size_t vmw_binding_ib_offsets[] = { offsetof(struct vmw_ctx_binding_state, index_buffer), }; static const size_t vmw_binding_uav_offsets[] = { offsetof(struct vmw_ctx_binding_state, ua_views[0].views), }; static const size_t vmw_binding_cs_uav_offsets[] = { offsetof(struct vmw_ctx_binding_state, ua_views[1].views), }; static const size_t vmw_binding_so_offsets[] = { offsetof(struct vmw_ctx_binding_state, so_state), }; static const struct vmw_binding_info vmw_binding_infos[] = { [vmw_ctx_binding_shader] = { .size = sizeof(struct vmw_ctx_bindinfo_shader), .offsets = vmw_binding_shader_offsets, .scrub_func = vmw_binding_scrub_shader}, [vmw_ctx_binding_rt] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_rt_offsets, .scrub_func = vmw_binding_scrub_render_target}, [vmw_ctx_binding_tex] = { .size = sizeof(struct vmw_ctx_bindinfo_tex), .offsets = vmw_binding_tex_offsets, .scrub_func = vmw_binding_scrub_texture}, [vmw_ctx_binding_cb] = { .size = sizeof(struct vmw_ctx_bindinfo_cb), .offsets = vmw_binding_cb_offsets, .scrub_func = vmw_binding_scrub_cb}, [vmw_ctx_binding_dx_shader] = { .size = sizeof(struct vmw_ctx_bindinfo_shader), .offsets = vmw_binding_shader_offsets, .scrub_func = vmw_binding_scrub_dx_shader}, [vmw_ctx_binding_dx_rt] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_rt_offsets, .scrub_func = vmw_binding_scrub_dx_rt}, [vmw_ctx_binding_sr] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_sr_offsets, .scrub_func = vmw_binding_scrub_sr}, [vmw_ctx_binding_ds] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_dx_ds_offsets, 
.scrub_func = vmw_binding_scrub_dx_rt}, [vmw_ctx_binding_so_target] = { .size = sizeof(struct vmw_ctx_bindinfo_so_target), .offsets = vmw_binding_so_target_offsets, .scrub_func = vmw_binding_scrub_so_target}, [vmw_ctx_binding_vb] = { .size = sizeof(struct vmw_ctx_bindinfo_vb), .offsets = vmw_binding_vb_offsets, .scrub_func = vmw_binding_scrub_vb}, [vmw_ctx_binding_ib] = { .size = sizeof(struct vmw_ctx_bindinfo_ib), .offsets = vmw_binding_ib_offsets, .scrub_func = vmw_binding_scrub_ib}, [vmw_ctx_binding_uav] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_uav_offsets, .scrub_func = vmw_binding_scrub_uav}, [vmw_ctx_binding_cs_uav] = { .size = sizeof(struct vmw_ctx_bindinfo_view), .offsets = vmw_binding_cs_uav_offsets, .scrub_func = vmw_binding_scrub_cs_uav}, [vmw_ctx_binding_so] = { .size = sizeof(struct vmw_ctx_bindinfo_so), .offsets = vmw_binding_so_offsets, .scrub_func = vmw_binding_scrub_so}, }; /** * vmw_cbs_context - Return a pointer to the context resource of a * context binding state tracker. * * @cbs: The context binding state tracker. * * Provided there are any active bindings, this function will return an * unreferenced pointer to the context resource that owns the context * binding state tracker. If there are no active bindings, this function * will return NULL. Note that the caller must somehow ensure that a reference * is held on the context resource prior to calling this function. */ static const struct vmw_resource * vmw_cbs_context(const struct vmw_ctx_binding_state *cbs) { if (list_empty(&cbs->list)) return NULL; return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo, ctx_list)->ctx; } /** * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location. * * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot. * @bt: The binding type. * @shader_slot: The shader slot of the binding. If none, then set to 0. * @slot: The slot of the binding. */ static struct vmw_ctx_bindinfo * vmw_binding_loc(struct vmw_ctx_binding_state *cbs, enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot) { const struct vmw_binding_info *b = &vmw_binding_infos[bt]; size_t offset = b->offsets[shader_slot] + b->size*slot; return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset); } /** * vmw_binding_drop: Stop tracking a context binding * * @bi: Pointer to binding tracker storage. * * Stops tracking a context binding, and re-initializes its storage. * Typically used when the context binding is replaced with a binding to * another (or the same, for that matter) resource. */ static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi) { list_del(&bi->ctx_list); if (!list_empty(&bi->res_list)) list_del(&bi->res_list); bi->ctx = NULL; } /** * vmw_binding_add: Start tracking a context binding * * @cbs: Pointer to the context binding state tracker. * @bi: Information about the binding to track. * @shader_slot: The shader slot of the binding. * @slot: The slot of the binding. * * Starts tracking the binding in the context binding * state structure @cbs. 
*/ void vmw_binding_add(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo *bi, u32 shader_slot, u32 slot) { struct vmw_ctx_bindinfo *loc = vmw_binding_loc(cbs, bi->bt, shader_slot, slot); const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt]; if (loc->ctx != NULL) vmw_binding_drop(loc); memcpy(loc, bi, b->size); loc->scrubbed = false; list_add(&loc->ctx_list, &cbs->list); INIT_LIST_HEAD(&loc->res_list); } /** * vmw_binding_cb_offset_update: Update the offset of a cb binding * * @cbs: Pointer to the context binding state tracker. * @shader_slot: The shader slot of the binding. * @slot: The slot of the binding. * @offsetInBytes: The new offset of the binding. * * Updates the offset of an existing cb binding in the context binding * state structure @cbs. */ void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs, u32 shader_slot, u32 slot, u32 offsetInBytes) { struct vmw_ctx_bindinfo *loc = vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot); struct vmw_ctx_bindinfo_cb *loc_cb = (struct vmw_ctx_bindinfo_cb *)((u8 *) loc); loc_cb->offset = offsetInBytes; } /** * vmw_binding_add_uav_index - Add UAV index for tracking. * @cbs: Pointer to the context binding state tracker. * @slot: UAV type to which bind this index. * @index: The splice index to track. */ void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot, uint32 index) { cbs->ua_views[slot].index = index; } /** * vmw_binding_transfer: Transfer a context binding tracking entry. * * @cbs: Pointer to the persistent context binding state tracker. * @from: Staged binding info built during execbuf * @bi: Information about the binding to track. * */ static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_binding_state *from, const struct vmw_ctx_bindinfo *bi) { size_t offset = (unsigned long)bi - (unsigned long)from; struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *) ((unsigned long) cbs + offset); if (loc->ctx != NULL) { WARN_ON(bi->scrubbed); vmw_binding_drop(loc); } if (bi->res != NULL) { memcpy(loc, bi, vmw_binding_infos[bi->bt].size); list_add_tail(&loc->ctx_list, &cbs->list); list_add_tail(&loc->res_list, &loc->res->binding_head); } } /** * vmw_binding_state_kill - Kill all bindings associated with a * struct vmw_ctx_binding state structure, and re-initialize the structure. * * @cbs: Pointer to the context binding state tracker. * * Emits commands to scrub all bindings associated with the * context binding state tracker. Then re-initializes the whole structure. */ void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs) { struct vmw_ctx_bindinfo *entry, *next; vmw_binding_state_scrub(cbs); list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) vmw_binding_drop(entry); } /** * vmw_binding_state_scrub - Scrub all bindings associated with a * struct vmw_ctx_binding state structure. * * @cbs: Pointer to the context binding state tracker. * * Emits commands to scrub all bindings associated with the * context binding state tracker. */ void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs) { struct vmw_ctx_bindinfo *entry; list_for_each_entry(entry, &cbs->list, ctx_list) { if (!entry->scrubbed) { (void) vmw_binding_infos[entry->bt].scrub_func (entry, false); entry->scrubbed = true; } } (void) vmw_binding_emit_dirty(cbs); } /** * vmw_binding_res_list_kill - Kill all bindings on a * resource binding list * * @head: list head of resource binding list * * Kills all bindings associated with a specific resource. 
Typically * called before the resource is destroyed. */ void vmw_binding_res_list_kill(struct list_head *head) { struct vmw_ctx_bindinfo *entry, *next; vmw_binding_res_list_scrub(head); list_for_each_entry_safe(entry, next, head, res_list) vmw_binding_drop(entry); } /** * vmw_binding_res_list_scrub - Scrub all bindings on a * resource binding list * * @head: list head of resource binding list * * Scrub all bindings associated with a specific resource. Typically * called before the resource is evicted. */ void vmw_binding_res_list_scrub(struct list_head *head) { struct vmw_ctx_bindinfo *entry; list_for_each_entry(entry, head, res_list) { if (!entry->scrubbed) { (void) vmw_binding_infos[entry->bt].scrub_func (entry, false); entry->scrubbed = true; } } list_for_each_entry(entry, head, res_list) { struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(entry->ctx); (void) vmw_binding_emit_dirty(cbs); } } /** * vmw_binding_state_commit - Commit staged binding info * * @to: Staged binding info area to copy into to. * @from: Staged binding info built during execbuf. * * Transfers binding info from a temporary structure * (typically used by execbuf) to the persistent * structure in the context. This can be done once commands have been * submitted to hardware */ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to, struct vmw_ctx_binding_state *from) { struct vmw_ctx_bindinfo *entry, *next; list_for_each_entry_safe(entry, next, &from->list, ctx_list) { vmw_binding_transfer(to, from, entry); vmw_binding_drop(entry); } /* Also transfer uav splice indices */ to->ua_views[0].index = from->ua_views[0].index; to->ua_views[1].index = from->ua_views[1].index; } /** * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context * * @cbs: Pointer to the context binding state tracker. * * Walks through the context binding list and rebinds all scrubbed * resources. */ int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs) { struct vmw_ctx_bindinfo *entry; int ret; list_for_each_entry(entry, &cbs->list, ctx_list) { if (likely(!entry->scrubbed)) continue; if ((entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)) continue; ret = vmw_binding_infos[entry->bt].scrub_func(entry, true); if (unlikely(ret != 0)) return ret; entry->scrubbed = false; } return vmw_binding_emit_dirty(cbs); } /** * vmw_binding_scrub_shader - scrub a shader binding from a context. * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_shader *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdSetShader body; } *cmd; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_SET_SHADER; cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_binding_scrub_render_target - scrub a render target binding * from a context. * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. 
*/ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_view *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdSetRenderTarget body; } *cmd; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = binding->slot; cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); cmd->body.target.face = 0; cmd->body.target.mipmap = 0; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_binding_scrub_texture - scrub a texture binding from a context. * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. * * TODO: Possibly complement this function with a function that takes * a list of texture bindings and combines them to a single command. */ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_tex *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; struct { SVGA3dCmdSetTextureState c; SVGA3dTextureState s1; } body; } *cmd; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; cmd->header.size = sizeof(cmd->body); cmd->body.c.cid = bi->ctx->id; cmd->body.s1.stage = binding->texture_stage; cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context. * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_shader *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetShader body; } *cmd; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER; cmd->header.size = sizeof(cmd->body); cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_binding_scrub_cb - scrub a constant buffer binding from a context. * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. 
*/ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_cb *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetSingleConstantBuffer body; } *cmd; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER; cmd->header.size = sizeof(cmd->body); cmd->body.slot = binding->slot; cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; if (rebind) { cmd->body.offsetInBytes = binding->offset; cmd->body.sizeInBytes = binding->size; cmd->body.sid = bi->res->id; } else { cmd->body.offsetInBytes = 0; cmd->body.sizeInBytes = 0; cmd->body.sid = SVGA3D_INVALID_ID; } vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_collect_view_ids - Build view id data for a view binding command * without checking which bindings actually need to be emitted * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * @biv: Pointer to where the binding info array is stored in @cbs * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings and builds a buffer of view id data. * Stops at the first non-existing binding in the @bi array. * On output, @cbs->bind_cmd_count contains the number of bindings to be * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer * contains the command data. */ static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo_view *biv, u32 max_num) { unsigned long i; cbs->bind_cmd_count = 0; cbs->bind_first_slot = 0; for (i = 0; i < max_num; ++i, ++biv) { if (!biv->bi.ctx) break; cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = ((biv->bi.scrubbed) ? SVGA3D_INVALID_ID : biv->bi.res->id); } } /** * vmw_collect_dirty_view_ids - Build view id data for a view binding command * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * @bi: Pointer to where the binding info array is stored in @cbs * @dirty: Bitmap indicating which bindings need to be emitted. * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings that need to be emitted and * builds a buffer of view id data. * On output, @cbs->bind_cmd_count contains the number of bindings to be * emitted, @cbs->bind_first_slot indicates the index of the first emitted * binding, and @cbs->bind_cmd_buffer contains the command data. */ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo *bi, unsigned long *dirty, u32 max_num) { const struct vmw_ctx_bindinfo_view *biv = container_of(bi, struct vmw_ctx_bindinfo_view, bi); unsigned long i, next_bit; cbs->bind_cmd_count = 0; i = find_first_bit(dirty, max_num); next_bit = i; cbs->bind_first_slot = i; biv += i; for (; i < max_num; ++i, ++biv) { cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = ((!biv->bi.ctx || biv->bi.scrubbed) ? SVGA3D_INVALID_ID : biv->bi.res->id); if (next_bit == i) { next_bit = find_next_bit(dirty, max_num, i + 1); if (next_bit >= max_num) break; } } } /** * vmw_emit_set_sr - Issue delayed DX shader resource binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * @shader_slot: The shader slot of the binding. 
*/ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, int shader_slot) { const struct vmw_ctx_bindinfo *loc = &cbs->per_shader[shader_slot].shader_res[0].bi; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetShaderResources body; } *cmd; size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_dirty_view_ids(cbs, loc, cbs->per_shader[shader_slot].dirty_sr, SVGA3D_DX_MAX_SRVIEWS); if (cbs->bind_cmd_count == 0) return 0; view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES; cmd->header.size = sizeof(cmd->body) + view_id_size; cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN; cmd->body.startView = cbs->bind_first_slot; memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); bitmap_clear(cbs->per_shader[shader_slot].dirty_sr, cbs->bind_first_slot, cbs->bind_cmd_count); return 0; } /** * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state */ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) { const struct vmw_ctx_bindinfo_view *loc = &cbs->render_targets[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetRenderTargets body; } *cmd; size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_view_ids(cbs, loc, SVGA3D_DX_MAX_RENDER_TARGETS); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS; cmd->header.size = sizeof(cmd->body) + view_id_size; if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed) cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id; else cmd->body.depthStencilViewId = SVGA3D_INVALID_ID; memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; } /** * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command * without checking which bindings actually need to be emitted * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * @biso: Pointer to where the binding info array is stored in @cbs * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data. * Stops at the first non-existing binding in the @bi array. * On output, @cbs->bind_cmd_count contains the number of bindings to be * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer * contains the command data. 
*/ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo_so_target *biso, u32 max_num) { unsigned long i; SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer; cbs->bind_cmd_count = 0; cbs->bind_first_slot = 0; for (i = 0; i < max_num; ++i, ++biso, ++so_buffer, ++cbs->bind_cmd_count) { if (!biso->bi.ctx) break; if (!biso->bi.scrubbed) { so_buffer->sid = biso->bi.res->id; so_buffer->offset = biso->offset; so_buffer->sizeInBytes = biso->size; } else { so_buffer->sid = SVGA3D_INVALID_ID; so_buffer->offset = 0; so_buffer->sizeInBytes = 0; } } } /** * vmw_emit_set_so_target - Issue delayed streamout binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state */ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs) { const struct vmw_ctx_bindinfo_so_target *loc = &cbs->so_targets[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetSOTargets body; } *cmd; size_t cmd_size, so_target_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS); if (cbs->bind_cmd_count == 0) return 0; so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget); cmd_size = sizeof(*cmd) + so_target_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS; cmd->header.size = sizeof(cmd->body) + so_target_size; memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; } /** * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * */ static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs) { struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0]; u32 i; int ret; for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) { if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty)) continue; ret = vmw_emit_set_sr(cbs, i); if (ret) break; __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty); } return 0; } /** * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a * SVGA3dCmdDXSetVertexBuffers command * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * @bi: Pointer to where the binding info array is stored in @cbs * @dirty: Bitmap indicating which bindings need to be emitted. * @max_num: Maximum number of entries in the @bi array. * * Scans the @bi array for bindings that need to be emitted and * builds a buffer of SVGA3dVertexBuffer data. * On output, @cbs->bind_cmd_count contains the number of bindings to be * emitted, @cbs->bind_first_slot indicates the index of the first emitted * binding, and @cbs->bind_cmd_buffer contains the command data. 
*/ static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo *bi, unsigned long *dirty, u32 max_num) { const struct vmw_ctx_bindinfo_vb *biv = container_of(bi, struct vmw_ctx_bindinfo_vb, bi); unsigned long i, next_bit; SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer; cbs->bind_cmd_count = 0; i = find_first_bit(dirty, max_num); next_bit = i; cbs->bind_first_slot = i; biv += i; for (; i < max_num; ++i, ++biv, ++vbs) { if (!biv->bi.ctx || biv->bi.scrubbed) { vbs->sid = SVGA3D_INVALID_ID; vbs->stride = 0; vbs->offset = 0; } else { vbs->sid = biv->bi.res->id; vbs->stride = biv->stride; vbs->offset = biv->offset; } cbs->bind_cmd_count++; if (next_bit == i) { next_bit = find_next_bit(dirty, max_num, i + 1); if (next_bit >= max_num) break; } } } /** * vmw_emit_set_vb - Issue delayed vertex buffer binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * */ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) { const struct vmw_ctx_bindinfo *loc = &cbs->vertex_buffers[0].bi; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetVertexBuffers body; } *cmd; size_t cmd_size, set_vb_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS); if (cbs->bind_cmd_count == 0) return 0; set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer); cmd_size = sizeof(*cmd) + set_vb_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS; cmd->header.size = sizeof(cmd->body) + set_vb_size; cmd->body.startBuffer = cbs->bind_first_slot; memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); bitmap_clear(cbs->dirty_vb, cbs->bind_first_slot, cbs->bind_cmd_count); return 0; } static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) { const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[0].views[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetUAViews body; } *cmd; size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS; cmd->header.size = sizeof(cmd->body) + view_id_size; /* Splice index is specified user-space */ cmd->body.uavSpliceIndex = cbs->ua_views[0].index; memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; } static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) { const struct vmw_ctx_bindinfo_view *loc = &cbs->ua_views[1].views[0]; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCSUAViews body; } *cmd; size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS; cmd->header.size = sizeof(cmd->body) + view_id_size; /* Start index is specified user-space */ cmd->body.startIndex = cbs->ua_views[1].index; memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; 
} /** * vmw_binding_emit_dirty - Issue delayed binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state * * This function issues the delayed binding commands that arise from * previous scrub / unscrub calls. These binding commands are typically * commands that batch a number of bindings and therefore it makes sense * to delay them. */ static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs) { int ret = 0; unsigned long hit = 0; while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit)) < VMW_BINDING_NUM_BITS) { switch (hit) { case VMW_BINDING_RT_BIT: ret = vmw_emit_set_rt(cbs); break; case VMW_BINDING_PS_BIT: ret = vmw_binding_emit_dirty_ps(cbs); break; case VMW_BINDING_SO_T_BIT: ret = vmw_emit_set_so_target(cbs); break; case VMW_BINDING_VB_BIT: ret = vmw_emit_set_vb(cbs); break; case VMW_BINDING_UAV_BIT: ret = vmw_emit_set_uav(cbs); break; case VMW_BINDING_CS_UAV_BIT: ret = vmw_emit_set_cs_uav(cbs); break; default: BUG(); } if (ret) return ret; __clear_bit(hit, &cbs->dirty); hit++; } return 0; } /** * vmw_binding_scrub_sr - Schedule a dx shaderresource binding * scrub from a context * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_view *biv = container_of(bi, struct vmw_ctx_bindinfo_view, bi); struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr); __set_bit(VMW_BINDING_PS_SR_BIT, &cbs->per_shader[biv->shader_slot].dirty); __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty); return 0; } /** * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding * scrub from a context * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty); return 0; } /** * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding * scrub from a context * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty); return 0; } /** * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding * scrub from a context * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_vb *bivb = container_of(bi, struct vmw_ctx_bindinfo_vb, bi); struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(bivb->slot, cbs->dirty_vb); __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty); return 0; } /** * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context * * @bi: single binding information. * @rebind: Whether to issue a bind instead of scrub command. 
*/ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_ib *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetIndexBuffer body; } *cmd; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER; cmd->header.size = sizeof(cmd->body); if (rebind) { cmd->body.sid = bi->res->id; cmd->body.format = binding->format; cmd->body.offset = binding->offset; } else { cmd->body.sid = SVGA3D_INVALID_ID; cmd->body.format = 0; cmd->body.offset = 0; } vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty); return 0; } static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx); __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty); return 0; } /** * vmw_binding_scrub_so - Scrub a streamoutput binding from context. * @bi: Single binding information. * @rebind: Whether to issue a bind instead of scrub command. */ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_ctx_bindinfo_so *binding = container_of(bi, typeof(*binding), bi); struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetStreamOutput body; } *cmd; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT; cmd->header.size = sizeof(cmd->body); cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } /** * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state. * * @dev_priv: Pointer to a device private structure. * * Returns a pointer to a newly allocated struct or an error pointer on error. */ struct vmw_ctx_binding_state * vmw_binding_state_alloc(struct vmw_private *dev_priv) { struct vmw_ctx_binding_state *cbs; cbs = vzalloc(sizeof(*cbs)); if (!cbs) { return ERR_PTR(-ENOMEM); } cbs->dev_priv = dev_priv; INIT_LIST_HEAD(&cbs->list); return cbs; } /** * vmw_binding_state_free - Free a struct vmw_ctx_binding_state. * * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. */ void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) { vfree(cbs); } /** * vmw_binding_state_list - Get the binding list of a * struct vmw_ctx_binding_state * * @cbs: Pointer to the struct vmw_ctx_binding_state * * Returns the binding list which can be used to traverse through the bindings * and access the resource information of all bindings. */ struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs) { return &cbs->list; } /** * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state * * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared * * Drops all bindings registered in @cbs. No device binding actions are * performed. 
*/ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs) { struct vmw_ctx_bindinfo *entry, *next; list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) vmw_binding_drop(entry); } /** * vmw_binding_dirtying - Return whether a binding type is dirtying its resource * @binding_type: The binding type * * Each time a resource is put on the validation list as the result of a * context binding referencing it, we need to determine whether that resource * will be dirtied (written to by the GPU) as a result of the corresponding * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target * and unordered access view bindings are capable of dirtying its resource. * * Return: Whether the binding type dirties the resource its binding points to. */ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type) { static u32 is_binding_dirtying[vmw_ctx_binding_max] = { [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET, [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET, [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET, [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET, [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET, [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET, }; /* Review this function as new bindings are added. */ BUILD_BUG_ON(vmw_ctx_binding_max != 14); return is_binding_dirtying[binding_type]; } /* * This function is unused at run-time, and only used to hold various build * asserts important for code optimization assumptions. */ static void vmw_binding_build_asserts(void) { BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3); BUILD_BUG_ON(SVGA3D_DX_MAX_RENDER_TARGETS > SVGA3D_RT_MAX); BUILD_BUG_ON(sizeof(uint32) != sizeof(u32)); /* * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various * view id arrays. */ BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX); BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS); BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS); /* * struct vmw_ctx_binding_state::bind_cmd_buffer is used for * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers */ BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) > VMW_MAX_VIEW_BINDINGS*sizeof(u32)); BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) > VMW_MAX_VIEW_BINDINGS*sizeof(u32)); }
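A compact sketch (not part of vmwgfx_binding.c) of the add / scrub / rebind lifecycle implemented above, as the context and execbuf code are expected to drive it; the wrapper name is hypothetical and the struct vmw_ctx_bindinfo_view is assumed to be fully populated (ctx, res, bt, slot) by the caller.

/* Hypothetical usage sketch -- illustration only, not in the original file. */
static int example_binding_lifecycle(struct vmw_ctx_binding_state *cbs,
				     const struct vmw_ctx_bindinfo_view *view,
				     u32 rt_slot)
{
	/* Start tracking a render-target binding in slot @rt_slot. */
	vmw_binding_add(cbs, &view->bi, 0, rt_slot);

	/* Context swapout: emit unbind commands but keep the tracking state. */
	vmw_binding_state_scrub(cbs);

	/* Context swapin / revalidation: re-emit everything that was scrubbed. */
	return vmw_binding_rebind_all(cbs);
}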
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright © 2018-2023 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_binding.h" #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include <drm/ttm/ttm_placement.h> /** * struct vmw_dx_streamoutput - Streamoutput resource metadata. * @res: Base resource struct. * @ctx: Non-refcounted context to which @res belong. * @cotable: Refcounted cotable holding this Streamoutput. * @cotable_head: List head for cotable-so_res list. * @id: User-space provided identifier. * @size: User-space provided mob size. * @committed: Whether streamoutput is actually created or pending creation. */ struct vmw_dx_streamoutput { struct vmw_resource res; struct vmw_resource *ctx; struct vmw_resource *cotable; struct list_head cotable_head; u32 id; u32 size; bool committed; }; static int vmw_dx_streamoutput_create(struct vmw_resource *res); static int vmw_dx_streamoutput_bind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); static const struct vmw_res_func vmw_dx_streamoutput_func = { .res_type = vmw_res_streamoutput, .needs_guest_memory = true, .may_evict = false, .type_name = "DX streamoutput", .domain = VMW_BO_DOMAIN_MOB, .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_dx_streamoutput_create, .destroy = NULL, /* Command buffer managed resource. */ .bind = vmw_dx_streamoutput_bind, .unbind = vmw_dx_streamoutput_unbind, .commit_notify = vmw_dx_streamoutput_commit_notify, }; static inline struct vmw_dx_streamoutput * vmw_res_to_dx_streamoutput(struct vmw_resource *res) { return container_of(res, struct vmw_dx_streamoutput, res); } /** * vmw_dx_streamoutput_unscrub - Reattach the MOB to streamoutput. * @res: The streamoutput resource. * * Return: 0 on success, negative error code on failure. 
*/ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res) { struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); struct vmw_private *dev_priv = res->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXBindStreamOutput body; } *cmd; if (!list_empty(&so->cotable_head) || !so->committed ) return 0; cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT; cmd->header.size = sizeof(cmd->body); cmd->body.soid = so->id; cmd->body.mobid = res->guest_memory_bo->tbo.resource->start; cmd->body.offsetInBytes = res->guest_memory_offset; cmd->body.sizeInBytes = so->size; vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cotable_add_resource(so->cotable, &so->cotable_head); return 0; } static int vmw_dx_streamoutput_create(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); int ret = 0; WARN_ON_ONCE(!so->committed); if (vmw_resource_mob_attached(res)) { mutex_lock(&dev_priv->binding_mutex); ret = vmw_dx_streamoutput_unscrub(res); mutex_unlock(&dev_priv->binding_mutex); } res->id = so->id; return ret; } static int vmw_dx_streamoutput_bind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf) { struct vmw_private *dev_priv = res->dev_priv; struct ttm_buffer_object *bo = val_buf->bo; int ret; if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB)) return -EINVAL; mutex_lock(&dev_priv->binding_mutex); ret = vmw_dx_streamoutput_unscrub(res); mutex_unlock(&dev_priv->binding_mutex); return ret; } /** * vmw_dx_streamoutput_scrub - Unbind the MOB from streamoutput. * @res: The streamoutput resource. * * Return: 0 on success, negative error code on failure. */ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); struct { SVGA3dCmdHeader header; SVGA3dCmdDXBindStreamOutput body; } *cmd; if (list_empty(&so->cotable_head)) return 0; WARN_ON_ONCE(!so->committed); cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT; cmd->header.size = sizeof(cmd->body); cmd->body.soid = res->id; cmd->body.mobid = SVGA3D_INVALID_ID; cmd->body.offsetInBytes = 0; cmd->body.sizeInBytes = so->size; vmw_cmd_commit(dev_priv, sizeof(*cmd)); res->id = -1; list_del_init(&so->cotable_head); return 0; } static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_fence_obj *fence; int ret; if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB)) return -EINVAL; mutex_lock(&dev_priv->binding_mutex); ret = vmw_dx_streamoutput_scrub(res); mutex_unlock(&dev_priv->binding_mutex); if (ret) return ret; (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); vmw_bo_fence_single(val_buf->bo, fence); if (fence != NULL) vmw_fence_obj_unreference(&fence); return 0; } static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); if (state == VMW_CMDBUF_RES_ADD) { mutex_lock(&dev_priv->binding_mutex); vmw_cotable_add_resource(so->cotable, &so->cotable_head); so->committed = true; res->id = so->id; mutex_unlock(&dev_priv->binding_mutex); } else { 
mutex_lock(&dev_priv->binding_mutex); list_del_init(&so->cotable_head); so->committed = false; res->id = -1; mutex_unlock(&dev_priv->binding_mutex); } } /** * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key. * @man: Command buffer managed resource manager for current context. * @user_key: User-space identifier for lookup. * * Return: Valid refcounted vmw_resource on success, error pointer on failure. */ struct vmw_resource * vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man, u32 user_key) { return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput, user_key); } static void vmw_dx_streamoutput_res_free(struct vmw_resource *res) { struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); vmw_resource_unreference(&so->cotable); kfree(so); } static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res) { /* Destroyed by user-space cmd buf or as part of context takedown. */ res->id = -1; } /** * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource. * @man: Command buffer managed resource manager for current context. * @ctx: Pointer to context resource. * @user_key: The identifier for this streamoutput. * @list: The list of staged command buffer managed resources. * * Return: 0 on success, negative error code on failure. */ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man, struct vmw_resource *ctx, u32 user_key, struct list_head *list) { struct vmw_dx_streamoutput *so; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; int ret; so = kmalloc(sizeof(*so), GFP_KERNEL); if (!so) return -ENOMEM; res = &so->res; so->ctx = ctx; so->cotable = vmw_resource_reference(vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT)); so->id = user_key; so->committed = false; INIT_LIST_HEAD(&so->cotable_head); ret = vmw_resource_init(dev_priv, res, true, vmw_dx_streamoutput_res_free, &vmw_dx_streamoutput_func); if (ret) goto out_resource_init; ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key, res, list); if (ret) goto out_resource_init; res->id = so->id; res->hw_destroy = vmw_dx_streamoutput_hw_destroy; out_resource_init: vmw_resource_unreference(&res); return ret; } /** * vmw_dx_streamoutput_set_size - Sets streamoutput mob size in res struct. * @res: The streamoutput resource for which to set the size. * @size: The size provided by user-space to set. */ void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size) { struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); so->size = size; } /** * vmw_dx_streamoutput_remove - Stage streamoutput for removal. * @man: Command buffer managed resource manager for current context. * @user_key: The identifier for this streamoutput. * @list: The list of staged command buffer managed resources. * * Return: 0 on success, negative error code on failure. */ int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man, u32 user_key, struct list_head *list) { struct vmw_resource *r; return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput, user_key, list, &r); } /** * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback. * @dev_priv: Device private. * @list: The list of cotable resources. * @readback: Whether the call was part of a readback unbind.
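* * Scrubs every streamoutput on @list; unless @readback is set, each entry is also marked as no longer committed.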
*/ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv, struct list_head *list, bool readback) { struct vmw_dx_streamoutput *entry, *next; lockdep_assert_held_once(&dev_priv->binding_mutex); list_for_each_entry_safe(entry, next, list, cotable_head) { WARN_ON(vmw_dx_streamoutput_scrub(&entry->res)); if (!readback) entry->committed = false; } }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
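The helpers in vmwgfx_streamoutput.c above are bookkeeping only; they are driven by the command verifier when user-space emits DX streamoutput define/bind/destroy commands. The fragment below is a minimal, hypothetical sketch (not part of the driver) of how such a caller could stage a streamoutput and record its MOB size, using only the helpers declared in this file. The example_define_streamoutput() name is invented, and the manager, context and staging list are assumed to come from the surrounding execbuf validation code.

/*
 * Hypothetical sketch: stage a streamoutput definition and record its MOB
 * size using the helpers from vmwgfx_streamoutput.c. Error paths beyond the
 * basic checks, and the surrounding command verification, are omitted.
 */
static int example_define_streamoutput(struct vmw_cmdbuf_res_manager *man,
				       struct vmw_resource *ctx,
				       u32 user_key, u32 mob_size,
				       struct list_head *staged)
{
	struct vmw_resource *res;
	int ret;

	/* Stage the new streamoutput on the context's resource manager. */
	ret = vmw_dx_streamoutput_add(man, ctx, user_key, staged);
	if (ret)
		return ret;

	/* Look it up again by user key and record the user-provided size. */
	res = vmw_dx_streamoutput_lookup(man, user_key);
	if (IS_ERR(res))
		return PTR_ERR(res);

	vmw_dx_streamoutput_set_size(res, mob_size);
	vmw_resource_unreference(&res);
	return 0;
}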
// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Treat context OTables as resources to make use of the resource * backing MOB eviction mechanism, that is used to read back the COTable * whenever the backing MOB is evicted. */ #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_mksstat.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" #include <drm/ttm/ttm_placement.h> /** * struct vmw_cotable - Context Object Table resource * * @res: struct vmw_resource we are deriving from. * @ctx: non-refcounted pointer to the owning context. * @size_read_back: Size of data read back during eviction. * @seen_entries: Seen entries in command stream for this cotable. * @type: The cotable type. * @scrubbed: Whether the cotable has been scrubbed. * @resource_list: List of resources in the cotable. */ struct vmw_cotable { struct vmw_resource res; struct vmw_resource *ctx; size_t size_read_back; int seen_entries; u32 type; bool scrubbed; struct list_head resource_list; }; /** * struct vmw_cotable_info - Static info about cotable types * * @min_initial_entries: Min number of initial intries at cotable allocation * for this cotable type. * @size: Size of each entry. * @unbind_func: Unbind call-back function. */ struct vmw_cotable_info { u32 min_initial_entries; u32 size; void (*unbind_func)(struct vmw_private *, struct list_head *, bool); }; /* * Getting the initial size right is difficult because it all depends * on what the userspace is doing. The sizes will be aligned up to * a PAGE_SIZE so we just want to make sure that for majority of apps * the initial number of entries doesn't require an immediate resize. * For all cotables except SVGACOTableDXElementLayoutEntry and * SVGACOTableDXBlendStateEntry the initial number of entries fits * within the PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and * SVGACOTableDXBlendStateEntry we want to reserve two pages, * because that's what all apps will require initially. 
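* The initial guest_memory_size implied by these minimums is computed in vmw_cotable_alloc() below and is always rounded up to a whole page.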
*/ static const struct vmw_cotable_info co_info[] = { {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy}, {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy}, {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy}, {PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, {PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL}, {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL}, {1, sizeof(SVGACOTableDXSamplerEntry), NULL}, {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub}, {1, sizeof(SVGACOTableDXQueryEntry), NULL}, {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}, {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy} }; /* * Cotables with bindings that we remove must be scrubbed first, * otherwise, the device will swap in an invalid context when we remove * bindings before scrubbing a cotable... */ const SVGACOTableType vmw_cotable_scrub_order[] = { SVGA_COTABLE_RTVIEW, SVGA_COTABLE_DSVIEW, SVGA_COTABLE_SRVIEW, SVGA_COTABLE_DXSHADER, SVGA_COTABLE_ELEMENTLAYOUT, SVGA_COTABLE_BLENDSTATE, SVGA_COTABLE_DEPTHSTENCIL, SVGA_COTABLE_RASTERIZERSTATE, SVGA_COTABLE_SAMPLER, SVGA_COTABLE_STREAMOUTPUT, SVGA_COTABLE_DXQUERY, SVGA_COTABLE_UAVIEW, }; static int vmw_cotable_bind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_cotable_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static int vmw_cotable_create(struct vmw_resource *res); static int vmw_cotable_destroy(struct vmw_resource *res); static const struct vmw_res_func vmw_cotable_func = { .res_type = vmw_res_cotable, .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "context guest backed object tables", .domain = VMW_BO_DOMAIN_MOB, .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_cotable_create, .destroy = vmw_cotable_destroy, .bind = vmw_cotable_bind, .unbind = vmw_cotable_unbind, }; /** * vmw_cotable - Convert a struct vmw_resource pointer to a struct * vmw_cotable pointer * * @res: Pointer to the resource. */ static struct vmw_cotable *vmw_cotable(struct vmw_resource *res) { return container_of(res, struct vmw_cotable, res); } /** * vmw_cotable_destroy - Cotable resource destroy callback * * @res: Pointer to the cotable resource. * * There is no device cotable destroy command, so this function only * makes sure that the resource id is set to invalid. */ static int vmw_cotable_destroy(struct vmw_resource *res) { res->id = -1; return 0; } /** * vmw_cotable_unscrub - Undo a cotable unscrub operation * * @res: Pointer to the cotable resource * * This function issues commands to (re)bind the cotable to * its backing mob, which needs to be validated and reserved at this point. * This is identical to bind() except the function interface looks different. 
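* * Return: 0 on success, negative error code on failure.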
*/ static int vmw_cotable_unscrub(struct vmw_resource *res) { struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_private *dev_priv = res->dev_priv; struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCOTable body; } *cmd; WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB); dma_resv_assert_held(bo->base.resv); cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID); WARN_ON(bo->resource->mem_type != VMW_PL_MOB); cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE; cmd->header.size = sizeof(cmd->body); cmd->body.cid = vcotbl->ctx->id; cmd->body.type = vcotbl->type; cmd->body.mobid = bo->resource->start; cmd->body.validSizeInBytes = vcotbl->size_read_back; vmw_cmd_commit_flush(dev_priv, sizeof(*cmd)); vcotbl->scrubbed = false; return 0; } /** * vmw_cotable_bind - Cotable resource bind callback * * @res: Pointer to the cotable resource * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller * for convenience / fencing. * * This function issues commands to (re)bind the cotable to * its backing mob, which needs to be validated and reserved at this point. */ static int vmw_cotable_bind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf) { /* * The create() callback may have changed @res->guest_memory_bo without * the caller noticing, and with val_buf->bo still pointing to * the old buffer. Although hackish, and not used currently, * take the opportunity to correct the value here so that it's not * misused in the future. */ val_buf->bo = &res->guest_memory_bo->tbo; return vmw_cotable_unscrub(res); } /** * vmw_cotable_scrub - Scrub the cotable from the device. * * @res: Pointer to the cotable resource. * @readback: Whether to initiate a readback of the cotable data to the backup * buffer. * * In some situations (context swapouts) it might be desirable to make the * device forget about the cotable without performing a full unbind. A full * unbind requires reserved backup buffers and it might not be possible to * reserve them due to locking order violation issues. The vmw_cotable_scrub * function implements a partial unbind() without that requirement but with the * following restrictions. * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must * be called. * 2) Before the cotable backing buffer is used by the CPU, or during the * resource destruction, vmw_cotable_unbind() must be called.
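* * Return: 0 on success, negative error code on failure.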
*/ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) { struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_private *dev_priv = res->dev_priv; size_t submit_size; struct { SVGA3dCmdHeader header; SVGA3dCmdDXReadbackCOTable body; } *cmd0; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCOTable body; } *cmd1; if (vcotbl->scrubbed) return 0; if (co_info[vcotbl->type].unbind_func) co_info[vcotbl->type].unbind_func(dev_priv, &vcotbl->resource_list, readback); submit_size = sizeof(*cmd1); if (readback) submit_size += sizeof(*cmd0); cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size); if (!cmd1) return -ENOMEM; vcotbl->size_read_back = 0; if (readback) { cmd0 = (void *) cmd1; cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; cmd0->header.size = sizeof(cmd0->body); cmd0->body.cid = vcotbl->ctx->id; cmd0->body.type = vcotbl->type; cmd1 = (void *) &cmd0[1]; vcotbl->size_read_back = res->guest_memory_size; } cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; cmd1->header.size = sizeof(cmd1->body); cmd1->body.cid = vcotbl->ctx->id; cmd1->body.type = vcotbl->type; cmd1->body.mobid = SVGA3D_INVALID_ID; cmd1->body.validSizeInBytes = 0; vmw_cmd_commit_flush(dev_priv, submit_size); vcotbl->scrubbed = true; /* Trigger a create() on next validate. */ res->id = -1; return 0; } /** * vmw_cotable_unbind - Cotable resource unbind callback * * @res: Pointer to the cotable resource. * @readback: Whether to read back cotable data to the backup buffer. * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller * for convenience / fencing. * * Unbinds the cotable from the device and fences the backup buffer. */ static int vmw_cotable_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf) { struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_private *dev_priv = res->dev_priv; struct ttm_buffer_object *bo = val_buf->bo; struct vmw_fence_obj *fence; if (!vmw_resource_mob_attached(res)) return 0; WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB); dma_resv_assert_held(bo->base.resv); mutex_lock(&dev_priv->binding_mutex); if (!vcotbl->scrubbed) vmw_dx_context_scrub_cotables(vcotbl->ctx, readback); mutex_unlock(&dev_priv->binding_mutex); (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); vmw_bo_fence_single(bo, fence); if (likely(fence != NULL)) vmw_fence_obj_unreference(&fence); return 0; } /** * vmw_cotable_readback - Read back a cotable without unbinding. * * @res: The cotable resource. * * Reads back a cotable to its backing mob without scrubbing the MOB from * the cotable. The MOB is fenced for subsequent CPU access. */ static int vmw_cotable_readback(struct vmw_resource *res) { struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_private *dev_priv = res->dev_priv; struct { SVGA3dCmdHeader header; SVGA3dCmdDXReadbackCOTable body; } *cmd; struct vmw_fence_obj *fence; if (!vcotbl->scrubbed) { cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; cmd->header.size = sizeof(cmd->body); cmd->body.cid = vcotbl->ctx->id; cmd->body.type = vcotbl->type; vcotbl->size_read_back = res->guest_memory_size; vmw_cmd_commit(dev_priv, sizeof(*cmd)); } (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence); vmw_fence_obj_unreference(&fence); return 0; } /** * vmw_cotable_resize - Resize a cotable. * * @res: The cotable resource. * @new_size: The new size. * * Resizes a cotable and binds the new backup buffer. 
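* The existing contents are read back into the current backing MOB first and then copied page by page into the new, larger MOB before the device is told to switch.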
* On failure the cotable is left intact. * Important! This function may not fail once the MOB switch has been * committed to hardware. That would put the device context in an * invalid state which we can't currently recover from. */ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) { struct ttm_operation_ctx ctx = { false, false }; struct vmw_private *dev_priv = res->dev_priv; struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_bo *buf, *old_buf = res->guest_memory_bo; struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo; size_t old_size = res->guest_memory_size; size_t old_size_read_back = vcotbl->size_read_back; size_t cur_size_read_back; struct ttm_bo_kmap_obj old_map, new_map; int ret; size_t i; struct vmw_bo_params bo_params = { .domain = VMW_BO_DOMAIN_MOB, .busy_domain = VMW_BO_DOMAIN_MOB, .bo_type = ttm_bo_type_device, .size = new_size, .pin = true }; MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE); MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE); ret = vmw_cotable_readback(res); if (ret) goto out_done; cur_size_read_back = vcotbl->size_read_back; vcotbl->size_read_back = old_size_read_back; /* * While device is processing, Allocate and reserve a buffer object * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ ret = vmw_bo_create(dev_priv, &bo_params, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); goto out_done; } bo = &buf->tbo; WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL)); ret = ttm_bo_wait(old_bo, false, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed waiting for cotable unbind.\n"); goto out_wait; } /* * Do a page by page copy of COTables. This eliminates slow vmap()s. * This should really be a TTM utility. */ for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) { bool dummy; ret = ttm_bo_kmap(old_bo, i, 1, &old_map); if (unlikely(ret != 0)) { DRM_ERROR("Failed mapping old COTable on resize.\n"); goto out_wait; } ret = ttm_bo_kmap(bo, i, 1, &new_map); if (unlikely(ret != 0)) { DRM_ERROR("Failed mapping new COTable on resize.\n"); goto out_map_new; } memcpy(ttm_kmap_obj_virtual(&new_map, &dummy), ttm_kmap_obj_virtual(&old_map, &dummy), PAGE_SIZE); ttm_bo_kunmap(&new_map); ttm_bo_kunmap(&old_map); } /* Unpin new buffer, and switch backup buffers. */ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); ret = ttm_bo_validate(bo, &buf->placement, &ctx); if (unlikely(ret != 0)) { DRM_ERROR("Failed validating new COTable backup buffer.\n"); goto out_wait; } vmw_resource_mob_detach(res); res->guest_memory_bo = buf; res->guest_memory_size = new_size; vcotbl->size_read_back = cur_size_read_back; /* * Now tell the device to switch. If this fails, then we need to * revert the full resize. */ ret = vmw_cotable_unscrub(res); if (ret) { DRM_ERROR("Failed switching COTable backup buffer.\n"); res->guest_memory_bo = old_buf; res->guest_memory_size = old_size; vcotbl->size_read_back = old_size_read_back; vmw_resource_mob_attach(res); goto out_wait; } vmw_resource_mob_attach(res); /* Let go of the old mob. 
*/ vmw_bo_unreference(&old_buf); res->id = vcotbl->type; ret = dma_resv_reserve_fences(bo->base.resv, 1); if (unlikely(ret)) goto out_wait; /* Release the pin acquired in vmw_bo_create */ ttm_bo_unpin(bo); MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); return 0; out_map_new: ttm_bo_kunmap(&old_map); out_wait: ttm_bo_unpin(bo); ttm_bo_unreserve(bo); vmw_bo_unreference(&buf); out_done: MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); return ret; } /** * vmw_cotable_create - Cotable resource create callback * * @res: Pointer to a cotable resource. * * There is no separate create command for cotables, so this callback, which * is called before bind() in the validation sequence is instead used for two * things. * 1) Unscrub the cotable if it is scrubbed and still attached to a backup * buffer. * 2) Resize the cotable if needed. */ static int vmw_cotable_create(struct vmw_resource *res) { struct vmw_cotable *vcotbl = vmw_cotable(res); size_t new_size = res->guest_memory_size; size_t needed_size; int ret; /* Check whether we need to resize the cotable */ needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size; while (needed_size > new_size) new_size *= 2; if (likely(new_size <= res->guest_memory_size)) { if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) { ret = vmw_cotable_unscrub(res); if (ret) return ret; } res->id = vcotbl->type; return 0; } return vmw_cotable_resize(res, new_size); } /** * vmw_hw_cotable_destroy - Cotable hw_destroy callback * * @res: Pointer to a cotable resource. * * The final (part of resource destruction) destroy callback. */ static void vmw_hw_cotable_destroy(struct vmw_resource *res) { (void) vmw_cotable_destroy(res); } /** * vmw_cotable_free - Cotable resource destructor * * @res: Pointer to a cotable resource. */ static void vmw_cotable_free(struct vmw_resource *res) { kfree(res); } /** * vmw_cotable_alloc - Create a cotable resource * * @dev_priv: Pointer to a device private struct. * @ctx: Pointer to the context resource. * The cotable resource will not add a refcount. * @type: The cotable type. */ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, struct vmw_resource *ctx, u32 type) { struct vmw_cotable *vcotbl; int ret; u32 num_entries; vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); if (unlikely(!vcotbl)) { ret = -ENOMEM; goto out_no_alloc; } ret = vmw_resource_init(dev_priv, &vcotbl->res, true, vmw_cotable_free, &vmw_cotable_func); if (unlikely(ret != 0)) goto out_no_init; INIT_LIST_HEAD(&vcotbl->resource_list); vcotbl->res.id = type; vcotbl->res.guest_memory_size = PAGE_SIZE; num_entries = PAGE_SIZE / co_info[type].size; if (num_entries < co_info[type].min_initial_entries) { vcotbl->res.guest_memory_size = co_info[type].min_initial_entries * co_info[type].size; vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size); } vcotbl->scrubbed = true; vcotbl->seen_entries = -1; vcotbl->type = type; vcotbl->ctx = ctx; vcotbl->res.hw_destroy = vmw_hw_cotable_destroy; return &vcotbl->res; out_no_init: kfree(vcotbl); out_no_alloc: return ERR_PTR(ret); } /** * vmw_cotable_notify - Notify the cotable about an item creation * * @res: Pointer to a cotable resource. * @id: Item id. */ int vmw_cotable_notify(struct vmw_resource *res, int id) { struct vmw_cotable *vcotbl = vmw_cotable(res); if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) { DRM_ERROR("Illegal COTable id. Type is %u. 
Id is %d\n", (unsigned) vcotbl->type, id); return -EINVAL; } if (vcotbl->seen_entries < id) { /* Trigger a call to create() on next validate */ res->id = -1; vcotbl->seen_entries = id; } return 0; } /** * vmw_cotable_add_resource - Add a resource to the cotable's list of active resources. * * @res: Pointer to the struct vmw_resource representing the cotable. * @head: Pointer to the struct list_head member of the resource, dedicated * to the cotable active resource list. */ void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head) { struct vmw_cotable *vcotbl = container_of(res, struct vmw_cotable, res); list_add_tail(head, &vcotbl->resource_list); }
linux-master
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
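The cotable above grows geometrically: vmw_cotable_notify() records the highest item id seen and invalidates the resource id so that create() runs on the next validation, and vmw_cotable_create() then doubles the current guest memory size until it can hold (seen_entries + 1) entries before handing off to vmw_cotable_resize(). The stand-alone fragment below only illustrates that sizing rule with made-up numbers; the helper name and the example entry size are not taken from the driver.

#include <stdio.h>
#include <stddef.h>

/*
 * Illustration of the growth rule used by vmw_cotable_create(): double the
 * current backing size until it can hold (highest_seen_entry + 1) entries.
 */
static size_t example_cotable_new_size(size_t cur_size, int seen_entries,
				       size_t entry_size)
{
	size_t needed = (size_t)(seen_entries + 1) * entry_size;
	size_t new_size = cur_size;

	while (needed > new_size)
		new_size *= 2;

	return new_size;
}

int main(void)
{
	/* A one-page table of 64-byte entries that must now hold entry id 200. */
	printf("%zu\n", example_cotable_new_size(4096, 200, 64)); /* prints 16384 */
	return 0;
}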